Dataset columns:

| column          | type   | lengths / values       |
|-----------------|--------|------------------------|
| query           | string | 9 to 9.05k characters  |
| document        | string | 10 to 222k characters  |
| metadata        | dict   |                        |
| negatives       | list   | 30 items               |
| negative_scores | list   | 30 items               |
| document_score  | string | 4 to 10 characters     |
| document_rank   | string | 2 distinct values      |
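The rows below follow this column order. As a minimal sketch of loading and inspecting a record with the Hugging Face datasets library — the repository path is a placeholder, since the actual dataset name is not given in this dump:

from datasets import load_dataset

# Placeholder repository path; substitute the real dataset name.
ds = load_dataset("your-org/code-retrieval-triplets", split="train")

record = ds[0]
print(record["query"])                # natural-language description of the code
print(record["document"][:200])       # positive (ground-truth) code snippet
print(len(record["negatives"]))       # 30 hard-negative code snippets
print(record["negative_scores"][:5])  # retrieval scores of the first few negatives
print(record["document_score"], record["document_rank"])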
Searches the images.ndjson file and counts the number of dates found per decade.
def main(input_filename, json_output):
    num_parsed = 0
    num_total = 0
    by_decade = defaultdict(int)
    for row in generators.read_ndjson_file(input_filename):
        ds = row.get('date')
        if not ds:
            continue
        num_total += 1
        d = get_parsed_date(row)
        if not d:
            LOG.debug('Unable to parse date for %10s: %s' % (row['uniqueID'], ds))
        else:
            num_parsed += 1
            year = int(d[0] or d[1])  # ignore the range for now.
            by_decade[year // 10] += 1
    LOG.info('Parsed %d/%d dates (%.2f%%)' % (
        num_parsed, num_total, 100.0 * num_parsed / num_total))
    counts_by_decade = [
        {
            'decade': '%d0' % decade,
            'count': by_decade[decade]
        }
        for decade in sorted(by_decade.keys())
    ]
    write_as_json_to_file(counts_by_decade, json_output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_counts(datestruct):\n response = call_responder('elasticsearch', 'query/daily_proofreader_hits')\n for rec in response['result']['hits']['hits']:\n data = rec['_source']\n if data['user'] not in datestruct:\n datestruct[data['user']] = {\"cleave\": 0, \"merge\": 0,\n \"split-supervoxel\": 0}\n if '/cleave/' in data['uri']:\n datestruct[data['user']]['cleave'] += 1\n elif '/merge' in data['uri']:\n datestruct[data['user']]['merge'] += 1\n elif '/split-supervoxel' in data['uri']:\n datestruct[data['user']]['split-supervoxel'] += 1", "def read_emijrp_json(json_file):\n with open(json_file, \"rb\") as f:\n data = json.load(f)\n f.close()\n count = 0\n previous_count = 0\n tz = WlmTZ()\n daily_count = dict() # upload count by day\n total_count = dict() # cumulative upload count by day\n user_count = dict() # upload count by user\n userd_count = dict() # user count by day\n usernd_count= dict() # new user count by day\n for k in data:\n image_info = data[k]\n user = image_info[u'username']\n datetime_image = parse_date(image_info[u'date'].strip(), tz)\n date_image = datetime.datetime.strftime(datetime_image, \"%Y-%m-%d\")\n\n if(datetime_image.date()>=START and datetime_image.date()<END) and (image_info['country']==THIS_WLM):\n #user\n if user in user_count:\n user_count[user] = user_count[user]+1\n else:\n user_count[user] = 1\n if date_image in daily_count:\n daily_count[date_image] = daily_count[date_image] +1\n total_count[date_image] = total_count[date_image] + 1\n if(not (user in userd_count[date_image])):\n userd_count[date_image].append(user)\n else:\n previous_count = count -1\n daily_count[date_image] = 1\n total_count[date_image] = previous_count +1\n userd_count[date_image] = []\n usernd_count[date_image] = []\n userd_count[date_image].append(user)\n if(user_count[user] <= 1):\n usernd_count[date_image].append(user)\n count = count +1\n return {\n 'count': count,\n 'daily count': daily_count,\n 'total count': total_count,\n 'user count': user_count,\n 'user count by day': userd_count,\n 'new user count by day': usernd_count\n }", "def process_single_date(self, date_string):\n # see if there is already a ndvi.json file in\n # the output location - if so, skip\n output_location = os.path.join(self.output_location, date_string,\"JSON\",\"NDVI\")\n if (not self.replace_existing_files) and \\\n self.check_for_existing_files(output_location, 1):\n return True\n\n input_path = os.path.join(self.input_location, date_string, \"SPLIT\")\n all_input_files = self.list_directory(input_path, self.input_location_type)\n print(\"input path is {}\".format(input_path))\n\n # list all the \"NDVI\" sub-images where RGB image passes quality check\n input_files = [filename for filename in all_input_files \\\n if \"_NDVI\" in filename and \\\n self.check_sub_image(filename, input_path)]\n\n if len(input_files) == 0:\n print(\"{}: No sub-images for date {}\".format(self.name,\n date_string))\n return\n else:\n print(\"{} found {} sub-images\".format(self.name, len(input_files)))\n # if we only want a subset of sub-images, truncate the list here\n if self.n_sub_images > 0:\n input_files = input_files[:self.n_sub_images]\n\n ndvi_vals = []\n for ndvi_file in input_files:\n coords_string = find_coords_string(ndvi_file)\n ndvi_dict = self.process_sub_image(os.path.join(input_path, ndvi_file),\n date_string, coords_string)\n ndvi_vals.append(ndvi_dict)\n\n self.save_json(ndvi_vals, \"ndvi_values.json\",\n output_location,\n self.output_location_type)\n\n return True", "def find_pictures(inDate):\n 
data = {}\n # Let's do some directory searching!\n day = inDate.day.zfill(2)\n month = inDate.month.zfill(2)\n year = inDate.year\n commandTemplate = constants.findPictures\n command = commandTemplate.format(year, month, day)\n foundDirectories = exec_console_command(command)\n directoriesList = foundDirectories.split('\\n')\n\n if directoriesList:\n # Find all dates + times for all directories\n data = {}\n\n for directory in directoriesList:\n fileList = exec_console_command(\"ls \" + directory).split(\"\\n\")\n\n for fileName in fileList:\n if \".NEF\" in fileName:\n # Get filepath for NEF file\n filePath = (directory + \"/\" + fileName)\n # Find timestamp of when photo was taken\n regexSearch = re.search('(?<!\\d)\\d{6}(?!\\d)', filePath)\n fileCreationTime = \"\"\n\n if regexSearch:\n fileCreationTime = regexSearch.group(0)\n fileCreationTime = fileCreationTime[:2] + ':' + fileCreationTime[2:]\n fileCreationTime = fileCreationTime[:5] + ':' + fileCreationTime[5:]\n h, m, s = fileCreationTime.split(':')\n seconds = int(h) * 3600 + int(m) * 60 + int(s)\n offset = calendar.timegm(time.localtime()) - calendar.timegm(\n time.gmtime(time.mktime(time.localtime())))\n fileCreationTimeSeconds = seconds + offset\n fileCreationTimeReadable = time.strftime('%H:%M:%S', time.gmtime(fileCreationTimeSeconds))\n\n data[fileCreationTimeReadable] = filePath\n\n return data", "def process_one_date(self, date_string):\n metrics_dict = {}\n print(\"Processing date {}\".format(date_string))\n input_location = os.path.join(self.input_location, date_string, \"RAW\")\n for filename in self.list_directory(input_location, self.input_location_type):\n if filename.endswith(\".tif\"):\n name_variable = (filename.split('.'))[1]\n variable_array = cv.imread(self.get_file(os.path.join(input_location,\n filename),\n self.input_location_type),\n cv.IMREAD_ANYDEPTH)\n\n metrics_dict[name_variable] = variable_array.mean()\\\n .astype(np.float64)\n self.save_json(metrics_dict, \"weather_data.json\",\n os.path.join(self.output_location,\n date_string,\n \"JSON\",\"WEATHER\"),\n self.output_location_type)\n return True", "def analyze(filename):\r\n start = datetime.datetime.now()\r\n\r\n ao_count = 0\r\n\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\r\n year_count = {\r\n \"2013\": 0,\r\n \"2014\": 0,\r\n \"2015\": 0,\r\n \"2016\": 0,\r\n \"2017\": 0,\r\n \"2018\": 0\r\n }\r\n for row in reader:\r\n l_row = list(row)\r\n print(f\"\\n{row}\")\r\n year = l_row[5][6:]\r\n if year in year_count.keys():\r\n year_count[year] += 1\r\n if \"ao\" in l_row[6]:\r\n ao_count += 1\r\n\r\n end = datetime.datetime.now()\r\n return start, end, year_count, ao_count", "def process_single_date(self, date_string):\n # see if there is already a network_centralities.json file in\n # the output location - if so, skip\n output_location = os.path.join(self.output_location, date_string,\"JSON\",\"NC\")\n if (not self.replace_existing_files) and \\\n self.check_for_existing_files(output_location, 1):\n return True\n\n input_path = os.path.join(self.input_location, date_string, \"SPLIT\")\n all_input_files = self.list_directory(input_path, self.input_location_type)\n print(\"input path is {}\".format(input_path))\n\n # list all the \"BWNDVI\" sub-images where RGB image passes quality check\n input_files = [filename for filename in all_input_files \\\n if \"BWNDVI\" in filename and \\\n self.check_sub_image(filename, input_path)]\n if len(input_files) == 0:\n print(\"{}: No sub-images for date 
{}\".format(self.name,\n date_string))\n return\n else:\n print(\"{} found {} sub-images\".format(self.name, len(input_files)))\n tmp_json_dir = tempfile.mkdtemp()\n\n # if we only want a subset of sub-images, truncate the list here\n if self.n_sub_images > 0:\n input_files = input_files[:self.n_sub_images]\n\n # create a multiprocessing pool to handle each sub-image in parallel\n with Pool(processes=self.n_threads) as pool:\n # prepare the arguments for the process_sub_image function\n arguments=[(i,\n self.get_file(os.path.join(input_path,filename),\n self.input_location_type),\n tmp_json_dir,\n date_string,\n find_coords_string(filename)) \\\n for i, filename in enumerate(input_files)]\n pool.starmap(process_sub_image, arguments)\n # put all the output json files for subimages together into one for this date\n print(\"\\n Consolidating json from all subimages\")\n all_subimages = consolidate_json_to_list(tmp_json_dir)\n self.save_json(all_subimages, \"network_centralities.json\",\n output_location,\n self.output_location_type)\n shutil.rmtree(tmp_json_dir)\n return True", "def doCountTask(filename):\n f = open(filename)\n dataDict = json.load(f)\n weridCount = 0\n unweridCount = 0\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n weridCount += 1\n else:\n unweridCount += 1\n return [unweridCount, weridCount]", "def count_data(path):\n matcher = re.compile(r'[0-9]+\\.json')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def get_stats(filtered_paths : dict) -> dict:\n stats_dict = defaultdict(dict)\n for mag in filtered_paths.keys():\n for spec,paths in filtered_paths[mag].items():\n counter = Counter()\n for path in paths:\n img = cv2.imread(path, -1)\n count = Counter(list(img.ravel()))\n counter += count\n stats_dict[mag][spec] = counter\n print(mag)\n return stats_dict", "def date_to_draw_number(date):\n\n today = date.today()\n\n #hotspot plays only last for 180 days\n #validate entered date\n if (today - date).days > 180 or date > today:\n return 0\n\n days_between = (date - INIT_DATE).days\n\n return INIT_DRAW_NUMBER + (300 * days_between)\n\n\n # num_spots_sampled, spot_histogram, range_histogram, mod_histogram,\n # last_seen_dict, avg_draw_distance_dict, draw_distance_dict, last_n_avg_distance_dict_list, current_draw_num", "def check_for_recent_images(self):\n ds_list = []\n num_found = 0\n mime_list = ['image/jpg','image/png','image/jpeg','image/gif',]\n \n #build 650 doesn't seem to understand correctly the dictionary with a list right hand side\n info = xophotoactivity.sugar_version()\n if len(info)>0:\n (major,minor,micro,release) = info\n _logger.debug('sugar version major:%s minor:%s micro:%s release:%s'%info)\n else:\n _logger.debug('sugar version failure')\n minor = 70\n if minor > 80:\n (results,count) = datastore.find({'mime_type': ['image/jpeg','image/jpg', 'image/png','image/gif']})\n else:\n (results,count) = datastore.find({'mime_type': 'image/jpeg'})\n ds_list.extend(results)\n num_found += count \n (results,count) = datastore.find({'mime_type': 'image/jpg'})\n ds_list.extend(results)\n num_found += count\n (results,count) = datastore.find({'mime_type': 'image/png'})\n ds_list.extend(results)\n num_found += count\n (results,count) = datastore.find({'mime_type': 'image/gif'})\n ds_list.extend(results)\n num_found += count\n \n _logger.debug('Journal/datastore entries found:%s'%num_found)\n added = 0\n a_row_found = False\n cursor = self.db.connection().cursor()\n 
journal_list = []\n for ds in ds_list:\n #at least for now assume that the newest images are returned first\n if not a_row_found:\n journal_list.append(ds.object_id)\n dict = ds.get_metadata().get_dictionary()\n if dict[\"mime_type\"] in mime_list:\n cursor.execute('select * from groups where category = ? and jobject_id = ?',\\\n (display.journal_id,str(ds.object_id),))\n rows = cursor.fetchall()\n if len(rows) == 0:\n #may need to add date entered into ds (create date could be confusing)\n self.db.put_ds_into_picture(ds.object_id)\n self.db.add_image_to_album(display.journal_id,ds.object_id)\n added += 1\n else: #assume that pictures are returned in last in first out order\n #no longer true since we are getting each mime_type separately (build 650 kludge)\n #a_row_found = True\n pass\n ds.destroy()\n #now go through albums and remove references that are no longer in datastore\n #cursor.execute('select * from groups')\n _logger.debug('scan found %s. Added %s datastore object ids from datastore to picture'%(count,added,))\n return (num_found,added,)", "def get_recent_images(num_images=30):\n folder = app.config['UPLOAD_FOLDER']\n\n init_image_info()\n\n # get list of last modified images - ignore .json file and files start with .\n files = ['/'.join((folder, file)) \\\n for file in os.listdir(folder) if ('json' not in file) \\\n and not (file.startswith('.')) ]\n\n # list of tuples (file_path, timestamp)\n last_modified_files = [(file, os.path.getmtime(file)) for file in files]\n print(last_modified_files)\n last_modified_files = sorted(last_modified_files,\n key=lambda t: t[1], reverse=True)\n num_stored_images = len(last_modified_files)\n\n # build a list of image information\n image_stats = []\n\n print(\"THE NUMBER OF STORED IMAGES IS: {}\".format(num_stored_images))\n\n if num_stored_images != 0:\n\n # read in image info\n with open(IMAGE_INFO_JSON, 'r') as f:\n info = json.load(f)\n\n for i, f in enumerate(last_modified_files):\n # set limit for rendering pictures\n if i > num_images: break\n\n path, filename = f[0], f[0].replace(folder, '').replace('/', '')\n cur_image_info = info.get(filename, {})\n\n print(\"CURRENT IMAGE INFO IS: {}\".format(cur_image_info))\n\n img = {\n 'path': path,\n 'labels': cur_image_info\n }\n print(\"CURRENT IMG LABEL DATA IS: {}\".format(img['labels']))\n image_stats.append(img)\n\n return image_stats, num_stored_images", "def numPostings(years):\n\tcount = []\n\tfor year in years:\n\t\tfilename = \"SmartEnergy\" +str(year) +\".xlsx\"\n\t\tDB = pd.read_excel(filename, sheet_name = 'Filters')\n\t\tcount.append(DB.iloc[10][1])\n\treturn count", "def scan_images(self):\n rtn = 0\n mime_list = self.db.get_mime_list()\n (results,count) = datastore.find({})\n for f in results:\n dict = f.get_metadata().get_dictionary()\n if dict[\"mime_type\"] in mime_list:\n #record the id, file size, file date, in_ds\n self.db.create_picture_record(f.object_id, f.get_file_path())\n rtn += 1\n f.destroy()\n self.db.commit()\n _logger.debug('%s entries found in journal. 
Number of pictures %s'%(count,rtn,))\n return rtn", "def get_num_of_images(self):", "def daily_observation(z, ncells, filename, total_int_time=4., int_time=10., boxsize=None, declination=30.):\n\tNbase, N_ant = from_antenna_config(filename, z)\n\tuv_map0 = get_uv_coverage(Nbase, z, ncells, boxsize=boxsize)\n\tuv_map\t = np.zeros(uv_map0.shape)\n\ttot_num_obs = int(3600.*total_int_time/int_time)\n\tfor i in xrange(tot_num_obs-1):\n\t\tnew_Nbase = earth_rotation_effect(Nbase, i+1, int_time, declination=declination)\n\t\tuv_map1 = get_uv_coverage(new_Nbase, z, ncells, boxsize=boxsize)\n\t\tuv_map += uv_map1\n\t\tprint i\n\tuv_map = (uv_map+uv_map1)/tot_num_obs\n\treturn uv_map, N_ant", "def _count_data(path):\n matcher = re.compile(r'[0-9]+\\.dec')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def histogram_decades(our_data):\n decade_dict = {}\n for album in our_data:\n decade = int(album['year'])//10\n if decade in decade_dict:\n decade_dict[decade] += 1\n else:\n decade_dict[decade] = 1\n return decade_dict", "def analyze(filename):\n\n date_dict = {\"2013\":0,\"2014\":0,\"2015\":0,\"2016\":0,\"2017\":0,\"2018\":0}\n\n start = datetime.datetime.now()\n\n with open(filename) as csvfile:\n for line in csvfile:\n lrow = line.split(',')\n\n if \"ao\" in lrow[6]:\n found += 1\n\n # pylint: disable=C0122\n # Less than should be the default comparison operation\n if \"2012\" < lrow[5][6:] < \"2019\":\n date_dict[lrow[5][6:]] += 1\n\n\n print(f\"'ao' was found {found} times\")\n print(\n f\"2013:{date_dict['2013']}\\t\"\n f\"2014:{date_dict['2014']}\\t\"\n f\"2015:{date_dict['2015']}\\t\"\n f\"2016:{date_dict['2016']}\\t\"\n f\"2017:{date_dict['2017']}\\t\"\n f\"2018:{date_dict['2018']}\\n\"\n )\n end = datetime.datetime.now()\n return (\n start,\n end,\n # {\n # \"2013\": _2013,\n # \"2014\": _2014,\n # \"2015\": _2015,\n # \"2016\": _2016,\n # \"2017\": _2017,\n # \"2018\": _2018,\n # },\n found,\n )", "def analyze(filename, search_term):\n start = datetime.datetime.now()\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n new_ones = []\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"2018\": 0\n }\n found = 0\n for row in reader:\n if row:\n lrow = list(row)\n if lrow[5] > '00/00/2012':\n if search_term in row[6]:\n found +=1\n try:\n year_count[lrow[5][-4:]] += 1 \n except:\n pass\n print(year_count)\n print(f\"'{search_term}' was found {found} times\")\n end = datetime.datetime.now()\n return (start, end, year_count, found)", "def count_by_year(all_articles):\n all_dates = get_all_dates(all_articles)\n year_count = {}\n\n for date in all_dates:\n if date.year in year_count:\n year_count[date.year] += 1\n else:\n year_count[date.year] = 1\n\n print_all_items_in_dict(year_count)", "def plot_zcalib(args):\n\n start_date = args.start_date\n end_date = args.end_date\n\n start_date_dt = dp.parse(start_date) \n end_date_dt = dp.parse(end_date) \n \n min_date = dp.parse(SETTINGS.MIN_START_DATE)\n max_date = dp.parse(SETTINGS.MAX_END_DATE)\n \n if start_date_dt < min_date or end_date_dt > max_date:\n raise ValueError(f'Date must be in range {SETTINGS.MIN_START_DATE} - {SETTINGS.MAX_END_DATE}')\n\n phi_dir = os.path.join(SETTINGS.PHI_DIR)\n img_dir = os.path.join(SETTINGS.Z_CALIB_DIR,'images/')\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n filelist1 = glob.glob(phi_dir+\"phiest*\")\n filelist2 = 
glob.glob(phi_dir+\"phiobs*\")\n filelist1.sort()\n filelist2.sort()\n dates=[]\n ind=[]\n\n if len(filelist1) != len(filelist2):\n raise ValueError(\"Number of phiest and phiobs files does not match!\")\n\n #Loop through files to find the indices of those between the inputted start and end dates\n for f in range(0,len(filelist1)):\n match = re.search(r'\\d{8}',filelist1[f])\n file=match.group()\n file_dt=dp.parse(file)\n if file_dt >= start_date_dt and file_dt <= end_date_dt:\n ind.append(f)\n dates.append(file)\n\n ndays=len(ind)\n print(ndays)\n\n #If the number of elevation angles in the volumes changes over time, then the total number of rays also varies\n #This loop finds the maximum number of rays\n for f in range(0,ndays):\n file=np.load(filelist1[ind[f]])\n if f==0:\n [_,a]=file.shape\n nrays=a\n else:\n [_,a2]=file.shape\n if a2>a:\n nrays=a2\n\n #Number of volumes can vary each day \n nvols=250\n phiest=np.zeros((ndays,nvols,nrays))*np.nan\n phiobs=np.zeros((ndays,nvols,nrays))*np.nan\n good_rays=np.zeros((ndays,nvols))*np.nan\n x=np.zeros((nvols,nrays))*np.nan\n \n #Load each phiest and phiobs data for each day and store into 3D array. \n #Calculate number of good rays for each day/volume\n d=0\n for f in range(0,ndays):\n phiest1 = np.load(filelist1[ind[f]])\n [a,b] = phiest1.shape\n phiest[d,0:a,0:b] = phiest1 \n phiobs1 = np.load(filelist2[ind[f]])\n [a,b] = phiobs1.shape\n phiobs[d,0:a,0:b] = phiobs1\n d=d+1\n \n #Calculate number of good rays in each volume. good_rays(ndays,nvols)\n for j in range(ndays): \n for i in range(nvols):\n good_rays[j,i] = np.nansum(np.isfinite(phiest[j,i,:]))\n\n #bias_each_ray (ndays,nvols,nrays)\n #Calculate a bias/offset for each individual ray\n bias_each_ray = (phiest - phiobs) / phiobs\n \n #Only use volumes with more than 10 good rays for calculation of overall bias.\n ind = good_rays>10\n \n #SINGLE VALUES FOR WHOLE TIME PERIOD\n mean_bias = np.nanmean(bias_each_ray[ind,:])\n mean_bias_db = 10.0*np.log10(1000+mean_bias*1000)-30\n \n median_bias = np.nanmedian(bias_each_ray[ind,:])\n median_bias_db = 10.0*np.log10(1000.0+median_bias*1000.0)-30.0\n \n std = np.nanstd(bias_each_ray[ind,:])\n std_db = 10.0*np.log10(1000.0+std*1000.0)-30.0\n \n #print 'Mean bias = ', mean_bias_db, 'Median bias = ', median_bias_db, 'Standard Deviation = ', std_db\n \n #DAILY VALUES OF BIAS\n mean_bias_each_day=np.zeros(ndays)*np.nan\n median_bias_each_day=np.zeros(ndays)*np.nan\n std_each_day=np.zeros(ndays)*np.nan\n std_error_each_day = np.zeros(ndays)*np.nan\n num_rays_day=np.zeros(ndays)\n \n for day in range(ndays):\n #good_rays has shape (days,vols)\n #find index for volumes with more than 10 good rays\n ind = good_rays[day,:]>10\n #find all rays on each day within these volumes\n bias_one_day = bias_each_ray[day,ind,:].flatten()\n ind2 = np.isfinite(bias_one_day) \n if np.sum(ind2)>0:\n std_error_each_day[day] = scipy.stats.sem(bias_one_day[ind2])\n mean_bias_each_day[day] = np.nanmean(bias_one_day)\n median_bias_each_day[day] = np.nanmedian(bias_one_day)\n std_each_day[day] = np.nanstd(bias_one_day)\n \n #Number of rays for each day\n num_rays_day[day] = np.sum(np.isfinite(bias_one_day))\n\n #Convert to dB \n mean_bias_each_day_db = 10.0*np.log10(1000.0+mean_bias_each_day*1000.0)-30.0\n median_bias_each_day_db = 10.0*np.log10(1000.0+median_bias_each_day*1000.0)-30.0\n std_each_day_db = 10.0*np.log10(1000.0+std_each_day*1000.0)-30.0\n std_error_each_day_db = 10.0*np.log10(1000.0+std_error_each_day*1000.0)-30.0\n \n #Put data into dataframe\n time = 
pd.to_datetime(dates, format = '%Y%m%d')\n data = pd.DataFrame({'Mean Bias' : mean_bias_each_day_db, 'Median Bias' : median_bias_each_day_db, \n 'Standard Error' : std_error_each_day_db, 'Standard Deviation' : std_each_day_db}, \n index=time) \n \n #Make plot \n fig, ax1 = plt.subplots(figsize=(15,8)) \n plt.errorbar(data.index, mean_bias_each_day_db, std_error_each_day_db, \n color='black',fmt='o',markersize='4', elinewidth=2,capsize=4)\n plt.plot([start_date_dt, end_date_dt],[median_bias_db,median_bias_db],'r-',\n label=\"Median Bias = %s\" % round(median_bias_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db,mean_bias_db],'g', \n label=\"Mean Bias = %s\" % round(mean_bias_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db+std_db*2,mean_bias_db+std_db*2],'g--',\n label=\"Standard Deviation = %s\" % round(std_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db-std_db*2,mean_bias_db-std_db*2],'g--')\n \n plt.plot(data.index, median_bias_each_day_db,'rx')\n \n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%y'))\n plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))\n plt.gca().xaxis.set_minor_locator(mdates.WeekdayLocator(interval=1))\n plt.xlim(start_date_dt,end_date_dt)\n\n plt.xticks(rotation=90)\n plt.xlabel('Time',{'fontsize':18})\n plt.ylabel('Z Bias (dBZ)',{'fontsize':18})\n plt.yticks(size=18)\n plt.xticks(size=18)\n plt.grid()\n plt.legend(loc=0,fontsize=18)\n \n #If you want to overlay number of rays for each data point then uncomment these lines.\n #May need some tweaking to get the yaxis scale correct for the data you are plotting. \n# ax2=ax1.twinx()\n# ax2.set_ylim(0,20000)\n# ax2.plot(data.index, num_rays_day,'bx-')\n# ax2.set_yticks([5000, 10000])\n# ax2.set_yticks([1000, 2000, 3000, 4000, 7500],minor=True)\n# plt.ylabel('Total number of Rays',{'fontsize':18})\n# plt.yticks(size=18)\n# plt.xlim(start_date_dt,end_date_dt)\n\n #Save the plot\n imgname = f'{img_dir}/Z_calibration_{start_date}_{end_date}.png'\n plt.tight_layout()\n plt.savefig(imgname,dpi=150)", "def monthly_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)\n\n if not os.path.exists(monthdir):\n print \"Creating new output directory \" + monthdir\n os.makedirs(monthdir)\n\n\n files = os.listdir(daydir) # get daily files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if len(files) < 28:\n print 'No month is yet complete, skipping monthly average'\n return\n\n fileinfo = {}\n for filename in files: # parse date from each of them\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n\n years = [d.year for d in fileinfo.values()] # get actual years\n months = set([d.month for d in fileinfo.values()]) # get actual months\n \n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(months=+1)\n\n ndays_in_month = (nd-sd).days\n \n avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if len(avg_files) != ndays_in_month: # only once month complete \n #print 'New month (%02d) is not yet complete, skipping monthly average'%(sd.month)\n pass\n else:\n targetfile = 
os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))\n if not os.path.exists(targetfile):\n print \"New month (%02d) is complete, I have %d days for the next file\"%(sd.month,ndays_in_month)\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n else:\n pass\n\n sd = nd", "def get_num_like_photos_daily(self, pid, date, like):\n cursor = self.get_cursor()\n end_date = date + relativedelta(days=1)\n query = 'SELECT count(*) AS num FROM vote WHERE vote.mid = %s ' \\\n 'AND vote.upvote = %s AND voted_on > %s AND voted_on < %s'\n cursor.execute(query, (pid, like, date, end_date,))\n count = cursor.fetchall()\n return count[0]['num']", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def analyze(filename):\n\n start = datetime.datetime.now()\n found = 0\n new_ones = []\n\n # read file into a generator\n lines_generator = (line for line in open(filename, encoding=\"ISO-8859-1\"))\n\n # read generator into a list comprehension\n lists_generator = (l.split(\",\") for l in lines_generator)\n\n for line in lists_generator:\n if 'ao' in line[6]:\n found += 1\n lrow: List[str] = list(line)\n if lrow[5] > '00/00/2012':\n new_ones.append((lrow[5], lrow[0]))\n print(f\"'ao' was found {found}, times\")\n end = datetime.datetime.now()\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"2018\": 0\n }\n # create yyyy from tuple, start at char(6) and grab to end of string\n # for each yyyy, add 1 yyyy if yyyy 2013-2017\n for new in new_ones:\n if new[0][6:] == '2013':\n year_count[\"2013\"] += 1\n if new[0][6:] == '2014':\n year_count[\"2014\"] += 1\n if new[0][6:] == '2015':\n year_count[\"2015\"] += 1\n if new[0][6:] == '2016':\n year_count[\"2016\"] += 1\n if new[0][6:] == '2017':\n year_count[\"2017\"] += 1\n if new[0][6:] == '2018':\n year_count[\"2017\"] += 1\n print(year_count)\n return start, end, year_count, found", "def img_series_stats(image_ccd_lst,plots_path,obsdate):\n median_count = []\n mean_count = []\n \n source_hdu = CCDData(image_ccd_lst[0],unit='adu')\n source_image_data = source_hdu.data.astype(float) \n source_image_hdr = source_hdu.header\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n for a_file in image_ccd_lst:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n median_count.append(np.median(a_file))\n mean_count.append(np.mean(a_file))\n \n min_count_for_median = np.min(median_count)\n min_count_for_mean = np.min(mean_count)\n max_count_for_median = np.max(median_count)\n max_count_for_mean = np.max(mean_count)\n \n plt.figure()\n plt.plot(mean_count, label='mean',color=\"palevioletred\")\n plt.axhline(y=min_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='min mean {:.2f}'.format(min_count_for_mean),alpha=1)\n plt.axhline(y=max_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='max mean {:.2f}'.format(max_count_for_mean),alpha=1)\n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Mean pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_mean.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()\n\n plt.figure()\n plt.plot(median_count, 
label='median',color=\"darkviolet\")\n plt.axhline(y=min_count_for_median,linestyle='-',linewidth=0.5,color='red',label='min median {:.2f}'.format(min_count_for_median),alpha=1)\n plt.axhline(y=max_count_for_median,linestyle='-',linewidth=0.5,color='red',label='max median {:.2f}'.format(max_count_for_median),alpha=1) \n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Median pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_median.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()", "def hindu_day_count(cls, date):\n return date - cls.EPOCH", "def get_num_replies_photos_daily(self, pid, date):\n cursor = self.get_cursor()\n end_date = date + relativedelta(days=1)\n query = 'SELECT count(*) AS num ' \\\n 'FROM replies INNER JOIN messages ON messages.mid = replies.replied_to ' \\\n 'WHERE replies.replied_to = %s AND created_on > %s AND created_on < %s'\n cursor.execute(query, (pid, date, end_date,))\n count = cursor.fetchall()\n return count[0]['num']" ]
[ "0.6191097", "0.6127995", "0.602175", "0.57718587", "0.5609626", "0.5559178", "0.55234337", "0.54777694", "0.546455", "0.54527897", "0.54497105", "0.53750855", "0.5370652", "0.53426784", "0.53200203", "0.5316032", "0.5295907", "0.52799475", "0.52579844", "0.52557826", "0.5247591", "0.52295464", "0.52268183", "0.5219614", "0.52172196", "0.52101326", "0.51944274", "0.5156687", "0.5156593", "0.5156328" ]
0.6609439
0
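Each record's metadata marks the training objective as a triplet over (query, document, negatives); document_score is the retrieval score assigned to the positive document and document_rank its position among the candidates (0 means it was retrieved first). A minimal sketch of expanding one record into (anchor, positive, negative) training triplets — the helper name is illustrative, not part of the dataset:

def build_triplets(record):
    # anchor: the natural-language query; positive: the ground-truth code;
    # one triplet per mined hard negative.
    anchor = record["query"]
    positive = record["document"]
    return [(anchor, positive, negative) for negative in record["negatives"]]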
Run the CommutativeCancellation pass on a dag
def run(self, dag):
    q_gate_list = ['cx', 'cy', 'cz', 'h', 'x', 'y', 'z']

    # Gate sets to be cancelled
    cancellation_sets = defaultdict(lambda: [])

    for wire in dag.wires:
        wire_name = "{0}[{1}]".format(str(wire.register.name), str(wire.index))
        wire_commutation_set = self.property_set['commutation_set'][wire_name]

        for com_set_idx, com_set in enumerate(wire_commutation_set):
            if com_set[0].type in ['in', 'out']:
                continue
            for node in com_set:
                num_qargs = len(node.qargs)
                if num_qargs == 1 and node.name in q_gate_list:
                    cancellation_sets[(node.name, wire_name, com_set_idx)].append(node)
                if num_qargs == 1 and node.name in ['u1', 'rz', 't', 's']:
                    cancellation_sets[('z_rotation', wire_name, com_set_idx)].append(node)
                elif num_qargs == 2 and node.qargs[0] == wire:
                    second_op_name = "{0}[{1}]".format(str(node.qargs[1].register.name),
                                                       str(node.qargs[1].index))
                    q2_key = (node.name, wire_name, second_op_name,
                              self.property_set['commutation_set'][(node, second_op_name)])
                    cancellation_sets[q2_key].append(node)

    for cancel_set_key in cancellation_sets:
        set_len = len(cancellation_sets[cancel_set_key])
        if set_len > 1 and cancel_set_key[0] in q_gate_list:
            gates_to_cancel = cancellation_sets[cancel_set_key]
            for c_node in gates_to_cancel[:(set_len // 2) * 2]:
                dag.remove_op_node(c_node)
        elif set_len > 1 and cancel_set_key[0] == 'z_rotation':
            run = cancellation_sets[cancel_set_key]
            run_qarg = run[0].qargs[0]
            total_angle = 0.0  # lambda
            for current_node in run:
                if (current_node.condition is not None
                        or len(current_node.qargs) != 1
                        or current_node.qargs[0] != run_qarg):
                    raise TranspilerError("internal error")
                if current_node.name in ['u1', 'rz']:
                    current_angle = float(current_node.op.params[0])
                elif current_node.name == 't':
                    current_angle = sympy.pi / 4
                elif current_node.name == 's':
                    current_angle = sympy.pi / 2
                # Compose gates
                total_angle = current_angle + total_angle

            # Replace the data of the first node in the run
            new_op = U1Gate(total_angle)
            new_qarg = QuantumRegister(1, 'q')[0]
            new_dag = DAGCircuit()
            new_dag.add_qreg(new_qarg.register)
            new_dag.apply_operation_back(new_op, [new_qarg])
            dag.substitute_node_with_dag(run[0], new_dag)

            # Delete the other nodes in the run
            for current_node in run[1:]:
                dag.remove_op_node(current_node)

    return dag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, dag):\n\n # If the option commutative_analysis is set, construct DAGDependency from the given DAGCircuit.\n if self.do_commutative_analysis:\n dag = dag_to_dagdependency(dag)\n\n # call collect_function to collect blocks from DAG\n blocks = self.collect_function(dag)\n\n # call collapse_function to collapse each block in the DAG\n self.collapse_function(dag, blocks)\n\n # If the option commutative_analysis is set, construct back DAGCircuit from DAGDependency.\n if self.do_commutative_analysis:\n dag = dagdependency_to_dag(dag)\n\n return dag", "def test_cancel(self):\n g = TaskDependencyGraph(MockWorkflowContext())\n task = mock.Mock()\n g.add_task(task)\n with mock.patch('cloudify.workflows.api.cancel_request', True):\n self.assertRaises(api.ExecutionCancelled, g.execute)\n\n self.assertFalse(task.apply_async.called)\n self.assertFalse(task.cancel.called)", "def revoke_certi_task(c_id, revoke_date):\n\n loop1 = asyncio.get_event_loop()\n task1 = loop1.create_task(connection())\n conn = loop1.run_until_complete(task1)\n loop2 = asyncio.get_event_loop()\n task2 = loop2.create_task(revoke_flag(c_id, conn, revoke_date))\n loop2.run_until_complete(task2)", "def cancel(self, task):\n raise NotImplementedError", "def test_cancel(self) -> None:\n context: Dict[str,ArtifactDescriptor] = dict()\n cmd = pycell.python_cell(\n source='import time\\ntime.sleep(5)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(1)\n self.backend.cancel_task('000')\n time.sleep(5)\n self.assertIsNone(controller.task_id)\n self.assertIsNone(controller.state)", "async def test_cancel(\n decoy: Decoy,\n state_store: StateStore,\n command_executor: CommandExecutor,\n subject: QueueWorker,\n) -> None:\n subject.start()\n subject.cancel()\n\n await subject.join()\n\n decoy.verify(\n await command_executor.execute(command_id=matchers.Anything()),\n times=0,\n )", "def test_dag():\n\n def f(task_id):\n return f\"OP:{task_id}\"\n\n with DAG(dag_id=\"test_xcom_dag\", default_args=DEFAULT_ARGS) as dag:\n operators = [PythonOperator(python_callable=f, task_id=f\"test_op_{i}\") for i in range(4)]\n return dag, operators", "def _cancel(self, d):\n if self._finished:\n return\n try:\n raise CancelledError()\n except:\n self._caught_failure = failure.Failure()\n self._iterate()", "def hard_cancel(self, exec_info: ExecutionInfo) -> None:\n for task in exec_info.tasks.values():\n if not task.done():\n task.cancel()", "def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True", "def test_transaction_explitic_canceling(self):\n\n proxy = self.node.get_proxy('/')\n\n # look under the hood to verify that branches are added\n # recursively\n _latest_root_rev = self.node._branches[None].latest\n adapter_node = _latest_root_rev._children['adapters'][2].node\n self.assertEqual(len(self.node._branches.keys()), 1)\n self.assertEqual(len(adapter_node._branches.keys()), 1)\n\n tx = proxy.open_transaction()\n self.assertEqual(len(self.node._branches.keys()), 2)\n 
self.assertEqual(len(adapter_node._branches.keys()), 1)\n\n self.make_change(tx, '/adapters/2', 'config.log_level', 4)\n\n self.assertEqual(len(self.node._branches.keys()), 2)\n self.assertEqual(len(adapter_node._branches.keys()), 2)\n\n tx.cancel()\n\n self.assertEqual(len(self.node._branches.keys()), 1)\n self.assertEqual(len(adapter_node._branches.keys()), 1)", "async def test_cancel_noops_if_joined(\n decoy: Decoy,\n state_store: StateStore,\n command_executor: CommandExecutor,\n subject: QueueWorker,\n) -> None:\n subject.start()\n await subject.join()\n subject.cancel()", "def run(self, dag):\n self.property_set[\"commutation_set\"] = defaultdict(list)\n pending_1q = [list() for _ in range(dag.num_qubits())]\n block_id = [-(i + 1) for i in range(dag.num_qubits())]\n current_id = 0\n block_list = list()\n to_qid = dict()\n for i, qubit in enumerate(dag.qubits):\n to_qid[qubit] = i\n for node in dag.topological_op_nodes():\n qids = [to_qid[q] for q in node.qargs]\n if (\n not isinstance(node.op, Gate)\n or len(qids) > 2\n or node.op.condition\n or node.op.is_parameterized()\n ):\n for qid in qids:\n if block_id[qid] > 0:\n block_list[block_id[qid]].extend(pending_1q[qid])\n block_id[qid] = -(qid + 1)\n pending_1q[qid].clear()\n continue\n\n if len(qids) == 1:\n b_id = block_id[qids[0]]\n if b_id < 0:\n pending_1q[qids[0]].append(node)\n else:\n block_list[b_id].append(node)\n elif block_id[qids[0]] == block_id[qids[1]]:\n block_list[block_id[qids[0]]].append(node)\n else:\n block_id[qids[0]] = current_id\n block_id[qids[1]] = current_id\n new_block = list()\n if pending_1q[qids[0]]:\n new_block.extend(pending_1q[qids[0]])\n pending_1q[qids[0]].clear()\n if pending_1q[qids[1]]:\n new_block.extend(pending_1q[qids[1]])\n pending_1q[qids[1]].clear()\n new_block.append(node)\n block_list.append(new_block)\n current_id += 1\n\n self.property_set[\"block_list\"] = [tuple(block) for block in block_list]\n return dag", "async def cancel(id: UUID):\n async with get_client() as client:\n cancelling_state = State(type=StateType.CANCELLED, name=\"Cancelling\")\n try:\n result = await client.set_flow_run_state(\n flow_run_id=id, state=cancelling_state\n )\n except ObjectNotFound as exc:\n exit_with_error(f\"Flow run '{id}' not found!\")\n\n if result.status == SetStateStatus.ABORT:\n exit_with_error(\n f\"Flow run '{id}' was unable to be cancelled. 
Reason: '{result.details.reason}'\"\n )\n\n exit_with_success(f\"Flow run '{id}' was succcessfully scheduled for cancellation.\")", "def submit_dag(config, dag_file):\n with SUBMIT_LOCK:\n try:\n condor_dag_cmd = osp.join(get_condor_bin_dir(config),\n CONDOR_COMMAND['dag'])\n\n pipe = subprocess.Popen(args=(condor_dag_cmd, '-force', dag_file),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n output = pipe.stdout.read()\n status = pipe.wait()\n return status, output\n except OSError, exc:\n return -1, str(exc)", "async def cancel_shielded_checkpoint() -> None:\n await get_async_backend().cancel_shielded_checkpoint()", "async def test_cancelled_task(self):\n await self.cog._unsilence(self.text_channel)\n self.cog.scheduler.cancel.assert_called_once_with(self.text_channel.id)", "def pegasus_remove(ctx, dag_id=None, verbose=False, submit_dir=None):\n if not submit_dir and not dag_id:\n print(\"You must provide either a dag_id or dagdirectory to remove a workflow.\")\n ctx.exit(1)\n\n if submit_dir:\n cwd = os.getcwd()\n\n submit_dir = str(Path(submit_dir).resolve())\n try:\n os.chdir(submit_dir)\n except PermissionError:\n click.secho(\n click.style(\"Error: \", fg=\"red\", bold=True)\n + \"Cannot change to directory %s\" % submit_dir\n )\n ctx.exit(1)\n\n config = slurp_braindb(submit_dir)\n if not config:\n click.secho(\n click.style(\"Error: \", fg=\"red\", bold=True)\n + \"%s is not a valid submit-dir\" % submit_dir\n )\n ctx.exit(1)\n\n dag_log_file = config[\"dag\"] + \".dagman.out\"\n pattern = re.compile(r\"\\.([0-9\\.]+) \\(CONDOR_DAGMAN\\) STARTING UP\")\n\n with open(dag_log_file) as fp:\n for line in fp.readlines():\n match = pattern.search(line)\n if match:\n dag_id = match.group(1)\n else:\n if not dag_id:\n click.secho(\n click.style(\"Error: \", fg=\"red\", bold=True)\n + \"You must provide either a dag-id or dag-directory to remove a workflow.\"\n )\n ctx.exit(1)\n\n os.chdir(cwd)\n\n if dag_id:\n condor_rm = shutil.which(\"condor_rm\")\n cmd = (condor_rm, dag_id)\n\n rv = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if rv.returncode == 0:\n click.echo(rv.stdout.decode().strip())\n click.secho(\"✨ Success\", fg=\"green\")\n else:\n click.echo(rv.stderr.decode().strip())\n click.secho(\"Error \", fg=\"red\", bold=True)\n ctx.exit(42)", "def run(self, dag):\n # Initiate the commutation set\n self.property_set['commutation_set'] = defaultdict(list)\n\n # Build a dictionary to keep track of the gates on each qubit\n # The key with format (wire_name) will store the lists of commutation sets\n # The key with format (node, wire_name) will store the index of the commutation set\n # on the wire with wire_name, thus, for example:\n # self.property_set['commutation_set'][wire_name][(node, wire_name)] will give the\n # commutation set that contains node.\n\n for wire in dag.wires:\n wire_name = \"{0}[{1}]\".format(str(wire.register.name), str(wire.index))\n self.property_set['commutation_set'][wire_name] = []\n\n # Add edges to the dictionary for each qubit\n for node in dag.topological_op_nodes():\n for (_, _, edge_data) in dag.edges(node):\n\n edge_name = edge_data['name']\n self.property_set['commutation_set'][(node, edge_name)] = -1\n\n # Construct the commutation set\n for wire in dag.wires:\n wire_name = \"{0}[{1}]\".format(str(wire.register.name), str(wire.index))\n\n for current_gate in dag.nodes_on_wire(wire):\n\n current_comm_set = self.property_set['commutation_set'][wire_name]\n if not current_comm_set:\n 
current_comm_set.append([current_gate])\n\n if current_gate not in current_comm_set[-1]:\n prev_gate = current_comm_set[-1][-1]\n does_commute = False\n try:\n does_commute = _commute(current_gate, prev_gate, self.cache)\n except TranspilerError:\n pass\n if does_commute:\n current_comm_set[-1].append(current_gate)\n\n else:\n current_comm_set.append([current_gate])\n\n temp_len = len(current_comm_set)\n self.property_set['commutation_set'][(current_gate, wire_name)] = temp_len - 1", "def cancel():", "def check_for_cancellation(self) -> Iterator:\n yield", "def cancel_workers(self):\n pass", "def cancel(self, pipeline: Optional['Pipeline'] = None, enqueue_dependents: bool = False):\n if self.is_canceled:\n raise InvalidJobOperation(\"Cannot cancel already canceled job: {}\".format(self.get_id()))\n from .queue import Queue\n from .registry import CanceledJobRegistry\n\n pipe = pipeline or self.connection.pipeline()\n\n while True:\n try:\n q = Queue(\n name=self.origin, connection=self.connection, job_class=self.__class__, serializer=self.serializer\n )\n\n self.set_status(JobStatus.CANCELED, pipeline=pipe)\n if enqueue_dependents:\n # Only WATCH if no pipeline passed, otherwise caller is responsible\n if pipeline is None:\n pipe.watch(self.dependents_key)\n q.enqueue_dependents(self, pipeline=pipeline, exclude_job_id=self.id)\n self._remove_from_registries(pipeline=pipe, remove_from_queue=True)\n\n registry = CanceledJobRegistry(\n self.origin, self.connection, job_class=self.__class__, serializer=self.serializer\n )\n registry.add(self, pipeline=pipe)\n if pipeline is None:\n pipe.execute()\n break\n except WatchError:\n if pipeline is None:\n continue\n else:\n # if the pipeline comes from the caller, we re-raise the\n # exception as it is the responsibility of the caller to\n # handle it\n raise", "async def cancel():\n await asyncio.get_running_loop().run_in_executor(None, cancel_inner)", "def test_parent_not_executed():\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_parent_not_executed_dag\", schedule_interval=None, start_date=start_date)\n op1 = BranchPythonOperator(task_id=\"op1\", python_callable=lambda: \"op3\", dag=dag)\n op2 = DummyOperator(task_id=\"op2\", dag=dag)\n op3 = DummyOperator(task_id=\"op3\", dag=dag)\n op1 >> [op2, op3]\n\n ti2 = TaskInstance(op2, start_date)\n\n with create_session() as session:\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0\n assert dep.is_met(ti2, session)\n assert ti2.state == State.NONE", "def cancel_run(self, run_id):\n raise NotImplementedError()", "def _build_job_cancellation_call(self,\n name,\n job_settings,\n logger):\n raise NotImplementedError(\n \"'_build_job_cancellation_call' not implemented.\")", "def do_cancel(order):\r\n self.gox.cancel(order.oid)", "async def test_cancel(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'cancel_test'\n interval_schedule.process_name = 'sleep30'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(5)\n tasks = await scheduler.get_running_tasks()\n\n await scheduler.cancel_task(tasks[0].task_id) # Cancel a running task\n\n await self.stop_scheduler(scheduler)", "def _push_node_back(self, dag: DAGCircuit, node: DAGOpNode):\n node_start_time = 
self.property_set[\"node_start_time\"]\n conditional_latency = self.property_set.get(\"conditional_latency\", 0)\n clbit_write_latency = self.property_set.get(\"clbit_write_latency\", 0)\n\n if isinstance(node.op, Gate):\n alignment = self.pulse_align\n elif isinstance(node.op, Measure):\n alignment = self.acquire_align\n elif isinstance(node.op, Delay) or getattr(node.op, \"_directive\", False):\n # Directive or delay. These can start at arbitrary time.\n alignment = None\n else:\n raise TranspilerError(f\"Unknown operation type for {repr(node)}.\")\n\n this_t0 = node_start_time[node]\n\n if alignment is not None:\n misalignment = node_start_time[node] % alignment\n if misalignment != 0:\n shift = max(0, alignment - misalignment)\n else:\n shift = 0\n this_t0 += shift\n node_start_time[node] = this_t0\n\n # Compute shifted t1 of this node separately for qreg and creg\n new_t1q = this_t0 + node.op.duration\n this_qubits = set(node.qargs)\n if isinstance(node.op, Measure):\n # creg access ends at the end of instruction\n new_t1c = new_t1q\n this_clbits = set(node.cargs)\n else:\n if node.op.condition_bits:\n # conditional access ends at the beginning of node start time\n new_t1c = this_t0\n this_clbits = set(node.op.condition_bits)\n else:\n new_t1c = None\n this_clbits = set()\n\n # Check immediate successors for overlap\n for next_node in self._get_next_gate(dag, node):\n # Compute next node start time separately for qreg and creg\n next_t0q = node_start_time[next_node]\n next_qubits = set(next_node.qargs)\n if isinstance(next_node.op, Measure):\n # creg access starts after write latency\n next_t0c = next_t0q + clbit_write_latency\n next_clbits = set(next_node.cargs)\n else:\n if next_node.op.condition_bits:\n # conditional access starts before node start time\n next_t0c = next_t0q - conditional_latency\n next_clbits = set(next_node.op.condition_bits)\n else:\n next_t0c = None\n next_clbits = set()\n # Compute overlap if there is qubits overlap\n if any(this_qubits & next_qubits):\n qreg_overlap = new_t1q - next_t0q\n else:\n qreg_overlap = 0\n # Compute overlap if there is clbits overlap\n if any(this_clbits & next_clbits):\n creg_overlap = new_t1c - next_t0c\n else:\n creg_overlap = 0\n\n # Shift next node if there is finite overlap in either in qubits or clbits\n overlap = max(qreg_overlap, creg_overlap)\n node_start_time[next_node] = node_start_time[next_node] + overlap" ]
[ "0.6119412", "0.5714813", "0.54337865", "0.5399336", "0.5372339", "0.5356294", "0.532089", "0.5288728", "0.5269343", "0.5261692", "0.5250306", "0.52219206", "0.51915073", "0.51788664", "0.5174982", "0.51746696", "0.5166423", "0.5163573", "0.5149194", "0.51294947", "0.5109441", "0.5108175", "0.5081314", "0.5071634", "0.50365716", "0.50137484", "0.5006846", "0.50065964", "0.49907997", "0.4979801" ]
0.76111484
0
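The positive document in this record appears to be the run method of Qiskit's CommutativeCancellation transpiler pass (an older Terra version, judging by the node attributes it uses). As a hedged usage sketch with standard Qiskit imports — not taken from the dataset — the pass is normally driven through a PassManager, which converts the circuit to a DAG, runs the required CommutationAnalysis, and then applies the cancellation:

from qiskit import QuantumCircuit
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import CommutationAnalysis, CommutativeCancellation

qc = QuantumCircuit(2)
qc.cx(0, 1)
qc.z(0)      # Z on the control commutes with CX,
qc.cx(0, 1)  # so the two CX gates cancel, leaving only the Z

pm = PassManager([CommutationAnalysis(), CommutativeCancellation()])
print(pm.run(qc))  # expected: a circuit containing just the Z gate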
Helper to render index page with custom_proc
def render_index(request, *args, **kwargs):
    # add context_instance keyword
    kwargs.update(
        {'context_instance': RequestContext(request, processors=[custom_proc])})
    return render(request, *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index_fn():\n page_title = \"Index\"\n \n if request.method == \"GET\":\n return render_template(\"index.html\",\n pg_title=page_title,\n )", "def index(self):\n\t\treturn render_template('index.html')", "def index():\n return render_template('0-index.html')", "def get_index():\n return render_template('index.html')", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index():\n return render_template('pages/index.html', isNav=True)", "def index_page():\n \n return render_template(\"index.html\")", "def index():\r\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index():\n # Render template\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")" ]
[ "0.71589965", "0.69598264", "0.6947079", "0.6940647", "0.6934658", "0.6934658", "0.6934658", "0.6873126", "0.68489075", "0.68354297", "0.68185735", "0.68185735", "0.67878515", "0.6760982", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396", "0.67558396" ]
0.82352185
0
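The negative_scores arrays and the per-record document_score values look like similarity scores in the 0-1 range, but the dump does not say which retrieval model produced them. As an assumption-only sketch, scores of this shape are commonly cosine similarities between a query embedding and candidate code embeddings:

import numpy as np

def cosine_scores(query_vec, candidate_vecs):
    # Cosine similarity between one query embedding and N candidate embeddings.
    query_vec = query_vec / np.linalg.norm(query_vec)
    candidate_vecs = candidate_vecs / np.linalg.norm(candidate_vecs, axis=1, keepdims=True)
    return candidate_vecs @ query_vec

# document_rank would then be the position of the positive document
# when all candidates are sorted by descending score.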
Test that each parameter defined in an environment file is also defined in the paired heat template
def test_env_params_are_defined_in_template(yaml_file):
    bad = []
    template_pair = get_environment_pair(yaml_file)

    if not template_pair:
        pytest.skip("No yaml/env pair could be determined")

    template = template_pair.get("yyml").get("parameters", {})
    environment = template_pair.get("eyml").get("parameters", {})

    if not isinstance(template, dict) or not isinstance(environment, dict):
        pytest.skip("No parameters defined in environment or template")

    template = template.keys()
    environment = environment.keys()

    for parameter in environment:
        if parameter not in template:
            bad.append(
                (
                    "{} is defined in the environment file but not in "
                    + "the template file "
                ).format(parameter)
            )
    msg = (
        "All parameters defined in an environment file must "
        + "be defined in the template file. "
        + ". ".join(bad)
    )
    assert not bad, msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_setup_multiple_parameters_system():\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_script = get_template_script(tmp_dir)\n\n # Force antechamber parametrization of benzene to output frcmod file\n exp_builder = ExperimentBuilder(yaml_script)\n exp_builder._db._setup_molecules('benzene')\n benzene_dir = exp_builder._db.get_molecule_dir('benzene')\n frcmod_path = os.path.join(benzene_dir, 'benzene.frcmod')\n benzene_path = os.path.join(benzene_dir, 'benzene.gaff.mol2')\n\n # Redefine benzene to use leaprc.gaff and benzene.frcmod\n # and set up system for hydration free energy calculation\n yaml_script['molecules'] = {\n 'benzene-frcmod': {'filepath': benzene_path,\n 'leap': {'parameters': ['leaprc.gaff', frcmod_path]}}}\n yaml_script['systems'] = {\n 'system':\n {'solute': 'benzene-frcmod', 'solvent1': 'PME', 'solvent2': 'vacuum',\n 'leap': {'parameters': 'oldff/leaprc.ff14SB'}}\n }\n del yaml_script['experiments']\n\n exp_builder = ExperimentBuilder(yaml_script)\n system_files_path = exp_builder._db.get_system('system')\n\n # Check that output exist:\n for phase in system_files_path:\n assert os.path.exists(phase.parameters_path)\n assert os.path.exists(phase.position_path)\n assert os.path.getsize(phase.parameters_path) > 0\n assert os.path.getsize(phase.position_path) > 0", "def test_template_assignment():\n humidity_template = ConditionTemplate(\"Humidity\", bounds=RealBounds(0.5, 0.75, \"\"))\n template = ProcessTemplate(\"Dry\", conditions=[[humidity_template, RealBounds(0.5, 0.65, \"\")]])\n ProcessSpec(\"Dry a polymer\", template=template, conditions=[\n Condition(\"Humidity\", value=NominalReal(0.6, \"\"), template=humidity_template)])", "def test_heat_deployed_environment_error(self):\n exp_ret = {\n \"name\": (\"mystack\",),\n \"comment\": \"Error parsing template Template format version not found.\",\n \"changes\": {\"stack_name\": \"mystack\", \"comment\": \"Create stack\"},\n \"result\": False,\n }\n\n patch_heat = patch.dict(\n heat.__salt__,\n {\n \"heat.show_stack\": MagicMock(return_value={\"result\": False}),\n \"heat.create_stack\": salt.modules.heat.create_stack,\n },\n )\n\n patch_file = patch.dict(\n \"salt.modules.heat.__salt__\",\n {\n \"file.get_managed\": file_.get_managed,\n \"file.manage_file\": MagicMock(\n side_effect=[{\"result\": True}, {\"result\": False}]\n ),\n },\n )\n\n patch_create = patch(\n \"salt.modules.heat.create_stack\",\n MagicMock(\n return_value={\"result\": True, \"comment\": \"Created stack 'mystack'.\"}\n ),\n )\n\n with patch_heat, patch_file, patch_create, self.patch_check:\n ret = heat.deployed(\n name=\"mystack\",\n profile=\"openstack1\",\n template=os.path.join(\n RUNTIME_VARS.BASE_FILES, \"templates\", \"heat-template.yml\"\n ),\n poll=0,\n environment=os.path.join(\n RUNTIME_VARS.BASE_FILES, \"templates\", \"heat-env.yml\"\n ),\n )\n assert ret == exp_ret", "def check_prerequisites(self, env):\n super(EquatorialUpperocean, self).check_prerequisites(env)\n print(' Checking prerequisites for : {0}'.format(self.__class__.__name__))\n\n # check in the PHC2 files exist based on the number of vertical levels\n # check temp\n phcTemp = env['TOBSFILE']\n if env['VERTICAL'] == 42:\n phcTemp = env['TOBSFILE_V42']\n\n # check if the link to the PHC temp file exists and is readable\n sourceFile = '{0}/{1}'.format(env['TSOBSDIR'],phcTemp)\n linkFile = '{0}/{1}'.format(env['WORKDIR'],phcTemp)\n diagUtilsLib.createSymLink(sourceFile, linkFile)\n\n # check salt\n phcSalt = env['SOBSFILE']\n if env['VERTICAL'] == 42:\n 
phcTemp = env['SOBSFILE_V42']\n\n # check if the link to the PHC salt file exists and is readable\n sourceFile = '{0}/{1}'.format(env['TSOBSDIR'],phcSalt)\n linkFile = '{0}/{1}'.format(env['WORKDIR'],phcSalt)\n diagUtilsLib.createSymLink(sourceFile, linkFile)\n\n # check if the link to the TOGA-TAO exists and is readable\n sourceFile = '{0}/{1}'.format(env['TOGATAODIR'],env['TOGATAOFILE'])\n linkFile = '{0}/{1}'.format(env['WORKDIR'],env['TOGATAOFILE'])\n diagUtilsLib.createSymLink(sourceFile, linkFile)", "def test_setup_explicit_solvation_system():\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_script = get_template_script(tmp_dir)\n del yaml_script['experiments']\n exp_builder = ExperimentBuilder(yaml_script)\n output_dir = os.path.dirname(\n exp_builder._db.get_system('hydration-system')[0].position_path)\n\n # Test that output file exists and that it has correct components\n expected_resnames = {'solvent1': set(['TOL', 'WAT']), 'solvent2': set(['TOL'])}\n for phase in expected_resnames:\n found_resnames = set()\n pdb_path = os.path.join(output_dir, phase + '.pdb')\n prmtop_path = os.path.join(output_dir, phase + '.prmtop')\n inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')\n\n with open(pdb_path, 'r') as f:\n for line in f:\n if len(line) > 10 and line[:5] != 'CRYST':\n found_resnames.add(line[17:20])\n\n assert os.path.exists(prmtop_path)\n assert os.path.exists(inpcrd_path)\n assert os.path.getsize(prmtop_path) > 0\n assert os.path.getsize(inpcrd_path) > 0\n assert found_resnames == expected_resnames[phase]", "def _process_environments(self):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n raw = '/'.join((self.rawdir, 'environment'))\n logger.info(\"building labels for environment\")\n env_parts = {}\n label_map = {}\n env = Environment(g)\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (environment_id, uniquename, description) = line\n # 22 heat sensitive | tetracycline conditional\n\n environment_num = environment_id\n environment_internal_id = self._makeInternalIdentifier(\n 'environment', environment_num)\n if environment_num not in self.idhash['environment']:\n self.idhash['environment'][environment_num] = \\\n environment_internal_id\n\n environment_id = self.idhash['environment'][environment_num]\n environment_label = uniquename\n if environment_label == 'unspecified':\n environment_label += ' environment'\n env.addEnvironment(environment_id, environment_label)\n self.label_hash[environment_id] = environment_label\n\n # split up the environment into parts\n # if there's parts, then add them to the hash;\n # we'll match the components in a second pass\n components = re.split(r'\\|', uniquename)\n if len(components) > 1:\n env_parts[environment_id] = components\n else:\n label_map[environment_label] = environment_id\n\n # ### end loop through file\n\n # build the environmental components\n for eid in env_parts:\n eid = eid.strip()\n for e in env_parts[eid]:\n # search for the environmental component by label\n env_id = label_map.get(e.strip())\n env.addComponentToEnvironment(eid, env_id)\n\n return", "def test_env_top_list(self, monkeypatch):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n environment:\n - FOO=This is foo # No quotes\n - FOO_WITH_QUOTES=\"Quoted foo\" # Quotes included in value\n - BAR=This is bar\n - MAGIC=42\n - SWITCH_2=true\n - EMPTY=\n - EXTERNAL # Comes from os env\n - 
EXTERNAL_NOTSET # Missing in os env\n \"\"\"\n )\n\n monkeypatch.setenv(\"EXTERNAL\", \"Outside world\")\n monkeypatch.delenv(\"EXTERNAL_NOTSET\", raising=False)\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n expect = dict(\n FOO=\"This is foo\",\n FOO_WITH_QUOTES='\"Quoted foo\"',\n BAR=\"This is bar\",\n MAGIC=\"42\", # N.B. string\n SWITCH_2=\"true\",\n EMPTY=\"\",\n EXTERNAL=\"Outside world\",\n EXTERNAL_NOTSET=\"\",\n )\n assert expect == config.environment", "def test_setup_explicit_system_leap():\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir)\n exp_builder = ExperimentBuilder(yaml_content)\n\n output_dir = os.path.dirname(\n exp_builder._db.get_system('explicit-system')[0].position_path)\n\n # Test that output file exists and that there is water\n expected_resnames = {'complex': set(['BEN', 'TOL', 'WAT']),\n 'solvent': set(['TOL', 'WAT'])}\n for phase in expected_resnames:\n found_resnames = set()\n pdb_path = os.path.join(output_dir, phase + '.pdb')\n prmtop_path = os.path.join(output_dir, phase + '.prmtop')\n inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')\n\n with open(pdb_path, 'r') as f:\n for line in f:\n if len(line) > 10 and line[:5] != 'CRYST':\n found_resnames .add(line[17:20])\n\n assert os.path.exists(prmtop_path)\n assert os.path.exists(inpcrd_path)\n assert os.path.getsize(prmtop_path) > 0\n assert os.path.getsize(inpcrd_path) > 0\n assert found_resnames == expected_resnames[phase]", "def test013(testDir, dirDict, pflag):\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_system.cfg\"), testDir)\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_c.cfg\"), testDir)\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_a.cfg\"), testDir)\n psys = osp.join(testDir, \"params_system.cfg\")\n ptrodec = osp.join(testDir, \"params_c.cfg\")\n ptrodea = osp.join(testDir, \"params_a.cfg\")\n P_s = IO.get_config(psys)\n P_s.set(\"Sim Params\", \"profileType\", \"CV\")\n P_s.set(\"Sim Params\", \"Vset\", \"3.8\")\n P_s.set(\"Sim Params\", \"tsteps\", \"25\")\n P_s.set(\"Sim Params\", \"Nvol_c\", \"2\")\n P_s.set(\"Sim Params\", \"Nvol_s\", \"2\")\n P_s.set(\"Sim Params\", \"Nvol_a\", \"2\")\n P_s.set(\"Particles\", \"cs0_c\", \"0.2\")\n P_s.set(\"Particles\", \"cs0_a\", \"0.95\")\n P_s.set(\"Electrolyte\", \"elyteModelType\", \"dilute\")\n IO.write_config_file(P_s, psys)\n P = IO.get_config(ptrodec)\n P.set(\"Particles\", \"type\", \"homog\")\n P.set(\"Particles\", \"shape\", \"sphere\")\n P.set(\"Material\", \"muRfunc\", \"LiMn2O4_ss2\")\n P.set(\"Reactions\", \"rxnType\", \"Marcus\")\n IO.write_config_file(P, ptrodec)\n P = IO.get_config(ptrodea)\n P.set(\"Particles\", \"discretization\", \"2.5e-9\")\n P.set(\"Particles\", \"shape\", \"cylinder\")\n P.set(\"Material\", \"muRfunc\", \"testIS_ss\")\n P.set(\"Reactions\", \"rxnType\", \"BV_raw\")\n IO.write_config_file(P, ptrodea)\n main.main(psys, keepArchive=False)\n shutil.move(dirDict[\"simOut\"], testDir)\n if pflag:\n corePlots(testDir, dirDict)\n elytePlots(testDir, dirDict)\n electrodePlots(testDir, dirDict, \"c\")\n cmpr.bulkpf(testDir, dirDict, \"c\")\n electrodePlots(testDir, dirDict, \"a\")\n cmpr.bulkpf(testDir, dirDict, \"a\")", "def test_template_forloop(self):\n print \"Running: %s - %s\" % (self.id(), self.shortDescription())\n # compare sequence items\n for i in range(0, 3):\n input_item = g.config['templates']['sequence'][i]\n output_item = self.output_config['out_templates']['sequence'][i]\n\n self.assertEqual(input_item, 
output_item,\n 'sequence items (%i) do not match' % i)", "def test_plasma_nlte_exc_section_config(\n tardis_config_verysimple_nlte, nlte_raw_model, nlte_atom_data\n):\n tardis_config_verysimple_nlte[\"plasma\"][\"continuum_interaction\"][\n \"species\"\n ] = [\n \"He I\",\n ]\n tardis_config_verysimple_nlte[\"plasma\"][\"nlte_excitation_species\"] = [\"H I\"]\n config = Configuration.from_config_dict(tardis_config_verysimple_nlte)\n with pytest.raises(PlasmaConfigError):\n plasma = assemble_plasma(config, nlte_raw_model, nlte_atom_data)", "def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. 
Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2", "def test_no_reproducible_for_varinat_analysis(self):\n self.testcases[0].job_type = 'some_type1'\n self.testcases[0].project_name = 'project1'\n self.testcases[0].crash_state = 'abcde'\n self.testcases[0].one_time_crasher_flag = False\n self.testcases[0].crash_type = 'crash_type1'\n self.testcases[0].security_flag = True\n self.testcases[1].job_type = 'some_type2'\n self.testcases[1].project_name = 'project1'\n self.testcases[1].crash_state = 'vwxyz'\n self.testcases[1].crash_type = 'crash_type2'\n self.testcases[1].one_time_crasher_flag = True\n self.testcases[1].security_flag = True\n\n for t in self.testcases:\n t.put()\n\n # testcase2's varinat will be evaluated against testcase1\n self.testcase_variants[0].job_type = 'fake_engine_asan_project1'\n self.testcase_variants[0].testcase_id = self.testcases[0].key.id()\n self.testcase_variants[0].security_flag = True\n self.testcase_variants[1].job_type = 'some_type1'\n self.testcase_variants[1].crash_state = 'abcde'\n self.testcase_variants[1].crash_type = 'crash_type1'\n self.testcase_variants[1].testcase_id = self.testcases[1].key.id()\n self.testcase_variants[1].security_flag = True\n\n for v in self.testcase_variants:\n v.put()\n\n grouper.group_testcases()\n\n for index, t in enumerate(self.testcases):\n self.testcases[index] = data_handler.get_testcase_by_id(t.key.id())\n self.assertEqual(self.testcases[index].group_id, 0)\n self.assertTrue(self.testcases[index].is_leader)", "def test_heat_deployed_environment(self):\n exp_ret = {\n \"name\": (\"mystack\",),\n \"comment\": \"Created stack 'mystack'.\",\n \"changes\": {\"stack_name\": \"mystack\", \"comment\": \"Create stack\"},\n \"result\": True,\n }\n\n patch_heat = patch.dict(\n heat.__salt__,\n {\n \"heat.show_stack\": MagicMock(return_value={\"result\": False}),\n \"heat.create_stack\": salt.modules.heat.create_stack,\n },\n )\n\n patch_file = patch.dict(\n \"salt.modules.heat.__salt__\",\n {\n \"file.get_managed\": file_.get_managed,\n \"file.manage_file\": file_.manage_file,\n },\n )\n\n patch_create = patch(\n \"salt.modules.heat.create_stack\",\n MagicMock(\n return_value={\"result\": True, \"comment\": \"Created stack 'mystack'.\"}\n ),\n )\n\n with patch_heat, patch_file, patch_create, self.patch_check:\n ret = heat.deployed(\n name=\"mystack\",\n profile=\"openstack1\",\n template=os.path.join(\n RUNTIME_VARS.BASE_FILES, \"templates\", \"heat-template.yml\"\n ),\n poll=0,\n environment=os.path.join(\n RUNTIME_VARS.BASE_FILES, \"templates\", \"heat-env.yml\"\n ),\n )\n assert ret == exp_ret", "def test_extract_configs():\n extract_config_dir = os.path.join(\n settings.BASE_DIR, \"extract_configs\", \"templates\"\n )\n for ft, obj in FILE_TYPES.items():\n ec_file = obj[\"template\"]\n if not ec_file:\n continue\n ec_path = os.path.join(extract_config_dir, ec_file)\n print(f\"Testing extract config: {ec_path}\")\n assert os.path.exists(ec_path)\n df = make_template_df(ft)\n Extractor().extract(df, ec_path)", "def test_do_some_environments_exist():\n assert len(environments) > 0", "def test_env_top_dict(self, monkeypatch):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n environment:\n FOO: This is foo\n 
FOO_WITH_QUOTES: \"\\\"Quoted foo\\\"\" # Quotes included in value\n BAR: \"This is bar\"\n MAGIC: 42\n SWITCH_1: true # YAML boolean\n SWITCH_2: \"true\" # YAML string\n EMPTY: \"\"\n EXTERNAL: # Comes from os env\n EXTERNAL_NOTSET: # Missing in os env\n \"\"\"\n )\n\n monkeypatch.setenv(\"EXTERNAL\", \"Outside world\")\n monkeypatch.delenv(\"EXTERNAL_NOTSET\", raising=False)\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n expect = dict(\n FOO=\"This is foo\",\n FOO_WITH_QUOTES='\"Quoted foo\"',\n BAR=\"This is bar\",\n MAGIC=\"42\", # N.B. string\n SWITCH_1=\"True\", # Unfortunately this is due to str(bool(1))\n SWITCH_2=\"true\",\n EMPTY=\"\",\n EXTERNAL=\"Outside world\",\n EXTERNAL_NOTSET=\"\",\n )\n assert expect == config.environment", "def test_environments_deployment(\n self,\n cd_tmp_path: Path,\n empty_opts_from_file: None,\n fx_deployments: YamlLoaderDeployment,\n runway_context: MockRunwayContext,\n ) -> None:\n runway_context.env.root_dir = cd_tmp_path\n deployment = fx_deployments.load(\"environments_map_str\")\n mod_def = deployment.modules[0]\n mod_def.environments = {\"dev\": [\"us-east-1\"], \"prod\": [\"us-east-1\"]}\n mod = Module(context=runway_context, definition=mod_def, deployment=deployment)\n assert mod.environments == {\n \"dev\": [\"us-east-1\"],\n \"prod\": [\"us-east-1\"],\n \"test\": \"123456789012/us-east-1\",\n }", "def configTBConditions(process,key='default'):\n\n process.hgCalModuleInfoESSource.filename = 'Geometry/HGCalMapping/data/modulelocator_tb.txt'\n process.hgCalSiModuleInfoESSource.filename = 'Geometry/HGCalMapping/data/WaferCellMapTraces.txt'\n\n pedestals={\n 'default':'/eos/cms/store/group/dpg_hgcal/comm_hgcal/ykao/calibration_parameters_v2.txt',\n }\n if hasattr(process,'hgCalPedestalsESSource'):\n process.hgCalPedestalsESSource.filename = pedestals[key]\n if hasattr(process,'hgcalCalibrationESProducer'):\n process.hgcalCalibrationESProducer.filename = pedestals[key]\n\n return process", "def test_validation_correct_systems():\n data_paths = examples_paths()\n exp_builder = ExperimentBuilder()\n basic_script = \"\"\"\n ---\n molecules:\n rec: {{filepath: {0}, leap: {{parameters: leaprc.ff14SB}}}}\n rec_reg: {{filepath: {0}, regions: {{receptregion: 'some dsl'}}, leap: {{parameters: leaprc.ff14SB}}}}\n lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}\n lig_reg: {{name: lig, regions: {{ligregion: [143, 123]}}, leap: {{parameters: leaprc.gaff}}}}\n solvents:\n solv: {{nonbonded_method: NoCutoff}}\n solv2: {{nonbonded_method: NoCutoff, implicit_solvent: OBC2}}\n solv3: {{nonbonded_method: PME, clearance: 10*angstroms}}\n solv4: {{nonbonded_method: PME}}\n \"\"\".format(data_paths['lysozyme'])\n basic_script = yaml.load(textwrap.dedent(basic_script), Loader=yaml.FullLoader)\n\n systems = [\n {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv'},\n {'receptor': 'rec_reg', 'ligand': 'lig_reg', 'solvent': 'solv'},\n {'receptor': 'rec_reg', 'ligand': 'lig', 'solvent': 'solv'},\n {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv', 'pack': True},\n {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv3',\n 'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}},\n\n {'phase1_path': data_paths['bentol-complex'],\n 'phase2_path': data_paths['bentol-solvent'],\n 'ligand_dsl': 'resname BEN', 'solvent': 'solv'},\n {'phase1_path': data_paths['bentol-complex'],\n 'phase2_path': data_paths['bentol-solvent'],\n 'ligand_dsl': 'resname BEN', 'solvent': 'solv4'},\n {'phase1_path': data_paths['bentol-complex'],\n 'phase2_path': 
data_paths['bentol-solvent'],\n 'ligand_dsl': 'resname BEN', 'solvent1': 'solv3',\n 'solvent2': 'solv2'},\n\n {'phase1_path': data_paths['pxylene-complex'],\n 'phase2_path': data_paths['pxylene-solvent'],\n 'ligand_dsl': 'resname p-xylene', 'solvent': 'solv',\n 'gromacs_include_dir': data_paths['pxylene-gro-include']},\n {'phase1_path': data_paths['pxylene-complex'],\n 'phase2_path': data_paths['pxylene-solvent'],\n 'ligand_dsl': 'resname p-xylene', 'solvent': 'solv'},\n\n {'phase1_path': data_paths['toluene-solvent'],\n 'phase2_path': data_paths['toluene-vacuum'],\n 'ligand_dsl': 'resname TOL'},\n {'phase1_path': data_paths['toluene-solvent'],\n 'phase2_path': data_paths['toluene-vacuum'],\n 'ligand_dsl': 'resname TOL', 'solvent_dsl': 'not resname TOL'},\n\n {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},\n {'solute': 'lig_reg', 'solvent1': 'solv', 'solvent2': 'solv'},\n {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv',\n 'leap': {'parameters': 'leaprc.gaff'}}\n ]\n for system in systems:\n modified_script = basic_script.copy()\n modified_script['systems'] = {'sys': system}\n yield exp_builder.parse, modified_script", "def test_component_specifications_exist(self):\r\n\t\tself.assertTrue(not (self._configuration_[\"AddWordDefinitionTask\"] is None\r\n\t\t or self._configuration_[\"ListWordDefinitionsTask\"] is None or\r\n\t\t self._configuration_[\"RemoveWordDefinitionTask\"] is None))", "def test_environment_id():\n for l in list(environments.data):\n e = environments[l]\n assert e.id > 0", "def test_settings_multiple_files_and_env(mock_os_environ, mock_settings_files, tmpdir):\n climate = core.Climate(prefix=\"TEST_STUFF\", settings_files=mock_settings_files[0])\n assert isinstance(climate.settings, Mapping)\n\n assert dict(climate.settings) == {\n \"testgroup\": {\"test_var\": 6, \"testvar\": 7, \"testvar_inline_1\": \"foo\"},\n \"othergroup\": {\"blabla\": 555, \"testvar_inline_2\": \"bar\"},\n \"testgroup_test_var\": 9,\n }\n\n expected_fragments = [\n Fragment(\n value={\n \"testgroup\": {\n \"testvar\": 123,\n \"testvar_inline_1_from_file\": str(tmpdir / \"sub2\" / \"secret.txt\"),\n },\n \"othergroup\": {\n \"blabla\": 55,\n \"testvar_inline_2_from_file\": str(tmpdir / \"sub2\" / \"secret.txt\"),\n },\n },\n source=mock_settings_files[0][0],\n path=[],\n ),\n Fragment(\n value=core.REMOVED,\n source=mock_settings_files[0][0],\n path=[\"testgroup\", \"testvar_inline_1_from_file\"],\n ),\n Fragment(\n value=\"foo\",\n source=mock_settings_files[0][0],\n path=[\"testgroup\", \"testvar_inline_1\"],\n ),\n Fragment(\n value=core.REMOVED,\n source=mock_settings_files[0][0],\n path=[\"othergroup\", \"testvar_inline_2_from_file\"],\n ),\n Fragment(\n value=\"foo\",\n source=mock_settings_files[0][0],\n path=[\"othergroup\", \"testvar_inline_2\"],\n ),\n Fragment(\n value={\"othergroup\": {\"blabla\": 555, \"testvar_inline_2\": \"bar\"}},\n source=mock_settings_files[0][1],\n ),\n Fragment(\n value=6,\n source=\"ENV:TEST_STUFF_TESTGROUP__TEST_VAR\",\n path=[\"testgroup\", \"test_var\"],\n ),\n Fragment(\n value=7,\n source=\"ENV:TEST_STUFF_TESTGROUP__TESTVAR\",\n path=[\"testgroup\", \"testvar\"],\n ),\n Fragment(\n value=9,\n source=\"ENV:TEST_STUFF_TESTGROUP_TEST_VAR\",\n path=[\"testgroup_test_var\"],\n ),\n ]\n\n assert len(climate._fragments) == len(expected_fragments)\n if sys.version_info[:2] >= (3, 6): # pragma: nocover\n # in python < 3.6 dicts are not ordered so we can't be sure what's up here in python 3.5\n assert climate._fragments == expected_fragments", 
"def test012(testDir, dirDict, pflag):\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_system.cfg\"), testDir)\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_c.cfg\"), testDir)\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_a.cfg\"), testDir)\n psys = osp.join(testDir, \"params_system.cfg\")\n ptrodec = osp.join(testDir, \"params_c.cfg\")\n ptrodea = osp.join(testDir, \"params_a.cfg\")\n P_s = IO.get_config(psys)\n P_s.set(\"Sim Params\", \"Crate\", \"1e-2\")\n P_s.set(\"Sim Params\", \"capFrac\", \"0.67\")\n P_s.set(\"Sim Params\", \"tsteps\", \"25\")\n P_s.set(\"Sim Params\", \"Nvol_c\", \"2\")\n P_s.set(\"Sim Params\", \"Nvol_a\", \"2\")\n P_s.set(\"Sim Params\", \"Vmin\", \"2e0\")\n P_s.set(\"Particles\", \"cs0_c\", \"0.2\")\n P_s.set(\"Particles\", \"cs0_a\", \"0.495\")\n P_s.set(\"Electrolyte\", \"elyteModelType\", \"SM\")\n IO.write_config_file(P_s, psys)\n P = IO.get_config(ptrodec)\n P.set(\"Particles\", \"type\", \"homog\")\n P.set(\"Particles\", \"shape\", \"cylinder\")\n P.set(\"Material\", \"muRfunc\", \"LiMn2O4_ss2\")\n P.set(\"Reactions\", \"rxnType\", \"BV_mod01\")\n IO.write_config_file(P, ptrodec)\n P = IO.get_config(ptrodea)\n P.set(\"Particles\", \"discretization\", \"2.5e-9\")\n P.set(\"Particles\", \"shape\", \"sphere\")\n P.set(\"Material\", \"muRfunc\", \"LiC6_coke_ss2\")\n P.set(\"Reactions\", \"rxnType\", \"BV_mod02\")\n IO.write_config_file(P, ptrodea)\n main.main(psys, keepArchive=False)\n shutil.move(dirDict[\"simOut\"], testDir)\n if pflag:\n corePlots(testDir, dirDict)\n elytePlots(testDir, dirDict)\n electrodePlots(testDir, dirDict, \"c\")\n cmpr.bulkpf(testDir, dirDict, \"c\")\n electrodePlots(testDir, dirDict, \"a\")\n cmpr.bulkpf(testDir, dirDict, \"a\")", "def test_environment_storage():\n for l in list(environments.data):\n e = environments[l]\n assert e.storage > 0", "def test_main():\n for template in templates:\n main([\"-g\", template])\n\n # One at a time\n for xyz_file in example_xyz_files:\n main([template, xyz_file])\n\n # All at once\n main([template] + list(example_xyz_files))\n\n # Allow use of template in the parent directory\n with cd(\"data\"):\n main([\"../pnictogen/repo/ADF.in\", \"water-dimer.xyz\"])", "def test_rhostats_config():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(verbose=2)\n else:\n logger = piff.config.setup_logger(log_file='output/test_rhostats_config.log')\n\n image_file = os.path.join('output','test_stats_image.fits')\n cat_file = os.path.join('output','test_stats_cat.fits')\n psf_file = os.path.join('output','test_rhostats.fits')\n rho_file = os.path.join('output','test_rhostats.pdf')\n config = {\n 'input' : {\n 'image_file_name' : image_file,\n 'cat_file_name' : cat_file,\n 'stamp_size' : 48\n },\n 'psf' : {\n 'model' : { 'type' : 'Gaussian',\n 'fastfit': True,\n 'include_pixel': False },\n 'interp' : { 'type' : 'Mean' },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats' : { # Note: stats doesn't have to be a list.\n 'type': 'Rho',\n 'file_name': rho_file,\n 'min_sep': 30,\n 'max_sep': 600,\n 'sep_units': 'arcsec',\n 'bin_type': 'Linear',\n 'bin_size': 30,\n }\n },\n }\n piff.piffify(config, logger)\n assert os.path.isfile(rho_file)\n\n # repeat with plotify function\n os.remove(rho_file)\n piff.plotify(config, logger)\n assert os.path.isfile(rho_file)\n\n # Test rho statistics directly.\n min_sep = 1\n max_sep = 100\n bin_size = 0.1\n psf = piff.read(psf_file)\n orig_stars, wcs, pointing = piff.Input.process(config['input'], logger)\n stats = 
piff.RhoStats(min_sep=min_sep, max_sep=max_sep, bin_size=bin_size)\n with np.testing.assert_raises(RuntimeError):\n stats.write('dummy') # Cannot write before compute\n stats.compute(psf, orig_stars)\n\n rhos = [stats.rho1, stats.rho2, stats.rho3, stats.rho4, stats.rho5]\n for rho in rhos:\n # Test the range of separations\n radius = np.exp(rho.logr)\n np.testing.assert_array_less(radius, max_sep)\n np.testing.assert_array_less(min_sep, radius)\n # bin_size is reduced slightly to get integer number of bins\n assert rho.bin_size < bin_size\n assert np.isclose(rho.bin_size, bin_size, rtol=0.1)\n np.testing.assert_array_almost_equal(np.diff(rho.logr), rho.bin_size, decimal=5)\n\n # Test that the max absolute value of each rho isn't crazy\n np.testing.assert_array_less(np.abs(rho.xip), 1)\n\n # # Check that each rho isn't precisely zero. This means the sum of abs > 0\n np.testing.assert_array_less(0, np.sum(np.abs(rho.xip)))\n\n # Test using the piffify executable\n os.remove(rho_file)\n config['verbose'] = 0\n with open('rho.yaml','w') as f:\n f.write(yaml.dump(config, default_flow_style=False))\n piffify_exe = get_script_name('piffify')\n p = subprocess.Popen( [piffify_exe, 'rho.yaml'] )\n p.communicate()\n assert os.path.isfile(rho_file)\n\n # Test using the plotify executable\n os.remove(rho_file)\n plotify_exe = get_script_name('plotify')\n p = subprocess.Popen( [plotify_exe, 'rho.yaml'] )\n p.communicate()\n assert os.path.isfile(rho_file)\n\n # test running plotify with dir in config, with no logger, and with a modules specification.\n # (all to improve test coverage)\n config['output']['dir'] = '.'\n config['modules'] = [ 'custom_wcs' ]\n os.remove(rho_file)\n piff.plotify(config)\n assert os.path.isfile(rho_file)", "def test_plasma_nlte_section_config(\n tardis_config_verysimple_nlte,\n nlte_raw_model,\n nlte_atom_data,\n):\n tardis_config_verysimple_nlte[\"plasma\"][\"continuum_interaction\"][\n \"species\"\n ] = [\n \"He I\",\n ]\n tardis_config_verysimple_nlte[\"plasma\"][\"nlte_ionization_species\"] = [\"H I\"]\n config = Configuration.from_config_dict(tardis_config_verysimple_nlte)\n with pytest.raises(PlasmaConfigError) as ve:\n assemble_plasma(config, nlte_raw_model, nlte_atom_data)", "def test_ifFileExists():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"file\" in testConfig.config and \"file_locations\" in testConfig.config:\n print \"File In Location: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExistsInPossibleLocations, testConfig.config\n elif \"file\" in testConfig.config:\n print \"File: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExists, testConfig.config", "def environment_preparation():\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n data_location_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('data_location')}\"\n )\n if f\"{Common.get_config_value('report_location')}\":\n if os.path.isdir(f\"{report_file_path}\"):\n for data_path, directory_list, file_list in os.walk(\n f\"{report_file_path}\"\n ):\n [os.remove(f\"{report_file_path}/{file}\") for file in file_list]\n else:\n os.mkdir(f\"{report_file_path}\")\n workbook = xlwt.Workbook()\n workbook.add_sheet(\"test1\")\n workbook.save(f\"{report_file_path}/report.xls\")\n if (\n f'{Common.get_config_value(\"data_location\")}'\n not in Common.get_config_value(\"unsupported_path\")\n ):\n try:\n if os.path.isdir(f\"{data_location_path}\"):\n for data_path, 
directory_list, file_list in os.walk(\n f\"{data_location_path}\"\n ):\n [os.remove(f\"{data_path}/{file}\") for file in file_list]\n else:\n os.mkdir(f\"{data_location_path}\")\n except OSError as ex:\n Common.logger.warning(f\"Path not found {ex}\")\n else:\n Common.logger.warning(f\"Path not found\")\n Common.logger.info(\"Environment preparation completed successfully\")" ]
[ "0.58821565", "0.58680266", "0.56163776", "0.55792236", "0.5568075", "0.552711", "0.5509854", "0.54249567", "0.53922117", "0.5349689", "0.53313106", "0.532554", "0.5321047", "0.5313373", "0.5290485", "0.52886117", "0.5285383", "0.528437", "0.5277534", "0.5275471", "0.5242284", "0.52414805", "0.5233837", "0.52048916", "0.5177462", "0.51728827", "0.5167861", "0.51659477", "0.5162458", "0.5159711" ]
0.7014509
0
Set the configuration for a provider by calling get_provider() with the given configuration.
def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None: class SetProviderDefaults: @st.hookimpl def get_provider_config(self, name, params, registry): if name != provider: return None conf = config.copy() conf.update(params) return conf st.registry.register(SetProviderDefaults())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provider(self, provider):\n\n self._provider = provider", "def provider(self, provider: Provider) -> None:\n self._provider = provider", "def set_provider(self, provider):\n \n check = self.check_provider(provider)\n if check is not None:\n self.default_provider = provider\n else:\n return None", "def setup_provider(self):\n pass", "def provider_setup(cls, args, config):\n if len(args) < 1:\n print \"USAGE: molns provider setup name\"\n print \"\\tCreates a new provider with the given name.\"\n return\n # check if provider exists\n try:\n provider_obj = config.get_object(args[0], kind='Provider')\n except DatastoreException:\n # ask provider type\n print \"Select a provider type:\"\n for n, p in enumerate(VALID_PROVIDER_TYPES):\n print \"\\t[{0}] {1}\".format(n, p)\n while True:\n try:\n provider_ndx = int(raw_input_default(\"Enter the number of type:\", default='0'))\n provider_type = VALID_PROVIDER_TYPES[provider_ndx]\n break\n except (ValueError, IndexError):\n pass\n logging.debug(\"Provider type '{0}'\".format(provider_type))\n # Create provider\n try:\n provider_obj = config.create_object(name=args[0], ptype=provider_type, kind='Provider')\n except DatastoreException as e:\n logging.exception(e)\n print e\n return\n print \"Enter configuration for provider {0}:\".format(args[0])\n setup_object(provider_obj)\n config.save_object(provider_obj, kind='Provider')\n\n cls.provider_initialize(args[0], config)", "def provider(self, provider):\n allowed_values = [\"AWS\", \"AWS_GOVCLOUD\", \"AZURE\"] # noqa: E501\n if provider not in allowed_values:\n raise ValueError(\n \"Invalid value for `provider` ({0}), must be one of {1}\" # noqa: E501\n .format(provider, allowed_values)\n )\n\n self._provider = provider", "def provider_get_config(cls, name=None, provider_type=None, config=None):\n if config is None:\n raise MOLNSException(\"no config specified\")\n if name is None and provider_type is None:\n raise MOLNSException(\"provider name or type must be specified\")\n obj = None\n if obj is None and name is not None:\n try:\n obj = config.get_object(name, kind='Provider')\n except DatastoreException as e:\n pass\n if obj is None and provider_type is not None:\n if provider_type not in VALID_PROVIDER_TYPES:\n raise MOLNSException(\"unknown provider type '{0}'\".format(provider_type))\n p_hand = get_provider_handle('Provider', provider_type)\n obj = p_hand('__tmp__', data={}, config_dir=config.config_dir)\n if obj is None:\n raise MOLNSException(\"provider {0} not found\".format(name))\n ret = []\n for key, conf, value in obj.get_config_vars():\n if 'ask' in conf and not conf['ask']:\n continue\n question = conf['q']\n if value is not None:\n myval = value\n else:\n if 'default' in conf and conf['default']:\n if callable(conf['default']):\n f1 = conf['default']\n try:\n myval = f1()\n except TypeError:\n pass\n else:\n myval = conf['default']\n else:\n myval = None\n if myval is not None and 'obfuscate' in conf and conf['obfuscate']:\n myval = '********'\n ret.append({\n 'question': question,\n 'key': key,\n 'value': myval,\n 'type': 'string'\n })\n return ret", "def svc_provider(self, svc_provider):\n\n self._svc_provider = svc_provider", "def parameter_providers(self, parameter_providers):\n\n self._parameter_providers = parameter_providers", "def provider(self, provider: str):\n if provider is None:\n raise ValueError(\"Invalid value for `provider`, must not be `None`\") # noqa: E501\n\n self._provider = provider", "def set_data_provider(\n self,\n data_provider: IDataProvider,\n ) -> 
None:\n\n self._data_provider = data_provider", "def provider(self, provider):\n allowed_values = [\"github\"] # noqa: E501\n if provider not in allowed_values:\n raise ValueError(\n \"Invalid value for `provider` ({0}), must be one of {1}\" # noqa: E501\n .format(provider, allowed_values)\n )\n\n self._provider = provider", "def _enable(cls, provider):\r\n if provider.NAME in cls._ENABLED:\r\n raise ValueError('Provider %s already enabled' % provider.NAME)\r\n cls._ENABLED[provider.NAME] = provider", "def setConfiguration(self, config):\n raise NotImplementedError", "def cdn_provider(self, cdn_provider):\n # type: (string_types) -> None\n\n if cdn_provider is not None:\n if not isinstance(cdn_provider, string_types):\n raise TypeError(\"Invalid type for `cdn_provider`, type has to be `string_types`\")\n\n self._cdn_provider = cdn_provider", "def configure(self, config: dict):\n self.config.update(config)", "def provider(self, provider):\n if self.local_vars_configuration.client_side_validation and provider is None: # noqa: E501\n raise ValueError(\"Invalid value for `provider`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n provider is not None and len(provider) < 1):\n raise ValueError(\"Invalid value for `provider`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._provider = provider", "def __init__(self, provider_class, provider_type, label, origin, config):\n super(Provider, self).__init__()\n\n self.created = datetime.datetime.now()\n \"\"\"datetime: The creation time of this document\"\"\"\n\n self.modified = datetime.datetime.now()\n \"\"\"datetime: The last modified time of this document\"\"\"\n\n self.provider_class = ProviderClass(provider_class)\n \"\"\"ProviderClass: The class of provider, either compute or storage\"\"\"\n\n self.provider_type = provider_type\n \"\"\"str: The type (or host) of the provider. (e.g. static, gcloud, etc)\"\"\"\n\n self.label = label\n \"\"\"str: The human-readable provider label\"\"\"\n\n self.origin = origin\n \"\"\"dict: The origin (e.g. user) of the provider\"\"\"\n\n self.config = config\n \"\"\"dict: The provider-specific configuration\"\"\"", "def set_config(self, attr, value):\n setattr(self.config, attr, value)", "def set_config(self, attr, value):\n setattr(self.config, attr, value)", "def register_config(self, config):\n self.config = config", "def update_provider(self, provider_id, provider_name, endpoints, zone_id, provider_region):\n try:\n self.client.post('{api_url}/providers/{id}'.format(api_url=self.api_url, id=provider_id),\n action='edit',\n zone={'id': zone_id},\n connection_configurations=endpoints,\n provider_region=provider_region)\n self.changed = True\n except Exception as e:\n self.module.fail_json(msg=\"Failed to update provider. 
Error: {!r}\".format(e))", "async def init_provider(self):\n self.dsp_name = \"OpenStack\"\n await self._provider.init(image_names=self.config[\"images\"].values())", "def configure_driver(self, config):\n raise NotImplementedError", "def provider_initialize(cls, provider_name, config):\n try:\n provider_obj = config.get_object(provider_name, kind='Provider')\n print \"Provider object {0}\".format(provider_obj)\n except DatastoreException as e:\n raise MOLNSException(\"provider not found\")\n #\n print \"Checking all config artifacts.\"\n # check for ssh key\n if provider_obj['key_name'] is None or provider_obj['key_name'] == '':\n print \"Error: no key_name specified.\"\n return\n elif not provider_obj.check_ssh_key():\n print \"Creating key '{0}'\".format(provider_obj['key_name'])\n provider_obj.create_ssh_key()\n else:\n print \"SSH key={0} is valid.\".format(provider_obj['key_name'])\n\n # check for security group\n if provider_obj['group_name'] is None or provider_obj['group_name'] == '':\n print \"Error: no security group specified.\"\n return\n elif not provider_obj.check_security_group():\n print \"Creating security group '{0}'\".format(provider_obj['group_name'])\n provider_obj.create_seurity_group()\n else:\n print \"security group={0} is valid.\".format(provider_obj['group_name'])\n\n # check for MOLNS image\n if provider_obj['molns_image_name'] is None or provider_obj['molns_image_name'] == '':\n if provider_obj['ubuntu_image_name'] is None or provider_obj['ubuntu_image_name'] == '':\n print \"Error: no ubuntu_image_name given, can not create molns image.\"\n else:\n print \"Creating new image, this process can take a long time (10-30 minutes).\"\n provider_obj['molns_image_name'] = provider_obj.create_molns_image()\n elif not provider_obj.check_molns_image():\n print \"Error: a molns image ID was provided, but it does not exist.\"\n return\n\n print \"Success.\"\n config.save_object(provider_obj, kind='Provider')", "def set_config(self, config_name=None, optional=False, **CONFIG_VARS):\n assert self._module\n config, _ = self.make_config_params(config_name, optional, **CONFIG_VARS)\n if config:\n self.c = config", "def configure_connector(provider):\n logging.getLogger(\"root\").info(\"Configuring the cloud connector\")\n if provider['name'] == \"~okeanos\" or provider['name'] == \"okeanos\":\n connector = OkeanosConnector()\n connector.configure(provider)\n return connector\n else:\n raise NotImplemented(\"The connector is not supported\")", "def configure(self, options, conf):\n pass", "def create_providerinfo(self, setup=None):\n pcr_class = self.server.message_factory.get_response_type(\n \"configuration_endpoint\"\n )\n _provider_info = copy.deepcopy(self.capabilities.to_dict())\n\n if self.jwks_uri and self.keyjar:\n _provider_info[\"jwks_uri\"] = self.jwks_uri\n\n for endp in self.endp:\n if not self.baseurl.endswith(\"/\"):\n baseurl = self.baseurl + \"/\"\n else:\n baseurl = self.baseurl\n _provider_info[\"{}_endpoint\".format(endp.etype)] = urljoin(\n baseurl, endp.url\n )\n\n if setup and isinstance(setup, dict):\n for key in pcr_class.c_param.keys():\n if key in setup:\n _provider_info[key] = setup[key]\n\n _provider_info[\"issuer\"] = self.name\n _provider_info[\"version\"] = \"3.0\"\n\n return pcr_class(**_provider_info)", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n 
kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider" ]
[ "0.7138926", "0.6993208", "0.678772", "0.64115953", "0.63953626", "0.6311917", "0.6282703", "0.6255965", "0.6160486", "0.61591345", "0.612793", "0.59540063", "0.58079547", "0.57860327", "0.57618", "0.57555056", "0.5730834", "0.5698895", "0.56884116", "0.56884116", "0.5657185", "0.56399006", "0.56343424", "0.563126", "0.5586003", "0.55681175", "0.5560741", "0.5559315", "0.55132765", "0.5504296" ]
0.7107458
1
Test create method. We create a dictionary of values. We create a user from these values; he has a user profile. We check that the new user has been created with his name.
def test_create(self): userValue = {'name': 'User Test 1', 'login': 'usertest1', 'user_profile_id': self.user_profile2.id, } Users = self.env['res.users'] user_test = Users.create(userValue) newUser = self.env['res.users'].browse(user_test.id) self.assertEqual(userValue['name'], newUser['name'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_create_user(profile_data):\n email = \"email@localhost\"\n username = \"username\"\n user = api.create_user(username, email, profile_data, {\"first_name\": \"Bob\"})\n\n assert isinstance(user, User)\n assert user.email == email\n assert user.username == username\n assert user.first_name == \"Bob\"\n\n if \"name\" in profile_data:\n assert user.profile.name == profile_data[\"name\"]\n else:\n assert user.profile.name is None", "def test_create_user_object(self):\n print('(' + self.test_create_user_object.__name__+')',\n self.test_create_user_object.__doc__)\n # Query to get users and users_profile for the patient\n query = 'SELECT users.*, users_profile.* FROM users, users_profile \\\n WHERE users.user_id = users_profile.user_id'\n # assert if result doesn't contain patient\n self.assertDictContainsSubset(self.connection._create_user_object(\n execute_query(self, query, 'one')), PATIENT)", "def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1", "def users_create():", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_users_dictionary(self):\n new_user = self.app\n self.assertEqual(len(new_user.users), 0)\n new_user.create_user()\n self.assertIsInstance(new_user, User)\n self.assertEqual(len(new_user.users), 1)", "def test_create_user_profile(self):\n user_profile_dict = {\n 'date_of_birth': datetime.datetime(1992, 10, 27),\n 'region': 'Serbia',\n 'bio': 'This is a test user account',\n 'user_type': models.UserProfile.UserType.REGULAR,\n 'phone': '065123'\n }\n\n user = get_user_model().objects.create_user(\n email=self.test_user_email,\n password=self.test_user_pass,\n name=self.test_user_name\n )\n\n user_profile = models.UserProfile.objects.filter(user_id=user).first()\n user_profile.date_of_birth = user_profile_dict['date_of_birth']\n user_profile.region = user_profile_dict['region']\n user_profile.bio = user_profile_dict['bio']\n user_profile.user_type = user_profile_dict['user_type']\n user_profile.phone = user_profile_dict['phone']\n user_profile.save()\n\n self.assertEqual(user_profile.date_of_birth,\n user_profile_dict['date_of_birth'])\n self.assertEqual(user_profile.region,\n user_profile_dict['region'])\n self.assertEqual(user_profile.bio, user_profile_dict['bio'])\n self.assertEqual(user_profile.user_type,\n user_profile_dict['user_type'])", "def 
test_users_can_signup(self):\n for value in self.app.users.values():\n result = self.app.create_user()\n stored_password = value['password']\n expected = {0: {\n 'email': '[email protected]', 'username': 'admin', 'password': stored_password\n }}\n self.assertEqual(expected, result)", "def test_user_create(self):\n\n # Creates event\n event = {\n \"clientId\": 2,\n \"username\": \"user\" + randstr(),\n \"pwd\": \"password\",\n \"nameLast\": \"User\",\n \"nameFirst\": \"Joe\",\n \"email\": \"[email protected]\" + randstr(),\n \"phone\": \"123-4567\",\n \"profilePicturePath\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Generates expected value\n expected = {\n 'statusCode': 200,\n 'body': '{\"success\": true, \"apicode\": \"OK\", \"apimessage\": \"User successfully created.\", \"apidataset\": {\"message\": \"User successfully created!\"}}'\n }\n\n # Invokes\n actual = handler.user_create(event=event, context=None)\n\n # Validates response\n self.assertEqual(expected, actual)", "def test_create(km_user_factory):\n models.Profile.objects.create(\n is_private=True, km_user=km_user_factory(), name=\"My Profile\"\n )", "def test_profile_creation(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n\n self.assertEqual(self.registration_profile.objects.count(), 1)\n self.assertEqual(profile.user.id, new_user.id)\n self.assertTrue(re.match('^[a-f0-9]{40,64}$', profile.activation_key))\n self.assertEqual(str(profile),\n \"Registration information for alice\")", "def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def test_new_user(self):\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n data = json.loads(resp.data)\n for key in ['first_name', 'last_name', 'userid', 'groups']:\n assert key in data\n assert data['first_name'] == self.test_user1_first\n assert data['last_name'] == self.test_user1_last\n assert data['userid'] == self.test_user1_userid\n for groupid in self.test_user1_groups:\n assert groupid in data['groups']", "def create_user(user_dict):\r\n if not user_dict.has_key('temp') or not user_dict['temp']:\r\n user_dict['id'] = create_user_profile(user_dict)\r\n else:\r\n user_dict['id'] = create_temp_user(user_dict)\r\n return user_dict['id']", "def test_that_a_user_profile_was_created_successfully(self):\n response = self.register_user(data=self.user)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def profile_create(faker_obj=fake_init()):\n profile = faker_obj.simple_profile()\n user = User.objects.create(\n username=profile[\"username\"],\n email=profile[\"mail\"],\n password=profile[\"username\"][::-1],\n )\n return user.id", "def 
test_create_with_username(self):\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), '[email protected]')\n\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=False)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck')", "def test_user_exists(self):\n\n payload = {\n 'email': '[email protected]',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def test_create_user(self) -> None:\n\n u1 = self.register_user(\"u1\", \"pass\")\n\n u1stats = self._get_current_stats(\"user\", u1)\n\n assert u1stats is not None\n\n # not in any rooms by default\n self.assertEqual(u1stats[\"joined_rooms\"], 0)", "def test_create_user(self):\n url = reverse('create_user')\n data = {\n 'first_name': 'Jimbo',\n 'email': '[email protected]',\n 'password': 'jimboland',\n 'postal_code': 'jimbo',\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().first_name, 'Jimbo')", "def test_create_user_exists(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def _add_user(data: dict) -> dict:\n user = create_user()\n name = []\n if 'first_name' in data:\n name.append(data['first_name'])\n if 'middle_name' in data:\n name.append(data['middle_name'])\n if 'last_name' in data:\n name.append(data['last_name'])\n user['name'] = ' '.join(name)\n if 'role' in data:\n user['exp']['exp']['title'] = data['role']\n if 'affiliation' in data:\n user['abs'] = data['affiliation']\n user['exp']['exp']['company'] = data['affiliation']\n elif 'organization' in data:\n user['abs'] = data['organization']\n user['exp']['exp']['company'] = data['organization']\n phone = []\n if 'phone' in data:\n phone.append(data['phone'])\n if 'phone_ext' in data:\n phone.append(data['phone_ext'])\n user['contact']['phone'] = '-'.join(phone)\n user['contact']['email'] = data['email'] if 'email' in data else ''\n if 'degrees' in data:\n if not user.title:\n user['edu']['degree'] = data['degrees']\n if len(user['name']) < 0:\n user['name'] = user['contact']['email'] if len(user['contact']['email']) > 0 else 'Anonymous'\n return user", "def test_create_user(self):\n first_name = \"b\"\n last_name = \"b\"\n username = \"b\"\n email = \"b\"\n password = \"b\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertTrue(result)\n\n user = User.objects.get(username=username)\n self.assertEqual(first_name, user.first_name)\n self.assertEqual(last_name, user.last_name)\n self.assertEqual(username, user.username)\n self.assertEqual(email, user.email)\n 
self.assertEqual(password, user.testdata.password)\n self.assertEqual(username, user.testdata.username)\n self.assertEqual(email, user.testdata.email)\n self.assertNotEqual(user.authtests, None)" ]
[ "0.78253496", "0.765791", "0.76069736", "0.7524077", "0.7516569", "0.7485291", "0.74447876", "0.7440378", "0.7425919", "0.7425513", "0.74086386", "0.73996085", "0.7388538", "0.7337399", "0.7334261", "0.7334261", "0.7334261", "0.7330658", "0.72654027", "0.7246871", "0.72445744", "0.72070765", "0.7181829", "0.7163165", "0.7160655", "0.7147294", "0.713597", "0.71332127", "0.71321875", "0.7109242" ]
0.83480036
0
Test write method. We use the user created in the first method. We change his user_profile_id. We check if the update has been done.
def test_write(self): userEdited = self.env['res.users'].browse( self.user.id).write({'user_profile_id': self.user_profile2.id}) self.assertEqual(userEdited, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_profile_attribute(self):\n user = self.users[0]\n user.profile.bio = 'bio'\n user.profile.save()\n query = User.objects.first()\n self.assertTrue(query.profile.bio == 'bio')", "def test_update_user(self):\n pass", "def test_can_update_user_profile(self):\n self.update_user()\n self.assertEqual(self.user.first_name, self.updated_data['first_name'])\n self.assertEqual(self.user.last_name, self.updated_data['last_name'])\n self.assertEqual(self.user.email, self.updated_data['email'])", "def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )", "def test_change_username_changes_profile(self):\n user = self.users[0]\n profile = user.profile\n user.profile.bio = 'bio'\n user.profile.save()\n self.assertEquals(user.profile.bio, profile.bio)", "def test_change_profile_changes_user(self):\n user = self.users[0]\n user.username = 'newname'\n profile = user.profile\n user.profile.save()\n self.assertEquals(user.username, profile.user.username)", "def test_05_update_user_profile(self):\r\n\r\n\r\n # Create an account and log in\r\n self.register()\r\n url = \"/account/fake/update\"\r\n res = self.app.get(url, follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n\r\n # Update profile with new data\r\n res = self.update_profile(method=\"GET\")\r\n msg = \"Update your profile: %s\" % self.user.fullname\r\n assert self.html_title(msg) in res.data, res.data\r\n msg = 'input id=\"id\" name=\"id\" type=\"hidden\" value=\"1\"'\r\n assert msg in res.data, res\r\n assert self.user.fullname in res.data, res\r\n assert \"Save the changes\" in res.data, res\r\n msg = '<a href=\"/account/johndoe/update\" class=\"btn\">Cancel</a>'\r\n assert msg in res.data, res.data\r\n\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"johndoe2@example\",\r\n locale=\"en\")\r\n assert \"Please correct the errors\" in res.data, res.data\r\n\r\n\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"[email protected]\",\r\n locale=\"en\")\r\n title = \"Update your profile: John Doe 2\"\r\n assert self.html_title(title) in res.data, res.data\r\n assert \"Your profile has been updated!\" in res.data, res.data\r\n assert \"John Doe 2\" in res.data, res\r\n assert \"johndoe\" in res.data, res\r\n assert \"[email protected]\" in res.data, res\r\n\r\n # Updating the username field forces the user to re-log in\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"[email protected]\",\r\n locale=\"en\",\r\n new_name=\"johndoe2\")\r\n assert \"Your profile has been updated!\" in res.data, res\r\n assert \"Please sign in\" in res.data, res.data\r\n\r\n res = self.signin(method=\"POST\", email=\"[email protected]\",\r\n password=\"p4ssw0rd\",\r\n next=\"%2Faccount%2Fprofile\")\r\n assert \"Welcome back John Doe 2\" in res.data, res.data\r\n assert \"John Doe 2\" in res.data, res\r\n assert \"johndoe2\" in res.data, res\r\n assert \"[email protected]\" in res.data, res\r\n\r\n res = self.signout()\r\n assert self.html_title() in res.data, res\r\n assert \"You are now signed out\" in res.data, res\r\n\r\n # A user must be signed in to access the update page, the page\r\n # the title will be the redirection to log in\r\n res = self.update_profile(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page.\" in res.data, res\r\n\r\n # A user must be signed in to 
access the update page, the page\r\n # the title will be the redirection to log in\r\n res = self.update_profile()\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page.\" in res.data, res\r\n\r\n self.register(fullname=\"new\", name=\"new\")\r\n url = \"/account/johndoe2/update\"\r\n res = self.app.get(url)\r\n assert res.status_code == 403", "def test_update_profile_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'newpassword'\n }\n res = self.client.patch(ME_URL, payload)\n\n # Refresh the user object with latest values from db\n self.user.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(self.user.email, payload['email'])\n self.assertTrue(self.user.check_password(payload['password']))", "def test_onchange_user_profile(self):\n admin = self.env.ref('base.user_root').id\n with self.assertRaises(ValidationError):\n self.env['res.users'].browse(\n self.user.id).write({'user_profile_id': admin})", "def test_userprofile_modification(self):\n self.user.userprofile.save(update_fields=['enabled'])\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def test_update_user_profile(setup_client, setup_user):\n client = setup_client\n user = setup_user\n payload = {\n \"name\": \"New name\",\n \"role\": \"Purchaser\",\n \"password\": \"New password\"\n }\n res = client.patch(ME_URL, payload)\n user.refresh_from_db()\n assert res.status_code == status.HTTP_200_OK\n assert user.name == payload[\"name\"]\n assert user.role == payload[\"role\"]\n assert user.check_password(payload[\"password\"])\n assert res.status_code == status.HTTP_200_OK", "def test_user_update(self):\n update_data = {\n \"username\": \"testnotUser\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Test\",\n \"last_name\": \"User\",\n \"profile\": {\n \"user\": 1,\n \"contact_number\": \"9860476499\",\n \"address\": \"kapan\",\n \"education\": self.education,\n },\n }\n # files = {'media': open('accounts/tests/1.png', 'rb')}\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.put(reverse(\"account:user-update\"), update_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['username'], \"testnotUser\")\n self.assertNotEqual(response.data['username'], \"testUser\")", "def test_create_user_profile(self):\n user_profile_dict = {\n 'date_of_birth': datetime.datetime(1992, 10, 27),\n 'region': 'Serbia',\n 'bio': 'This is a test user account',\n 'user_type': models.UserProfile.UserType.REGULAR,\n 'phone': '065123'\n }\n\n user = get_user_model().objects.create_user(\n email=self.test_user_email,\n password=self.test_user_pass,\n name=self.test_user_name\n )\n\n user_profile = models.UserProfile.objects.filter(user_id=user).first()\n user_profile.date_of_birth = user_profile_dict['date_of_birth']\n user_profile.region = user_profile_dict['region']\n user_profile.bio = user_profile_dict['bio']\n user_profile.user_type = user_profile_dict['user_type']\n user_profile.phone = user_profile_dict['phone']\n user_profile.save()\n\n self.assertEqual(user_profile.date_of_birth,\n user_profile_dict['date_of_birth'])\n self.assertEqual(user_profile.region,\n user_profile_dict['region'])\n self.assertEqual(user_profile.bio, user_profile_dict['bio'])\n self.assertEqual(user_profile.user_type,\n user_profile_dict['user_type'])", "def 
test_user_profile_relationship(self):\r\n user = self._create_test_user()\r\n profile = self._create_test_profile()\r\n user.profile = profile\r\n self.db.session.commit()", "def test_update_user_profile(self):\n payload = {\"name\": \"Lucifer\", 'password': \"12346987\"}\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_update_user_profile(self):\n payload = {'name': 'new name', 'password': 'newpassword123'}\n\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_update_user_profile(self):\r\n payload = {\r\n 'name': 'new_name',\r\n 'password': 'password123'\r\n }\r\n\r\n res = self.client.patch(ME_URL, payload)\r\n\r\n self.user.refresh_from_db()\r\n\r\n self.assertEqual(self.user.name, payload['name'])\r\n self.assertTrue(self.user.check_password(payload['password']))\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_update_user_profile(self):\n payload = {'name': 'new name', 'password': 'newpassword123'}\n\n response = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_profile(self):\n url = self.url\n url = url + '{}/'.format(\n self.profile.pk\n )\n response = self.client.patch(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n update_data = {\n 'first_name': 'UpdateTest'\n }\n\n response = self.client.patch(url, update_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Caso 1\n update_data['tasks_finalize'] = '14124123'\n update_data['tasks_pending'] = '124123132'\n update_data['tasks_created'] = '12412323'\n\n response = self.client.patch(url, update_data, format='json')\n self.assertEqual(response.data['tasks_finalize'], 0)\n self.assertEqual(response.data['tasks_pending'], 0)\n self.assertEqual(response.data['tasks_created'], 0)", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200", "def test_save_profile_with_existing_photo(self):\n # Set a user with a photo\n user = UserFactory.create()\n file_path = os.path.join(os.path.dirname(__file__), \"normal_photo.jpg\")\n self._upload_photo(user, file_path)\n\n # Re-save profile without uploading a new photo.\n data = {\n \"full_name\": user.userprofile.full_name,\n \"email\": user.email,\n \"username\": user.username,\n \"lat\": 40.005814,\n \"lng\": -3.42071,\n \"externalaccount_set-MAX_NUM_FORMS\": \"1000\",\n \"externalaccount_set-INITIAL_FORMS\": \"0\",\n \"externalaccount_set-TOTAL_FORMS\": \"0\",\n \"language_set-MAX_NUM_FORMS\": \"1000\",\n \"language_set-INITIAL_FORMS\": \"0\",\n \"language_set-TOTAL_FORMS\": \"0\",\n \"basic_section\": \"\",\n }\n\n for field in UserProfilePrivacyModel._meta.fields:\n data[field.name] = MOZILLIANS\n data[\"privacy_tshirt\"] = PRIVATE\n\n with override_script_prefix(\"/en-US/\"):\n url = reverse(\"phonebook:profile_edit\")\n with self.login(user) as client:\n response = client.post(url, 
data=data, follow=True)\n eq_(response.status_code, 200)", "def test_update_user_profile(self):\n payload = {'name': 'Test name', 'password': 'new_password'}\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_update_useruser_uuid_put(self):\n pass", "def test_create_profile_on_access(self):\n user = User.objects.create_user(\n 'auto_tester', '[email protected]', 'auto_tester')\n profile = user.get_profile()\n profile.delete()\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def test_update_profile_valid_put(self):\n update_user = {\n 'email': '[email protected]',\n 'password': 'NewPassword!',\n }\n res = self.client.put(ME_URL, update_user)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n self.user.refresh_from_db()\n\n self.assertTrue(self.user.check_password(update_user['password']))\n self.assertEquals(self.user.email, update_user['email'])\n self.assertTrue(self.user.name)", "def updateProfile( token, user=False, userinfo={'nickname':'newUser','first_name':'newUser'}):\n \n if not user:\n l= list(validName)\n sysrand.shuffle(l)\n l= \"\".join(l)\n print \"Attempting to create a user with the name \"+l\n user=User.objects.create_user(l,'')\n user.save()\n sid = transaction.savepoint()\n updateName( user, str(userinfo['nickname']).replace(' ',''), userinfo['first_name'], sid )\n transaction.savepoint_commit(sid)\n\n try: \n userprofile = user.get_profile()\n userprofile.uid = cPickle.dumps(token) #ensures the token parameter is retreivable and unique\n userprofile.user_id = user.id\n userprofile.save()\n transaction.commit()\n except:\n transaction.rollback()\n return user", "def test_user_writable(self):\n u = self.d.user('example')\n u.name # Trigger a fetch\n\n method, url, data, headers = self.d._fetcher.requests[0]\n self.assertEqual(method, 'GET')\n self.assertEqual(url, '/users/example')\n\n new_home_page = 'http://www.discogs.com'\n u.home_page = new_home_page\n self.assertTrue('home_page' in u.changes)\n self.assertFalse('profile' in u.changes)\n\n u.save()\n\n # Save\n method, url, data, headers = self.d._fetcher.requests[1]\n self.assertEqual(method, 'POST')\n self.assertEqual(url, '/users/example')\n self.assertEqual(data, {'home_page': new_home_page})\n\n # Refresh\n method, url, data, headers = self.d._fetcher.requests[2]\n self.assertEqual(method, 'GET')\n self.assertEqual(url, '/users/example')", "def test_user_profile_setname(url):\n test_clear(url)\n admin_tk, admin_id = channel_user_create_0(url)\n\n test_profile = {\n 'token': admin_tk,\n 'u_id': admin_id\n }\n resp = requests.get(url + \"user/profile\", params=test_profile)\n profile_resp = resp.json()\n assert profile_resp['user']['u_id'] == admin_id\n assert profile_resp['user']['email'] == '[email protected]'\n assert profile_resp['user']['name_first'] == 'admin'\n assert profile_resp['user']['name_last'] == 'admin'\n\n test_profile_setname = {\n 'token': admin_tk,\n 'name_first': 'new_first',\n 'name_last': 'new_last'\n }\n requests.put(url + \"user/profile/setname\", json=test_profile_setname)\n \n test_profile = {\n 'token': admin_tk,\n 'u_id': admin_id\n }\n resp = requests.get(url + \"user/profile\", params=test_profile)\n profile_resp = resp.json()\n assert profile_resp['user']['u_id'] == admin_id\n assert profile_resp['user']['email'] == 
'[email protected]'\n assert profile_resp['user']['name_first'] == 'new_first'\n assert profile_resp['user']['name_last'] == 'new_last'", "def test_user_profile_creation_is_successful(self):\n user_profile = UserProfile.objects.get(user_id=self.user_1.id)\n user_profile_count = UserProfile.objects.count()\n self.assertEqual(user_profile_count, 2)\n self.assertEqual(\n str(user_profile), \"{}'s profile\".format(user_profile.user.username)\n )", "def test_modify_user(self):\n print('(' + self.test_modify_user.__name__+')',\n self.test_modify_user.__doc__)\n # modify the user with provided user dict\n modify_resp = self.connection.modify_user(\n PATIENT_USERNAME, MODIFIED_PATIENT['public_profile'],\n MODIFIED_PATIENT['restricted_profile'])\n self.assertEqual(modify_resp, PATIENT_USERNAME)\n # check each value in the profile with the modified one, see if modification successful\n # get the get_user response\n get_resp = self.connection.get_user(PATIENT_USERNAME)\n resp_r_profile = get_resp['restricted_profile']\n r_profile = MODIFIED_PATIENT['restricted_profile']\n self.assertEqual(\n r_profile['user_id'], resp_r_profile['user_id'])\n self.assertEqual(r_profile['firstname'], resp_r_profile['firstname'])\n self.assertEqual(r_profile['lastname'], resp_r_profile['lastname'])\n self.assertEqual(r_profile['work_address'],\n resp_r_profile['work_address'])\n self.assertEqual(r_profile['gender'], resp_r_profile['gender'])\n self.assertEqual(r_profile['age'], resp_r_profile['age'])\n self.assertEqual(r_profile['email'], resp_r_profile['email'])\n self.assertDictContainsSubset(get_resp, MODIFIED_PATIENT)" ]
[ "0.73722833", "0.7240795", "0.723858", "0.7233905", "0.7230748", "0.713083", "0.70846134", "0.7058695", "0.70558417", "0.7032471", "0.699041", "0.6960625", "0.6956722", "0.69488096", "0.6937412", "0.6928816", "0.6922781", "0.69169754", "0.6872866", "0.68542856", "0.6845763", "0.6832158", "0.6822358", "0.6806013", "0.6747677", "0.67389697", "0.6706937", "0.670156", "0.6695723", "0.66875124" ]
0.8568167
0
Test the _check_user_profile_id method. We try to create a user with admin as the user profile. It raises a ValidationError.
def test_check_user_profile_id(self): userValue = {'name': 'User Test 1', 'login': 'usertest1', 'user_profile_id': self.env.ref('base.user_root').id, } with self.assertRaises(ValidationError): self.env['res.users'].create(userValue)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_onchange_user_profile(self):\n admin = self.env.ref('base.user_root').id\n with self.assertRaises(ValidationError):\n self.env['res.users'].browse(\n self.user.id).write({'user_profile_id': admin})", "def test_create_profile_on_access(self):\n user = User.objects.create_user(\n 'auto_tester', '[email protected]', 'auto_tester')\n profile = user.get_profile()\n profile.delete()\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')", "def test_create_profile_on_user_created(self):\n user = User.objects.create_user(\n 'auto_tester', '[email protected]', 'auto_tester')\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def test_create_user_profile(self):\n user_profile_dict = {\n 'date_of_birth': datetime.datetime(1992, 10, 27),\n 'region': 'Serbia',\n 'bio': 'This is a test user account',\n 'user_type': models.UserProfile.UserType.REGULAR,\n 'phone': '065123'\n }\n\n user = get_user_model().objects.create_user(\n email=self.test_user_email,\n password=self.test_user_pass,\n name=self.test_user_name\n )\n\n user_profile = models.UserProfile.objects.filter(user_id=user).first()\n user_profile.date_of_birth = user_profile_dict['date_of_birth']\n user_profile.region = user_profile_dict['region']\n user_profile.bio = user_profile_dict['bio']\n user_profile.user_type = user_profile_dict['user_type']\n user_profile.phone = user_profile_dict['phone']\n user_profile.save()\n\n self.assertEqual(user_profile.date_of_birth,\n user_profile_dict['date_of_birth'])\n self.assertEqual(user_profile.region,\n user_profile_dict['region'])\n self.assertEqual(user_profile.bio, user_profile_dict['bio'])\n self.assertEqual(user_profile.user_type,\n user_profile_dict['user_type'])", "def test_create_user(profile_data):\n email = \"email@localhost\"\n username = \"username\"\n user = api.create_user(username, email, profile_data, {\"first_name\": \"Bob\"})\n\n assert isinstance(user, User)\n assert user.email == email\n assert user.username == username\n assert user.first_name == \"Bob\"\n\n if \"name\" in profile_data:\n assert user.profile.name == profile_data[\"name\"]\n else:\n assert user.profile.name is None", "def test_profile_creation(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n\n self.assertEqual(self.registration_profile.objects.count(), 1)\n self.assertEqual(profile.user.id, new_user.id)\n self.assertTrue(re.match('^[a-f0-9]{40,64}$', profile.activation_key))\n self.assertEqual(str(profile),\n \"Registration information for alice\")", "def profile_create(faker_obj=fake_init()):\n profile = faker_obj.simple_profile()\n user = User.objects.create(\n username=profile[\"username\"],\n email=profile[\"mail\"],\n password=profile[\"username\"][::-1],\n )\n return user.id", "def test_user_profile_creation_is_successful(self):\n user_profile = 
UserProfile.objects.get(user_id=self.user_1.id)\n user_profile_count = UserProfile.objects.count()\n self.assertEqual(user_profile_count, 2)\n self.assertEqual(\n str(user_profile), \"{}'s profile\".format(user_profile.user.username)\n )", "def test_that_a_user_profile_was_created_successfully(self):\n response = self.register_user(data=self.user)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_registration_profile_created(self):\n self.assertEqual(UserProfile.objects.count(), 2)", "def test_one_profile(self):\n User.objects.create_user(\n username=\"koalabear\",\n email=\"[email protected]\",\n password=\"secret\")\n\n c = Client()\n response = c.get(reverse('profiles:index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 1)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_user_profile_created(self):\n\t\tself.assertTrue(\n\t\t\thasattr(self.user, 'profile'),\n\t\t\t\"User profile does not exist.\"\n\t\t)\n\n\t\tself.assertTrue(\n\t\t\tisinstance(self.user.profile, UserProfile),\n\t\t\t\"User profile is of type {}, expected to be of type {}\".format(\n\t\t\t\ttype(self.user.profile), type(UserProfile)\n\t\t\t)\n\t\t)", "def test_profile_created(self):\n user = User.objects.create_user(\n username=\"koalabear\",\n email=\"[email protected]\",\n password=\"secret\")\n\n self.assertEqual(User.objects.get(username=\"koalabear\").email,\n \"[email protected]\")\n\n user.profile.location = \"Edinburgh\"\n user.save()\n\n self.assertEqual(Profile.objects.get(pk=user.id).location, \"Edinburgh\")", "def test_create(km_user_factory):\n models.Profile.objects.create(\n is_private=True, km_user=km_user_factory(), name=\"My Profile\"\n )", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_create_with_profile(self):\n # make sure the guest not exists\n self.sdkapi.guest_create(self.userid, 1, 1024,\n user_profile=CONF.zvm.user_profile)\n self.assertTrue(\n self.test_util.wait_until_create_userid_complete(self.userid))", "def test_create_profile(self):\n resp = self.client.post(self.register_url, self.user, format=\"json\")\n current_users = User.objects.count()\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), current_users)\n self.assertEqual(Profile.objects.count(), current_users)", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)", "def 
test_user_profile(url):\n test_clear(url)\n admin_tk, admin_id = channel_user_create_0(url)\n\n test_profile = {\n 'token': admin_tk,\n 'u_id': admin_id\n }\n resp = requests.get(url + \"user/profile\", params=test_profile)\n profile_resp = resp.json()\n assert profile_resp['user']['u_id'] == admin_id\n assert profile_resp['user']['email'] == '[email protected]'\n assert profile_resp['user']['name_first'] == 'admin'\n assert profile_resp['user']['name_last'] == 'admin'", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_Profile(self):\n self.assertEquals(self.user_1.username, 'testuser')\n # self.assertEquals(self.user_1.password, '12345')\n self.assertEquals(self.user_1.email,\n '[email protected]')", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_check_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n test_user.check_user(\"test\", \"walIas15\")", "def test_70_public_user_profile(self):\r\n Fixtures.create()\r\n\r\n # Should work as an anonymous user\r\n url = '/account/%s/' % Fixtures.name\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a public profile page for the user\"\r\n assert Fixtures.fullname in res.data, err_msg\r\n\r\n # Should work as an authenticated user\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n assert Fixtures.fullname in res.data, err_msg\r\n\r\n # Should return 404 when a user does not exist\r\n url = '/account/a-fake-name-that-does-not-exist/'\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"It should return a 404\"\r\n assert res.status_code == 404, err_msg" ]
[ "0.7712734", "0.7201624", "0.7119838", "0.69911945", "0.6966534", "0.6954981", "0.69544595", "0.69232625", "0.68903434", "0.68688196", "0.6784438", "0.67346376", "0.6706729", "0.67004544", "0.67004544", "0.67004544", "0.6691957", "0.66688347", "0.6655687", "0.6626389", "0.6621808", "0.65646887", "0.65492225", "0.65061677", "0.6477158", "0.64692473", "0.6464656", "0.6450963", "0.64349395", "0.6404357" ]
0.8522871
0
Test the onchange user profile method. We try to set the profile of an existing user to admin. It raises a ValidationError.
def test_onchange_user_profile(self): admin = self.env.ref('base.user_root').id with self.assertRaises(ValidationError): self.env['res.users'].browse( self.user.id).write({'user_profile_id': admin})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_change_profile_changes_user(self):\n user = self.users[0]\n user.username = 'newname'\n profile = user.profile\n user.profile.save()\n self.assertEquals(user.username, profile.user.username)", "def test_change_username_changes_profile(self):\n user = self.users[0]\n profile = user.profile\n user.profile.bio = 'bio'\n user.profile.save()\n self.assertEquals(user.profile.bio, profile.bio)", "def test_can_update_user_profile(self):\n self.update_user()\n self.assertEqual(self.user.first_name, self.updated_data['first_name'])\n self.assertEqual(self.user.last_name, self.updated_data['last_name'])\n self.assertEqual(self.user.email, self.updated_data['email'])", "def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )", "def test_that_a_user_can_edit_their_profile(self):\n self.authorize_user(self.user_login_details)\n url = self.profiles_url + \\\n '{}'.format(self.user['user']['username']) + \"/\"\n response = self.client.patch(url, data=self.user_bio)\n self.assertEqual(response.data['bio'], \"You are a peculiar man.\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_profile_attribute(self):\n user = self.users[0]\n user.profile.bio = 'bio'\n user.profile.save()\n query = User.objects.first()\n self.assertTrue(query.profile.bio == 'bio')", "def test_userprofile_modification(self):\n self.user.userprofile.save(update_fields=['enabled'])\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def test_user_edit_profile(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser_id\n\n address = \"1215 Brookview Ave, Kettering, Ohio 45409\"\n\n resp = c.get(f\"/users/8989/\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\n '<h1 class=\"Display-4 text-center mt-3\"><b>Profile Information:</b></h1>',\n html,\n )\n self.assertIn(\"<p>testuser</p>\", html)\n self.assertIn(\"<p>[email protected]</p>\", html)\n self.assertIn(\"<p>662-996-3356</p>\", html)\n self.assertIn(\n '<a class=\"font-weight-bold btn winter-neva-gradient color-block btn-block my-4 waves-effect z-depth-0\" href=\"/users/8989/edit\">Edit Profile</a>',\n html,\n )", "def test_update_user(self):\n pass", "def user_update_profile():\n \n if 'userid' and 'email' not in request.forms:\n return {'status':'Failure','message':'User Id is missing,please try with correct data.'}\n \n data = user_obj.user_update_profile(request.forms)\n return data", "def test_write(self):\n userEdited = self.env['res.users'].browse(\n self.user.id).write({'user_profile_id': self.user_profile2.id})\n self.assertEqual(userEdited, True)", "def edit_profile():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify that user has logged in and the request is legit\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified != True: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # part2: check json\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_CREATE_USER_NO_JSON)\n if 
checked_json != True: return response\n # part3: verify json data\n try:\n user_email = login_session[\"login_user_email\"]\n except KeyError:\n # key error means we are offline til this far\n user_email = requested_json[\"email\"]\n # design decision: if there are invalid field names, only update the valid fields.\n # check updates keys and formats\n try:\n update_pairs = convert_to_underscore(requested_json[\"updates\"])\n \n if isinstance(update_pairs,dict) != True:\n response = generate_message(MESSAGE_UPDATE_PROFILE_NON_DICT,400)\n else:\n correct_format,valid_update_pairs, response = process_request_json(User,update_pairs)\n if correct_format == True: \n update_field(User, session, {\"email\": user_email},valid_update_pairs)\n response = generate_message(MESSAGE_UPDATE_PROFILE_SUCCESS,200)\n except KeyError:\n response = generate_message(MESSAGE_UPDATE_PROFILE_NO_ENTRY,400)\n return response", "def test_that_a_user_cannot_edit_another_users_profile(self):\n self.authorize_user(self.user_login_details)\n self.register_user(self.user2)\n url = self.profiles_url + \\\n '{}'.format(self.user2['user']['username']) + \"/\"\n response = self.client.patch(url, data=self.user_bio)\n message = \"You don't have permission to edit this profile\"\n self.assertEqual(response.data['message'], message)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_modify_user(self):\n print('(' + self.test_modify_user.__name__+')',\n self.test_modify_user.__doc__)\n # modify the user with provided user dict\n modify_resp = self.connection.modify_user(\n PATIENT_USERNAME, MODIFIED_PATIENT['public_profile'],\n MODIFIED_PATIENT['restricted_profile'])\n self.assertEqual(modify_resp, PATIENT_USERNAME)\n # check each value in the profile with the modified one, see if modification successful\n # get the get_user response\n get_resp = self.connection.get_user(PATIENT_USERNAME)\n resp_r_profile = get_resp['restricted_profile']\n r_profile = MODIFIED_PATIENT['restricted_profile']\n self.assertEqual(\n r_profile['user_id'], resp_r_profile['user_id'])\n self.assertEqual(r_profile['firstname'], resp_r_profile['firstname'])\n self.assertEqual(r_profile['lastname'], resp_r_profile['lastname'])\n self.assertEqual(r_profile['work_address'],\n resp_r_profile['work_address'])\n self.assertEqual(r_profile['gender'], resp_r_profile['gender'])\n self.assertEqual(r_profile['age'], resp_r_profile['age'])\n self.assertEqual(r_profile['email'], resp_r_profile['email'])\n self.assertDictContainsSubset(get_resp, MODIFIED_PATIENT)", "def profile_edit():\n form = ProfileForm(obj=current_user)\n\n if form.validate_on_submit():\n form.populate_obj(current_user)\n\n try:\n correct = True\n db.session.commit()\n\n flash(_('Profile updated correctly'), 'success')\n\n return render_template('admin/profile/edit.html', form=form)\n\n except IntegrityError:\n # Email already exists\n correct = False\n form.errors.email.append(_('Email is already registered'))\n\n return render_template('admin/profile/edit.html', form=form)\n\n except Exception:\n # Catch anything unknown\n correct = False\n\n flash(_('Failed to update profile, contact an administrator'), 'error')\n\n return render_template('admin/profile/edit.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/edit.html', form=form)", "def update_user():", "def test_create_profile_on_access(self):\n user = User.objects.create_user(\n 'auto_tester', '[email protected]', 'auto_tester')\n profile = 
user.get_profile()\n profile.delete()\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def user_profile():\n user = current_user\n user_is_valid = True\n if not user.active:\n flash('This user account is under review. Please update your profile '\n + ' and contact the organizing team to access all functions of '\n + 'this platform.', 'warning')\n\n form = UserForm(obj=user, next=request.args.get('next'))\n form.roles.choices = [(r.id, r.name) for r in Role.query.order_by('name')]\n\n # Check conflicting PKs\n if form.email.data != user.email:\n if User.query.filter_by(email=form.email.data).first() is not None:\n flash('This e-mail address is already registered.', 'error')\n user_is_valid = False\n\n if user.sso_id:\n # Do not allow changing password on SSO\n del form.password\n\n # Validation has passed\n if form.is_submitted() and form.validate() and user_is_valid:\n # Assign roles\n user.roles = [Role.query.filter_by(\n id=r).first() for r in form.roles.data]\n del form.roles\n\n # Sanitize username\n user.username = sanitize_input(form.username.data)\n del form.username\n\n # Assign password if changed\n originalhash = user.password\n form.populate_obj(user)\n # Do not allow changing password on SSO\n if not user.sso_id:\n if form.password.data:\n user.set_password(form.password.data)\n else:\n user.password = originalhash\n\n user.updated_at = datetime.utcnow()\n db.session.add(user)\n db.session.commit()\n user.socialize()\n flash('Profile updated.', 'success')\n return redirect(url_for('public.user', username=user.username))\n\n if not form.roles.choices:\n del form.roles\n else:\n form.roles.data = [(r.id) for r in user.roles]\n return render_template('public/useredit.html',\n oauth_type=oauth_type(),\n user=user, form=form,\n active='profile')", "def test_user_profile_setname(url):\n test_clear(url)\n admin_tk, admin_id = channel_user_create_0(url)\n\n test_profile = {\n 'token': admin_tk,\n 'u_id': admin_id\n }\n resp = requests.get(url + \"user/profile\", params=test_profile)\n profile_resp = resp.json()\n assert profile_resp['user']['u_id'] == admin_id\n assert profile_resp['user']['email'] == '[email protected]'\n assert profile_resp['user']['name_first'] == 'admin'\n assert profile_resp['user']['name_last'] == 'admin'\n\n test_profile_setname = {\n 'token': admin_tk,\n 'name_first': 'new_first',\n 'name_last': 'new_last'\n }\n requests.put(url + \"user/profile/setname\", json=test_profile_setname)\n \n test_profile = {\n 'token': admin_tk,\n 'u_id': admin_id\n }\n resp = requests.get(url + \"user/profile\", params=test_profile)\n profile_resp = resp.json()\n assert profile_resp['user']['u_id'] == admin_id\n assert profile_resp['user']['email'] == '[email protected]'\n assert profile_resp['user']['name_first'] == 'new_first'\n assert profile_resp['user']['name_last'] == 'new_last'", "def test_update_profile_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'newpassword'\n }\n res = self.client.patch(ME_URL, payload)\n\n # Refresh the user object with latest values from db\n self.user.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(self.user.email, payload['email'])\n self.assertTrue(self.user.check_password(payload['password']))", "def test_Profile(self):\n self.assertEquals(self.user_1.username, 'testuser')\n # self.assertEquals(self.user_1.password, '12345')\n self.assertEquals(self.user_1.email,\n '[email protected]')", "def 
test_update_user_profile(self):\n payload = {'name': 'new name', 'password': 'newpassword123'}\n\n response = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_user_profile(self):\n payload = {'name': 'new name', 'password': 'newpassword123'}\n\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_update_user_profile(self):\r\n payload = {\r\n 'name': 'new_name',\r\n 'password': 'password123'\r\n }\r\n\r\n res = self.client.patch(ME_URL, payload)\r\n\r\n self.user.refresh_from_db()\r\n\r\n self.assertEqual(self.user.name, payload['name'])\r\n self.assertTrue(self.user.check_password(payload['password']))\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def edit_user():\n if CURR_USER_KEY in session:\n user = g.user\n form = ProfileEditForm(obj=user)\n\n if form.validate_on_submit():\n user.first_name = form.first_name.data\n user.last_name = form.last_name.data\n user.description = form.description.data\n user.email = form.email.data\n user.image_url = form.image_url.data or \"/static/images/default-pic.png\"\n\n db.session.commit()\n\n flash(\"Profile edited.\")\n return redirect(\"/profile\")\n\n return render_template('/profile/edit-form.html', form=form)\n else:\n return redirect('/login')", "def test_update_user_profile(setup_client, setup_user):\n client = setup_client\n user = setup_user\n payload = {\n \"name\": \"New name\",\n \"role\": \"Purchaser\",\n \"password\": \"New password\"\n }\n res = client.patch(ME_URL, payload)\n user.refresh_from_db()\n assert res.status_code == status.HTTP_200_OK\n assert user.name == payload[\"name\"]\n assert user.role == payload[\"role\"]\n assert user.check_password(payload[\"password\"])\n assert res.status_code == status.HTTP_200_OK", "def test_profile_created(self):\n user = User.objects.create_user(\n username=\"koalabear\",\n email=\"[email protected]\",\n password=\"secret\")\n\n self.assertEqual(User.objects.get(username=\"koalabear\").email,\n \"[email protected]\")\n\n user.profile.location = \"Edinburgh\"\n user.save()\n\n self.assertEqual(Profile.objects.get(pk=user.id).location, \"Edinburgh\")", "def test_update_user_profile(self):\n payload = {\"name\": \"Lucifer\", 'password': \"12346987\"}\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_05_update_user_profile(self):\r\n\r\n\r\n # Create an account and log in\r\n self.register()\r\n url = \"/account/fake/update\"\r\n res = self.app.get(url, follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n\r\n # Update profile with new data\r\n res = self.update_profile(method=\"GET\")\r\n msg = \"Update your profile: %s\" % self.user.fullname\r\n assert self.html_title(msg) in res.data, res.data\r\n msg = 'input id=\"id\" name=\"id\" type=\"hidden\" value=\"1\"'\r\n assert msg in res.data, res\r\n assert self.user.fullname in res.data, res\r\n assert \"Save the changes\" in res.data, res\r\n msg = '<a href=\"/account/johndoe/update\" 
class=\"btn\">Cancel</a>'\r\n assert msg in res.data, res.data\r\n\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"johndoe2@example\",\r\n locale=\"en\")\r\n assert \"Please correct the errors\" in res.data, res.data\r\n\r\n\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"[email protected]\",\r\n locale=\"en\")\r\n title = \"Update your profile: John Doe 2\"\r\n assert self.html_title(title) in res.data, res.data\r\n assert \"Your profile has been updated!\" in res.data, res.data\r\n assert \"John Doe 2\" in res.data, res\r\n assert \"johndoe\" in res.data, res\r\n assert \"[email protected]\" in res.data, res\r\n\r\n # Updating the username field forces the user to re-log in\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"[email protected]\",\r\n locale=\"en\",\r\n new_name=\"johndoe2\")\r\n assert \"Your profile has been updated!\" in res.data, res\r\n assert \"Please sign in\" in res.data, res.data\r\n\r\n res = self.signin(method=\"POST\", email=\"[email protected]\",\r\n password=\"p4ssw0rd\",\r\n next=\"%2Faccount%2Fprofile\")\r\n assert \"Welcome back John Doe 2\" in res.data, res.data\r\n assert \"John Doe 2\" in res.data, res\r\n assert \"johndoe2\" in res.data, res\r\n assert \"[email protected]\" in res.data, res\r\n\r\n res = self.signout()\r\n assert self.html_title() in res.data, res\r\n assert \"You are now signed out\" in res.data, res\r\n\r\n # A user must be signed in to access the update page, the page\r\n # the title will be the redirection to log in\r\n res = self.update_profile(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page.\" in res.data, res\r\n\r\n # A user must be signed in to access the update page, the page\r\n # the title will be the redirection to log in\r\n res = self.update_profile()\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page.\" in res.data, res\r\n\r\n self.register(fullname=\"new\", name=\"new\")\r\n url = \"/account/johndoe2/update\"\r\n res = self.app.get(url)\r\n assert res.status_code == 403", "def test_040_update_user(self):\n\n testflow.step(\"Updating user %s\", TEST_USER2)\n assert USER_CLI.run(\n 'edit',\n TEST_USER2,\n attribute='firstName=userX2',\n )[0]" ]
[ "0.7477738", "0.7256297", "0.7110831", "0.69922745", "0.6937833", "0.6931283", "0.6902336", "0.69003004", "0.68779624", "0.6848639", "0.6826865", "0.67532784", "0.67202806", "0.6707016", "0.66928405", "0.6688107", "0.6653077", "0.6639785", "0.66318655", "0.6623886", "0.6619158", "0.65980595", "0.6595778", "0.65877205", "0.6583775", "0.65674365", "0.65587795", "0.6545317", "0.6526551", "0.6514187" ]
0.88653076
0
Force the use of the assigned new ld.so by changing the binary
def change_ld(binary, ld): if not os.access(ld, os.R_OK): log.failure("Invalid path {} to ld".format(ld)) return None if not isinstance(binary, ELF): if not os.access(binary, os.R_OK): log.failure("Invalid path {} to binary".format(binary)) return None binary = ELF(binary) for segment in binary.segments: if segment.header['p_type'] == 'PT_INTERP': size = segment.header['p_memsz'] addr = segment.header['p_paddr'] data = segment.data() if size <= len(ld): log.failure("Failed to change PT_INTERP from {} to {}".format(data, ld)) return None binary.write(addr, ld.ljust(size, '\0')) if not os.access('/tmp/pwn', os.F_OK): os.mkdir('/tmp/pwn') path = '/tmp/pwn/{}_debug'.format(os.path.basename(binary.path)) if os.access(path, os.F_OK): os.remove(path) info("Removing exist file {}".format(path)) binary.save(path) os.chmod(path, 0b111000000) #rwx------ success("PT_INTERP has changed from {} to {}. Using temp file {}".format(data, ld, path)) return ELF(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_executable(fname):\n default_encoding = sys.getdefaultencoding()\n try:\n ostype = subprocess.check_output(\n ['uname', '-s']).strip().decode(default_encoding)\n except subprocess.CalledProcessError:\n return\n except OSError as reason:\n if getattr(reason, 'winerror', None) is not None:\n return\n raise reason\n\n if ostype != \"Linux\":\n return\n\n if not os.path.exists(\"/etc/NIXOS\"):\n return\n if os.path.exists(\"/lib\"):\n return\n\n # At this point we're pretty sure the user is running NixOS\n nix_os_msg = \"info: you seem to be running NixOS. Attempting to patch\"\n print(nix_os_msg, fname)\n\n try:\n interpreter = subprocess.check_output(\n [\"patchelf\", \"--print-interpreter\", fname])\n interpreter = interpreter.strip().decode(default_encoding)\n except subprocess.CalledProcessError as reason:\n print(\"warning: failed to call patchelf:\", reason)\n return\n\n loader = interpreter.split(\"/\")[-1]\n\n try:\n ldd_output = subprocess.check_output(\n ['ldd', '/run/current-system/sw/bin/sh'])\n ldd_output = ldd_output.strip().decode(default_encoding)\n except subprocess.CalledProcessError as reason:\n print(\"warning: unable to call ldd:\", reason)\n return\n\n for line in ldd_output.splitlines():\n libname = line.split()[0]\n if libname.endswith(loader):\n loader_path = libname[:len(libname) - len(loader)]\n break\n else:\n print(\"warning: unable to find the path to the dynamic linker\")\n return\n\n correct_interpreter = loader_path + loader\n\n try:\n subprocess.check_output(\n [\"patchelf\", \"--set-interpreter\", correct_interpreter, fname])\n except subprocess.CalledProcessError as reason:\n print(\"warning: failed to call patchelf:\", reason)\n return", "def ld(output_name, other_args, arch=None):\n my_args = []\n if not (arch is None or len(arch) == 0):\n my_args.append(\"-arch\")\n my_args.append(arch)\n \n cmd = \"/usr/bin/ld -o \" + output_name + \" \" + \" \".join(other_args+my_args)\n print cmd\n os.system(cmd)", "def set_linker_script(self, op):\n self.__linker_script = [\"-T\", op]", "def test_llvm_readelf(self):\n self.assertEqual(\n self.ndk.llvm_readelf,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-readelf\",\n )", "def fixMissingPythonLib(self, binaries):\n if target_platform != 'linux2': return\n\n name = 'libpython%d.%d.so' % sys.version_info[:2]\n for (nm, fnm, typ) in binaries:\n if typ == 'BINARY' and name in fnm:\n # lib found\n return\n\n lib = bindepend.findLibrary(name)\n if lib is None:\n raise IOError(\"Python library not found!\")\n\n binaries.append((os.path.split(lib)[1], lib, 'BINARY'))", "def fixLDPath( root, ldpath, directory ):\n\n if os.path.exists( directory ):\n shutil.rmtree( directory )\n\n start = os.getcwd()\n os.mkdir( directory )\n os.chdir( directory )\n uniqueLD = uniquePath( ldpath )\n\n if DEBUG:\n print 'Unique LD LIBRARY PATH is:'\n print uniqueLD\n sys.stdout.flush()\n\n ldlist = string.split( uniqueLD, ':' )\n if DEBUG:\n print ''\n print 'LD List is:'\n print ldlist\n print ''\n sys.stdout.flush()\n\n for path in ldlist:\n if os.path.exists( path ):\n\n if DEBUG:\n print 'Searching for shared libraries in:'\n print path\n print '-----------------------------------------------'\n res = shellCall( 0, 'ls ' + path + '/*.so*' )\n if res['OK']:\n print res['Value']\n else:\n print res\n print '-----------------------------------------------'\n\n output = shellCall( 0, 'ls ' + path + '/*.so*' )\n #must be tidied for Windows (same below)\n\n if DEBUG:\n if not output['OK']:\n print 
'**************************'\n print 'Warning, problem with ls:'\n print output\n print '**************************'\n\n if not output['Value'][0]:\n ldlibs = output['Value'][1].split( '\\n' )\n for lib in ldlibs:\n if os.path.exists( lib ):\n filename = os.path.basename( lib )\n output = shellCall( 0, 'ln -s ' + str( lib ) + ' ' + str( filename ) )\n #N.B. for Windows this should be a copy...\n if DEBUG:\n if not output['OK']:\n print '********************************'\n print 'Warning, problem creating link:'\n print 'File: ', filename\n print 'Path: ', lib\n print output\n print '********************************'\n\n if DEBUG:\n print 'Searching for rootmap file in:'\n print path\n print '-----------------------------------------------'\n res = shellCall( 0, 'ls ' + path + '/*rootmap*' )\n if res['OK']:\n print res['Value']\n else:\n print res\n print '-----------------------------------------------'\n\n output = shellCall( 0, 'ls ' + path + '/*rootmap*' )\n\n if DEBUG:\n if not output['OK']:\n print '**************************'\n print 'Warning, problem with rootmap:'\n print output\n print '**************************'\n\n if not output['Value'][0]:\n ldlibs = output['Value'][1].split( '\\n' )\n for lib in ldlibs:\n if os.path.exists( lib ):\n if re.search( 'RELAX', lib ) is not None:\n filename = os.path.basename( lib )\n output = shellCall( 0, 'ln -s ' + str( lib ) + ' ' + str( filename ) )\n if DEBUG:\n if not output['OK']:\n print '********************************'\n print 'Warning, problem creating link:'\n print 'File: ', filename\n print 'Path: ', lib\n print output\n print '********************************'\n\n os.chdir( start )\n sys.stdout.flush()", "def load_linux_so():\n shared_name = get_project_root() / \"build/libastyle.so\"\n\n shared = str(pl.Path(shared_name).absolute())\n # file_ = {f for f in pl.Path().iterdir() if f.name == shared_name}\n\n try:\n libc = cdll.LoadLibrary(shared)\n except OSError as err:\n # \"cannot open shared object file: No such file or directory\"\n print(err)\n raise FileNotFoundError(\"Cannot find \" + shared)\n return libc", "def _prepare_ldpreload(container_dir, app):\n etc_dir = os.path.join(container_dir, 'overlay', 'etc')\n fs.mkdir_safe(etc_dir)\n new_ldpreload = os.path.join(etc_dir, 'ld.so.preload')\n\n try:\n shutil.copyfile('/etc/ld.so.preload', new_ldpreload)\n except IOError as err:\n if err.errno != errno.ENOENT:\n raise\n _LOGGER.info('/etc/ld.so.preload not found, creating empty.')\n utils.touch(new_ldpreload)\n\n ldpreloads = []\n if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:\n treadmill_bind_preload = subproc.resolve('treadmill_bind_preload.so')\n ldpreloads.append(treadmill_bind_preload)\n\n if not ldpreloads:\n return\n\n _LOGGER.info('Configuring /etc/ld.so.preload: %r', ldpreloads)\n with io.open(new_ldpreload, 'a') as f:\n f.write('\\n'.join(ldpreloads) + '\\n')", "def link_binary(self, src, dst):\n cmd = [self.__command, \"--entry=\" + str(PlatformVar(\"entry\")), src, \"-o\", dst] + self.__linker_script\n (so, se) = run_command(cmd)\n if 0 < len(se) and is_verbose():\n print(se)\n return so", "def auto_load():\n if sys.startswith('linux'):\n pass", "def test_load_first(install_mockery, mock_fetch, mock_archive, mock_packages):\n install(\"[email protected]\")\n install(\"[email protected]\")\n\n # Now there are two versions of libelf, which should cause an error\n out = load(\"--sh\", \"libelf\", fail_on_error=False)\n assert \"matches multiple packages\" in out\n assert \"Use a more specific spec\" in out\n\n 
# Using --first should avoid the error condition\n load(\"--sh\", \"--first\", \"libelf\")", "def initialize_library():\n # return the handle to the shared object\n if os.name == \"nt\":\n pass\n # libc = load_windows_dll()\n else:\n libc = load_linux_so()\n return libc", "def setBEGINLIBPATH():\r\n dllpath = os.path.join(sys.prefix, \"Lib\", \"lib-dynload\")\r\n libpath = os.environ['BEGINLIBPATH'].split(';')\r\n if libpath[-1]:\r\n libpath.append(dllpath)\r\n else:\r\n libpath[-1] = dllpath\r\n os.environ['BEGINLIBPATH'] = ';'.join(libpath)", "def load_lib():\n root_dir = command.get_base_dirs(bin_dir)[0]\n _bin_dir, lib_dir = command.get_bin_lib_dirs(root_dir)\n magic_so = os.path.join(lib_dir, 'libmagic' + system.lib_ext)\n\n # add lib path to the front of the PATH env var\n new_path = os.pathsep.join([lib_dir, os.environ['PATH']])\n os.environ['PATH'] = new_path\n\n if os.path.exists(magic_so):\n lib = ctypes.CDLL(magic_so)\n if lib and lib._name:\n return lib\n raise ImportError('Failed to load libmagic from %(magic_so)r' % locals())", "def replace_binary_for_upgrade(self, new_install_cfg, relaunch=True):\n # On windows the install prefix may change,\n # since we can't overwrite open files:\n old_version = self.cfg.version\n self.default_starter_args = new_install_cfg.default_starter_args.copy()\n self.enterprise = new_install_cfg.enterprise\n self.replace_binary_setup_for_upgrade(new_install_cfg)\n with step(\"kill the starter processes of the old version\"):\n logging.info(\"StarterManager: Killing my instance [%s]\", str(self.instance.pid))\n self.kill_instance()\n with step(\"revalidate that the old arangods are still running and alive\"):\n self.detect_instance_pids_still_alive()\n if relaunch:\n with step(\"replace the starter binary with a new one,\" + \" this has not yet spawned any children\"):\n self.respawn_instance(new_install_cfg.version)\n logging.info(\"StarterManager: respawned instance as [%s]\", str(self.instance.pid))\n self.arangosh = None\n self.detect_arangosh_instances(new_install_cfg, old_version)", "def ld_linux_path(root):\n\n return os.path.join(root, 'lib', 'ld-linux-xpkg.so')", "def cwipc_realsense2_dll_load(libname : Optional[str]=None):\n global _cwipc_realsense2_dll_reference\n if _cwipc_realsense2_dll_reference: return _cwipc_realsense2_dll_reference\n \n with _cwipc_dll_search_path_collection(None) as loader:\n if libname == None:\n libname = 'cwipc_realsense2'\n if not os.path.isabs(libname):\n libname = loader.find_library(libname)\n if not libname:\n raise RuntimeError('Dynamic library realsense2 not found')\n assert libname\n _cwipc_realsense2_dll_reference = ctypes.CDLL(libname)\n if not _cwipc_realsense2_dll_reference:\n raise RuntimeError(f'Dynamic library {libname} cannot be loaded')\n \n _cwipc_realsense2_dll_reference.cwipc_realsense2.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p), ctypes.c_ulong]\n _cwipc_realsense2_dll_reference.cwipc_realsense2.restype = cwipc_tiledsource_p\n if hasattr(_cwipc_realsense2_dll_reference, 'cwipc_rs2offline'):\n _cwipc_realsense2_dll_reference.cwipc_rs2offline.argtypes = [cwipc_offline_settings, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p), ctypes.c_ulong]\n _cwipc_realsense2_dll_reference.cwipc_rs2offline.restype = cwipc_offline_p\n _cwipc_realsense2_dll_reference.cwipc_offline_free.argtypes = [cwipc_offline_p]\n _cwipc_realsense2_dll_reference.cwipc_offline_free.restype = None\n _cwipc_realsense2_dll_reference.cwipc_offline_get_source.argtypes = [cwipc_offline_p]\n 
_cwipc_realsense2_dll_reference.cwipc_offline_get_source.restype = cwipc_tiledsource_p\n _cwipc_realsense2_dll_reference.cwipc_offline_feed.argtypes = [cwipc_offline_p, ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]\n _cwipc_realsense2_dll_reference.cwipc_offline_feed.restype = ctypes.c_bool\n\n return _cwipc_realsense2_dll_reference", "def dlopen(ffi, *names):\r\n for name in names:", "def _find_ld_version():\n if sys.platform == 'darwin':\n return _find_exe_version('ld -v', _MAC_OS_X_LD_VERSION)\n else:\n return _find_exe_version('ld -v')", "def _reload(mod,larch=None,**kw):\n\n if isinstance(mod, str):\n return larch.import_module(mod, do_reload=True)\n\n for k,v in chain(larch.symtable._sys.modules.iteritems(), sys.modules.iteritems()):\n if v == mod:\n modname = k\n break\n try:\n return larch.import_module(modname,do_reload=True)\n except NameError:\n pass", "def android_patch() -> None:\n fname = 'src/cpython/Modules/Setup.dist'\n txt = efrotools.readfile(fname)\n\n # Need to switch some flags on this one.\n txt = efrotools.replace_one(txt, '#zlib zlibmodule.c',\n 'zlib zlibmodule.c -lz\\n#zlib zlibmodule.c')\n # Just turn all these on.\n for enable in [\n '#array arraymodule.c', '#cmath cmathmodule.c _math.c',\n '#math mathmodule.c', '#_contextvars _contextvarsmodule.c',\n '#_struct _struct.c', '#_weakref _weakref.c',\n '#_testcapi _testcapimodule.c', '#_random _randommodule.c',\n '#_elementtree -I', '#_pickle _pickle.c',\n '#_datetime _datetimemodule.c', '#_bisect _bisectmodule.c',\n '#_heapq _heapqmodule.c', '#_asyncio _asynciomodule.c',\n '#unicodedata unicodedata.c', '#fcntl fcntlmodule.c',\n '#select selectmodule.c', '#_csv _csv.c',\n '#_socket socketmodule.c', '#_blake2 _blake2/blake2module.c',\n '#binascii binascii.c', '#_posixsubprocess _posixsubprocess.c',\n '#_sha3 _sha3/sha3module.c'\n ]:\n txt = efrotools.replace_one(txt, enable, enable[1:])\n if ENABLE_OPENSSL:\n txt = efrotools.replace_one(txt, '#_ssl _ssl.c \\\\',\n '_ssl _ssl.c -DUSE_SSL -lssl -lcrypto')\n else:\n # Note that the _md5 and _sha modules are normally only built if the\n # system does not have the OpenSSL libs containing an optimized\n # version.\n for enable in [\n '#_md5 md5module.c', '#_sha1 sha1module.c',\n '#_sha256 sha256module.c', '#_sha512 sha512module.c'\n ]:\n txt = efrotools.replace_one(txt, enable, enable[1:])\n\n # Turn this off (its just an example module).\n txt = efrotools.replace_one(txt, 'xxsubtype xxsubtype.c',\n '#xxsubtype xxsubtype.c')\n\n # For whatever reason this stuff isn't in there at all; add it.\n txt += '\\n_json _json.c\\n'\n\n txt += '\\n_lzma _lzmamodule.c -llzma\\n'\n\n txt += ('\\n_sqlite3 -I$(srcdir)/Modules/_sqlite'\n ' -DMODULE_NAME=\\'\\\\\"sqlite3\\\\\"\\' -DSQLITE_OMIT_LOAD_EXTENSION'\n ' -lsqlite3 \\\\\\n'\n ' _sqlite/cache.c \\\\\\n'\n ' _sqlite/connection.c \\\\\\n'\n ' _sqlite/cursor.c \\\\\\n'\n ' _sqlite/microprotocols.c \\\\\\n'\n ' _sqlite/module.c \\\\\\n'\n ' _sqlite/prepare_protocol.c \\\\\\n'\n ' _sqlite/row.c \\\\\\n'\n ' _sqlite/statement.c \\\\\\n'\n ' _sqlite/util.c\\n')\n\n if ENABLE_OPENSSL:\n txt += '\\n\\n_hashlib _hashopenssl.c -DUSE_SSL -lssl -lcrypto\\n'\n\n txt += '\\n\\n*disabled*\\n_ctypes _crypt grp'\n\n efrotools.writefile(fname, txt)\n\n # Ok, this is weird.\n # When applying the module Setup, python looks for any line containing *=*\n # and interprets the whole thing a a global define?...\n # This breaks things for our static sqlite compile above.\n # The check used to look for 
[A-Z]*=* which didn't break, so let' just\n # change it back to that for now.\n fname = 'src/cpython/Modules/makesetup'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt, '\t\t*=*)\tDEFS=\"$line$NL$DEFS\"; continue;;',\n '\t\t[A-Z]*=*)\tDEFS=\"$line$NL$DEFS\"; continue;;')\n efrotools.writefile(fname, txt)\n\n print('APPLIED EFROTOOLS ANDROID BUILD PATCHES.')", "def set_ranlib(self):\n # Some systems don't have the ranlib command (e.g. SGIs).\n # In the case where ranlib is not present in the PATH,\n # echo is used instead of ranlib\n print \"Setting ranlib command...\",\n\n path=str(os.getenv('PATH')).split(os.pathsep)\n for i in path:\n if os.path.isfile(os.path.join(i,'ranlib')):\n self.config.ranlib=os.path.join(i,'ranlib')\n print self.config.ranlib\n return\n\n for i in path:\n if os.path.isfile(os.path.join(i,'echo')):\n self.config.ranlib=os.path.join(i,'echo')\n print self.config.ranlib\n return", "def generate_linker_script(self, dst, modify_start = False):\n (so, se) = run_command([self.__command, \"--verbose\"])\n if 0 < len(se) and is_verbose():\n print(se)\n match = re.match(r'.*linker script\\S+\\s*\\n=+\\s+(.*)\\s+=+\\s*\\n.*', so, re.DOTALL)\n if not match:\n raise RuntimeError(\"could not extract script from linker output\")\n ld_script = match.group(1)\n ld_script = re.sub(r'\\n([^\\n]+\\s)(_end|_edata|__bss_start)(\\s*=[^\\n]+)\\n', r'\\n\\1/*\\2\\3*/\\n', ld_script, re.MULTILINE)\n ld_script = re.sub(r'SEGMENT_START\\s*\\(\\s*(\\S+)\\s*,\\s*\\d*x?\\d+\\s*\\)', r'SEGMENT_START(\\1, %s)' % (str(PlatformVar(\"entry\"))), ld_script, re.MULTILINE)\n if modify_start:\n ld_script = re.sub(r'(SEGMENT_START.*\\S)\\s*\\+\\s*SIZEOF_HEADERS\\s*;', r'\\1;', ld_script, re.MULTILINE)\n fd = open(dst, \"w\")\n fd.write(ld_script)\n fd.close()\n if is_verbose():\n print(\"Wrote linker script '%s'.\" % (dst))\n return ld_script", "def build_linux(self, **kargs):\n self.linux_files = [\"%s/%s/wombat/vmlinux\" % (self.builddir, self.name)]\n LIB_DEPENDS = [self.libs[\"mutex\"][1]]\n LIB_DEPENDS += [self.libs[\"iguana\"][1]]\n LIB_DEPENDS += [self.libs[\"l4\"][1]]\n LIB_DEPENDS += [self.libs[\"timer\"][1]]\n LIB_DEPENDS += [self.libs[\"l4e\"][1]]\n LIB_DEPENDS += [self.libs[\"c\"][1]]\n LIB_DEPENDS += [self.libs[\"circular_buffer\"][1]]\n LIB_DEPENDS += [self.libs[\"ll\"][1]]\n LIB_DEPENDS += [self.libs[\"range_fl\"][1]]\n LIB_DEPENDS += [self.libs[\"naming\"][1]]\n\n if \"pxa\" in self.machine.drivers:\n LIB_DEPENDS += [self.libs[\"pxa\"][1]]\n \n l4linux = self.Command(self.linux_files, LIB_DEPENDS, buildlinux)\n l4linux = Flatten([l4linux])[0]\n Precious(self.linux_files)\n\n\twombat_cflags = \"-DENDIAN_%s \" % self.machine.endian.upper()\n\n\tif machine.pidreloc == True:\n wombat_cflags += \" -DARM_PID_RELOC \"\n\n if restrict_vm == True:\n wombat_cflags += \" -DCONFIG_RESTRICTED_VM=1 \"\n\n\tif (hasattr(machine, \"c_flags\")):\n\t wombat_cflags += ' '.join(machine.c_flags)\n\n # This is horrible :(\n mutex_include = os.getcwd() + os.sep + self.libs[\"mutex\"][0][0][1:]\n ig_include = os.getcwd() + os.sep + self.libs[\"iguana\"][0][0][1:]\n ig_idl4_include = self.libs[\"iguana\"][0][-1]\n l4_include = os.getcwd() + os.sep + self.libs[\"l4\"][0][0][1:]\n timer_include = os.getcwd() + os.sep + self.libs[\"timer\"][0][0][1:]\n cb_include = os.getcwd() + os.sep + self.libs[\"circular_buffer\"][0][0][1:]\n idl4_include = os.getcwd() + os.sep + self.libs[\"idl4\"][0][1:] + os.sep\n naming_include = os.getcwd() + os.sep + self.libs[\"naming\"][0][0][1:] + os.sep\n \n 
mutex_lib = os.getcwd() + os.sep + self.libs[\"mutex\"][2][1:] + os.sep\n ig_lib = os.getcwd() + os.sep + self.libs[\"iguana\"][2][1:] + os.sep\n l4_lib = os.getcwd() + os.sep + self.libs[\"l4\"][2][1:] + os.sep\n timer_lib = os.getcwd() + os.sep + self.libs[\"timer\"][2][1:] + os.sep\n l4e_lib = os.getcwd() + os.sep + self.libs[\"l4e\"][2][1:] + os.sep\n c_lib = os.getcwd() + os.sep + self.libs[\"c\"][2][1:] + os.sep\n cb_lib = os.getcwd() + os.sep + self.libs[\"circular_buffer\"][2][1:] + os.sep\n ll_lib = os.getcwd() + os.sep + self.libs[\"ll\"][2][1:] + os.sep\n rfl_lib = os.getcwd() + os.sep + self.libs[\"range_fl\"][2][1:] + os.sep\n naming_lib = os.getcwd() + os.sep + self.libs[\"naming\"][2][1:] + os.sep\n\n LIB_ARGS = \"\"\n LIB_ARGS += \" LIBL4_INCLUDE=%s\" % l4_include\n LIB_ARGS += \" LIBTIMER_INCLUDE=%s\" % timer_include\n LIB_ARGS += \" LIBCB_INCLUDE=%s\" % cb_include\n LIB_ARGS += \" IGUANA_INCLUDE=%s\" % ig_include\n LIB_ARGS += \" IGUANA_IDL_INCLUDE=%s\" % ig_idl4_include\n LIB_ARGS += \" IDL4_INCLUDE=%s\" % idl4_include\n LIB_ARGS += \" NAMING_INCLUDE=%s\" % naming_include\n LIB_ARGS += \" MUTEX_INCLUDE=%s\" % mutex_include\n if \"pxa\" in self.machine.drivers:\n pxa_include = os.getcwd() + os.sep + self.libs[\"pxa\"][0][0][1:] + os.sep\n LIB_ARGS += \" LIBPXA_INCLUDE=%s\" % pxa_include\n\n LIB_ARGS += \" LIBCDIR=%s\" % c_lib\n LIB_ARGS += \" LIBIGUANADIR=%s\" % ig_lib\n LIB_ARGS += \" LIBL4DIR=%s\" % l4_lib\n LIB_ARGS += \" LIBTIMERDIR=%s\" % timer_lib\n LIB_ARGS += \" LIBL4EDIR=%s\" % l4e_lib\n LIB_ARGS += \" LIBCBDIR=%s\" % cb_lib\n LIB_ARGS += \" LIBLLDIR=%s\" % ll_lib\n LIB_ARGS += \" LIBRANGE_FLDIR=%s\" % rfl_lib\n LIB_ARGS += \" LIBNAMINGDIR=%s\" % naming_lib\n LIB_ARGS += \" LIBMUTEXDIR=%s\" % mutex_lib\n if \"pxa\" in self.machine.drivers:\n pxa_lib = os.getcwd() + os.sep + self.libs[\"pxa\"][2][1:] + os.sep\n LIB_ARGS += \" LIBPXADIR=%s\" % pxa_lib\n\n l4linux.linux_build_cmd = \"make -C wombat O=%s/%s/wombat WOMBAT_CFLAGS=\\'%s\\' V=0 %s \" \\\n \"CROSS_COMPILE=%s \" % \\\n (self.builddir, self.name, wombat_cflags, LIB_ARGS, self.toolchain)\n\n if cleaning and os.path.exists(\"%s/%s/wombat\" % (self.builddir, self.name)):\n shutil.rmtree(\"%s/%s/wombat\" % (self.builddir, self.name))\n\n # As for pistachio we don't track the L4Linux dependencies so the\n # use needs to explicitly specify scons build_linux= to get L4Linux\n # rebuilt\n add_arg(\"build_linux\", \"Set this option if you want to rebuild Wombat on this build\", 0)\n if build_linux != 0:\n AlwaysBuild(l4linux)\n\n\tenv['EXPECT_TEST_DATA'] = [(\"Iguana init starting\", None),\n (\"Loading linux\", None),\n (\"Memory: \\d+k/\\d+k available\", None),\n (\"Please press Enter to activate this console.\", None)]\n\n return l4linux", "def replace_binary_setup_for_upgrade(self, new_install_cfg):\n # On windows the install prefix may change,\n # since we can't overwrite open files:\n self.cfg.set_directories(new_install_cfg)\n if self.cfg.hot_backup_supported:\n self.hotbackup_args = [\n \"--all.rclone.executable\",\n self.cfg.real_sbin_dir / \"rclone-arangodb\",\n ]", "def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')", "def set_modprobe(self):\n if version_os[\"OS\"] == \"Debian\":\n self.exec_cmd(\"echo \\\"alias eth0 xennet\\\" >> %s/etc/modprobe.d/aliases\" % self.rep_vhosts_vm)\n else: \n self.exec_cmd(\"echo \\\"alias eth0 xennet\\\" >> 
%s/etc/modprobe.d/aliases.conf\" % self.rep_vhosts_vm)", "def _binaries_to_symbolize(self):\n raise NotImplementedError()", "def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None, **kwargs):\n compiler_output = kwargs.get(\"compiler_output\", None)\n arguments = kwargs.get(\"arguments\", [])\n version = kwargs.get(\"version\", None)\n compat_version = kwargs.get(\"compat_version\", version)\n\n if not shared_lib:\n shared_lib = \"{0}.{1}\".format(os.path.splitext(static_lib)[0], dso_suffix)\n\n compiler_args = []\n\n # TODO: Compiler arguments should not be hardcoded but provided by\n # the different compiler classes.\n if \"linux\" in arch or \"cray\" in arch:\n soname = os.path.basename(shared_lib)\n\n if compat_version:\n soname += \".{0}\".format(compat_version)\n\n compiler_args = [\n \"-shared\",\n \"-Wl,-soname,{0}\".format(soname),\n \"-Wl,--whole-archive\",\n static_lib,\n \"-Wl,--no-whole-archive\",\n ]\n elif \"darwin\" in arch:\n install_name = shared_lib\n\n if compat_version:\n install_name += \".{0}\".format(compat_version)\n\n compiler_args = [\n \"-dynamiclib\",\n \"-install_name\",\n \"{0}\".format(install_name),\n \"-Wl,-force_load,{0}\".format(static_lib),\n ]\n\n if compat_version:\n compiler_args.extend([\"-compatibility_version\", \"{0}\".format(compat_version)])\n\n if version:\n compiler_args.extend([\"-current_version\", \"{0}\".format(version)])\n\n if len(arguments) > 0:\n compiler_args.extend(arguments)\n\n shared_lib_base = shared_lib\n\n if version:\n shared_lib += \".{0}\".format(version)\n elif compat_version:\n shared_lib += \".{0}\".format(compat_version)\n\n compiler_args.extend([\"-o\", shared_lib])\n\n # Create symlinks for version and compat_version\n shared_lib_link = os.path.basename(shared_lib)\n\n if version or compat_version:\n symlink(shared_lib_link, shared_lib_base)\n\n if compat_version and compat_version != version:\n symlink(shared_lib_link, \"{0}.{1}\".format(shared_lib_base, compat_version))\n\n return compiler(*compiler_args, output=compiler_output)", "def _server_env(load_library, work_path=None):\n if work_path:\n temp = work_path\n else:\n temp = utils.tempdir()\n\n # pylint: disable=unused-variable\n @tvm._ffi.register_func(\"tvm.rpc.server.workpath\", override=True)\n def get_workpath(path):\n return temp.relpath(path)\n\n @tvm._ffi.register_func(\"tvm.rpc.server.load_module\", override=True)\n def load_module(file_name):\n \"\"\"Load module from remote side.\"\"\"\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m\n\n @tvm._ffi.register_func(\"tvm.rpc.server.download_linked_module\", override=True)\n def download_linked_module(file_name):\n \"\"\"Load module from remote side.\"\"\"\n # pylint: disable=import-outside-toplevel\n path = temp.relpath(file_name)\n\n if path.endswith(\".o\"):\n # Extra dependencies during runtime.\n from tvm.contrib import cc as _cc\n\n _cc.create_shared(path + \".so\", path)\n path += \".so\"\n elif path.endswith(\".tar\"):\n # Extra dependencies during runtime.\n from tvm.contrib import cc as _cc, tar as _tar\n\n tar_temp = utils.tempdir(custom_path=path.replace(\".tar\", \"\"))\n _tar.untar(path, tar_temp.temp_dir)\n files = [tar_temp.relpath(x) for x in tar_temp.listdir()]\n _cc.create_shared(path + \".so\", files)\n path += \".so\"\n elif path.endswith(\".dylib\") or path.endswith(\".so\"):\n pass\n else:\n raise RuntimeError(f\"Do not know how to link {file_name}\")\n logger.info(\"Send linked module %s to client\", 
path)\n return bytearray(open(path, \"rb\").read())\n\n libs = []\n load_library = load_library.split(\":\") if load_library else []\n for file_name in load_library:\n file_name = find_lib_path(file_name)[0]\n libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))\n logger.info(\"Load additional library %s\", file_name)\n temp.libs = libs\n return temp" ]
[ "0.6080508", "0.59798527", "0.5882375", "0.5829561", "0.5821797", "0.58100384", "0.57235426", "0.5645031", "0.5568604", "0.54718274", "0.53370386", "0.52522343", "0.5210637", "0.5204081", "0.52006644", "0.51943845", "0.5176367", "0.5163168", "0.5103557", "0.50893724", "0.5061901", "0.5055773", "0.50479585", "0.50438887", "0.50338936", "0.49600917", "0.4944621", "0.49424452", "0.49385715", "0.49261823" ]
0.66148263
0
Reloads settings and stops the sensor daemon if necessary
def reload_settings():
    global settings, cancel_thread
    # cancel the thread if the settings say so
    if cancel_thread is None:
        if settings.get('disabled') is False:
            cancel_thread = start_thread()
    else:
        if settings.get('disabled') is True:
            light_scheme_set = None
            current_timeout = 0
            cancel_thread()
            cancel_thread = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def reload_config(self):\n pass", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def open_settings(self, event):\n settings_dialog = cfg.SettingsDialog(parent=self, exclude=['window'])\n res = settings_dialog.ShowModal()\n if res == wx.ID_OK:\n # Reload relevant parts of app\n restart_monitor_timer = False\n restart_gui_timer = False\n reload_correlations = False\n reload_logger = False\n reload_graph = False\n\n for setting in settings_dialog.changed_settings:\n # If any 'monitor.' settings except 'monitor.divergence_threshold have changed then restart\n # monitoring timer with new settings.\n # If 'monitor.interval has changed then restart gui timer.\n # If 'monitor.monitoring_threshold' has changed, then refresh correlation data.\n # If any 'logging.' settings have changed, then reload logger config.\n if setting.startswith('monitor.') and setting != 'monitor.divergence_threshold':\n restart_monitor_timer = True\n if setting == 'monitor.interval':\n restart_gui_timer = True\n if setting == 'monitor.monitoring_threshold':\n reload_correlations = True\n if setting.startswith('logging.'):\n reload_logger = True\n if setting.startswith('monitor.calculations'):\n reload_graph = True\n\n # Now perform the actions\n if restart_monitor_timer:\n self.__log.info(\"Settings updated. Reloading monitoring timer.\")\n self.__cor.stop_monitor()\n\n # Build calculation params and start monitor\n calculation_params = [self.__config.get('monitor.calculations.long'),\n self.__config.get('monitor.calculations.medium'),\n self.__config.get('monitor.calculations.short')]\n\n self.__cor.start_monitor(interval=self.__config.get('monitor.interval'),\n calculation_params=calculation_params,\n cache_time=self.__config.get('monitor.tick_cache_time'),\n autosave=self.__config.get('monitor.autosave'),\n filename=self.__opened_filename)\n\n if restart_gui_timer:\n self.__log.info(\"Settings updated. Restarting gui timer.\")\n self.timer.Stop()\n self.timer.Start(self.__config.get('monitor.interval') * 1000)\n\n if reload_correlations:\n self.__log.info(\"Settings updated. Updating monitoring threshold and reloading grid.\")\n self.__cor.monitoring_threshold = self.__config.get(\"monitor.monitoring_threshold\")\n self.__refresh_grid()\n\n if reload_logger:\n self.__log.info(\"Settings updated. Reloading logger.\")\n log_config = cfg.Config().get('logging')\n logging.config.dictConfig(log_config)\n\n if reload_graph:\n self.__log.info(\"Settings updated. 
Reloading graph.\")\n if len(self.__selected_correlation) == 2:\n self.show_graph(symbol1=self.__selected_correlation[0], symbol2=self.__selected_correlation[1])", "def _restart(self):\n\n daemon_prefix = ConfigUtil().get_prefix_for_daemon_id(daemon_id=self._daemon_id, conf_dict=self._pyswitchlib_conf)\n\n if daemon_prefix:\n if self._daemon_id in self._pyswitchlib_conf:\n daemon_prefixes = self._pyswitchlib_conf[self._daemon_id].split(':')\n\n if len(daemon_prefixes) > 1:\n daemon_prefixes.remove(daemon_prefix)\n daemon_prefixes.insert(0, daemon_prefix)\n\n self._pyswitchlib_conf[self._daemon_id] = ':'.join(daemon_prefixes)\n ConfigFileUtil().write(filename=pyswitchlib_conf_file, conf_dict=self._pyswitchlib_conf)\n\n super(PySwitchLibApiDaemonRunner, self)._restart()", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def reload_config():\n subprocess.run([SUPERVISOR_CMD, \"reload\"])", "def kill_all(self):\n self.settings['lights_on'] = 12\n self.settings['lights_off'] = 12\n self.settings['overhead_level'] = 0\n self.settings['soil_1'] = 0\n self.settings['soil_2'] = 0\n self.settings['soil_3'] = 0\n self.settings['soil_4'] = 0\n self.scale_overhead_level.set(self.settings['overhead_level'])\n self.scale_smc1.set(self.settings['soil_1'])\n self.scale_smc2.set(self.settings['soil_2'])\n self.scale_smc3.set(self.settings['soil_3'])\n self.scale_smc4.set(self.settings['soil_4'])\n self.active_changes = True # (flag) Once changes are retrieved, we assume that they will be sent to the controller", "def reload(self):\n self.load_config()\n # Seems we need to explicitly refresh this\n if self.main_instance:\n self.main_instance.config = self.config", "def stop_and_restart():\n logging.info(\"Restarting eduzen_bot...\\n\")\n bot.updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def restart_all(self):\n self.threadpool.waitForDone()\n self.update_console(\"UI paused- for restart\")\n self.uiTimer.stop()\n\n self.workerTimer.stop()\n self.update_console(\"Configuration changed - restarting everything\")\n self.chbxProcess.setEnabled(False)\n self.chbxProcess.setChecked(False)\n self.btnSettings.setEnabled(False)\n self.ibkrworker.app.disconnect()\n while self.ibkrworker.app.isConnected():\n print(\"waiting for disconnect\")\n time.sleep(1)\n\n self.ibkrworker = None\n self.ibkrworker = IBKRWorker(self.settings)\n self.connect_to_ibkr()\n\n i = 4", "def _reloadFabric(self, fabric):\n\n # Execute command to poweroff/on\n self.device.configure(\n 'poweroff xbar {}\\nno poweroff xbar {}'.format(fabric, fabric))", "def reconfigure(self):\n log.debug('Reconfiguring and restarting the DHCP daemon...')\n\n # Don't set the daemon running status here, but let the status\n # check take care of that.\n\n p = Properties(self.storage, CONFIG_SECTION)\n p.addCallback(self.changed).\\\n addCallback(lambda trigger: p.load()).\\\n addCallback(self.emit_config, p).\\\n addCallback(self.restart_daemon).\\\n addErrback(self.restart_error)", "def reset(self):\n if self.arduino:\n self.arduino.stop()\n\n time.sleep(.5)\n\n self.arduino = arduino.find_arduino(self.arduino_serial)\n self.arduino.start_monitor()\n\n self.driver.stop()\n self.last_control = time.time()", "def reset(self):\n self.work_state = work_state[\"Measuring\"]\n self.report_mode = report_mode[\"Initiative\"]\n self.duty_cycle = 0\n self.logger.info(\"{}: sensor resetted.\".format(self.sensor_name))", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n 
self.reconnect()", "def quit(self):\n self.is_running = False\n # lösche callbacks\n self.clear_on_config_change()", "def reload_config(self):\n if self.faucet is not None:\n self.faucet.reload_config(None)", "def run(self):\n self._start_servers()\n monitor = KodiMonitor(self.nx_common, self.nx_common.log)\n while not monitor.abortRequested():\n monitor.update_playback_progress()\n try:\n if self.library_update_scheduled() and self._is_idle():\n self.update_library()\n except RuntimeError as exc:\n self.nx_common.log(\n 'RuntimeError: {}'.format(exc), xbmc.LOGERROR)\n if monitor.waitForAbort(5):\n break\n self._shutdown()", "def restart(self):\n self.iic.set_flag(REG.CTRL_REG2.RST)\n time.sleep(0.01)\n self.conf = Configuration()", "def _refresh_discovery(self):\n if self.terminate_flag:\n return\n\n self.devices = discover_drones(self.ip_range, self.skyserve_port)\n time.sleep(self.refresh_interval / 1000)\n self._refresh_discovery()", "def refresh_configuration(self):\n pass", "def reset_and_stop(self):\n self.enabled = False\n self.start_time = None", "def acShutdown():\n # Update config if necessary\n if cfg.update_cfg:\n cfg.save()", "def stop(self):\n self._unbind_observers()\n self._pref_decls.clear()\n pref_path = os.path.join(self.default_folder, self.default_file)\n try:\n prefs = ConfigObj()\n prefs.update(self._prefs)\n prefs.filename = pref_path\n prefs.write()\n except Exception:\n print 'Invalid pref path'\n\n def_path = os.path.join(MODULE_PATH, 'default.ini')\n try:\n defaults = ConfigObj(def_path)\n defaults['folder'] = self.default_folder\n defaults['file'] = self.default_file\n defaults.write()\n except Exception:\n print 'Invalid default pref path'", "def handleReload(self, confInfo=None):", "def cleanup_revpi(self):\n self.rpi.core.a1green.value = False\n self.rpi.core.a1red.value = False\n self.rpi.io.main_relay.value = False\n self.rpi.io.relay_1.value = False\n self.rpi.io.relay_2.value = False\n self.rpi = revpimodio2.RevPiModIO(autorefresh=False)\n self.opc_server.stop()\n print(\"exit\")\n # self.master.destroy()\n exit(1)", "def stop_periodic_update(self) -> None:\n self._stop_periodic_update()", "def reload():\n xd = display.XKCDDisplayService()\n if xd.is_running():\n click.echo(\"gracefully reloading changes\")\n xd.send_signal(signal.SIGHUP)\n else:\n click.echo(\"xkcd service not running\")", "def __load_settings(self):\n\n self.app_settings = sublime.load_settings(self.SETTINGS_FILE)\n self.__refresh_settings(True)\n\n # The settings may change during execution so we need to listen for changes\n self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)" ]
[ "0.6269248", "0.6228141", "0.6084714", "0.6025635", "0.59918356", "0.5985085", "0.59474224", "0.5947158", "0.5941444", "0.59340006", "0.59218776", "0.5914287", "0.58856106", "0.5847929", "0.5837395", "0.5835373", "0.5829937", "0.5820057", "0.5804429", "0.58007795", "0.5788595", "0.57838416", "0.57476926", "0.57446617", "0.57392204", "0.5730364", "0.5724934", "0.570841", "0.5692088", "0.56920034" ]
0.7108176
0
Repeatedly calls a function with a set interval and returns a thread cancel handle
def call_repeatedly(interval, function, args):
    stopped = threading.Event()

    def loop():
        while not stopped.wait(interval):
            function(**args)

    threading.Thread(target=loop).start()
    # return the thread closing handle
    return stopped.set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __set_interval(self, func, sec):\n\n def func_wrapper():\n self.__set_interval(func, sec)\n func()\n\n t = threading.Timer(sec, func_wrapper)\n t.start()\n return t", "def interval(cls, timeout, f, immediate = False, **kwargs):\n def _interval(*args, **kwargs):\n if immediate: f(*args, **kwargs)\n while True:\n cls.sleep(timeout)\n try:\n f(*args, **kwargs)\n except TaskletExit:\n break\n except:\n logging.exception(\"unhandled exception in Tasklet.interval\")\n return cls.new(_interval, **kwargs)", "def schedule(function_pointer: Callable, interval: float):\n pyglet.clock.schedule_interval(function_pointer, interval)", "def respect_interval(interval, function):\n def wrapper():\n last_called = clock()\n\n while True:\n now = clock()\n dt = now - last_called\n\n if dt >= interval:\n function()\n last_called = now\n\n yield\n\n return wrapper().__next__", "def __timeout(self, seconds, func, *args):\n t = threading.Timer(seconds, func, *args)\n self._timer = t\n t.start()", "async def _async_repeat(self, func):\n while True:\n start = time.monotonic()\n await func()\n timeout = (self.period/1000.) - (time.monotonic()-start)\n if timeout > 0:\n await asyncio.sleep(timeout)", "def run_continuously(self, interval: int = 1):\n cease_continuous_run = threading.Event()\n\n class ScheduleThread(threading.Thread):\n @classmethod\n def run(cls):\n while not cease_continuous_run.is_set():\n schedule.run_pending()\n time.sleep(interval)\n\n continuous_thread = ScheduleThread()\n continuous_thread.start()\n return cease_continuous_run", "def repeat_every(seconds, fn):\n def wrapper(scheduler):\n try:\n fn()\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n except:\n print('Error executing function')\n\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n scheduler.run()", "def on_interval(interval=0.0):\n\n def decorator(func):\n @wraps(func)\n async def wrapper(*args, **kwargs):\n while True:\n start_time = time.time()\n await func(*args, **kwargs)\n elapsed = time.time() - start_time\n await asyncio.sleep(max(0, interval - elapsed))\n\n wrapper._is_interval_task = True\n return wrapper\n\n return decorator", "def loop(self, function, *args, **kwargs):\n loop = functools.partial(function, *args, **kwargs)\n timer = _Timer(self, loop, True)\n self._callbacks.append(timer)\n\n return timer", "def start(self, sec, callFunc, *args, **kwargs):\n self.cancel()\n \n def doit(args=args, kwargs=kwargs):\n self._timerID = None\n callFunc(*args, **kwargs)\n\n self._timerID = self._tkWdg.after(int(0.5 + (1000.0 * sec)), doit)", "def __init__(self, interval, func, **kwargs):\n threading.Thread.__init__(self, name=\"PeriodicExecutor\")\n self.setDaemon(1)\n self._finished = threading.Event()\n self._interval = interval\n self._func = func\n self._params = kwargs", "def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.function = function\n self.interval = interval\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()", "def run_no_args(self):\n while True:\n if self.cancelled:\n return\n self.func()\n time.sleep(self.sleep_time / 1000.00)", "def wrapper(self, *args, **kwargs):\n if self.afterid:\n self.master.after_cancel(self.afterid)\n function(self, *args, **kwargs)\n self.afterid = self.master.after(5000, self.cycle)", "def _ontimer(self, fun, t):\n if t == 0:\n self.cv.after_idle(fun)\n else:\n self.cv.after(t, fun)", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", 
"def ontimer(self, fun, t=0):\n self._ontimer(fun, t)", "def timeout_handler(interval, recurring = None):\n def decorator(func):\n \"\"\"The decorator\"\"\"\n func._pyxmpp_timeout = interval\n func._pyxmpp_recurring = recurring\n return func\n return decorator", "def Schedule(interval=3600):\n # TODO:: if func need to return something\n def schedule(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n while True:\n start_time = time.perf_counter()\n func(*args, **kwargs)\n running_time = time.perf_counter()-start_time\n latency = interval-running_time\n logger.debug(\n f'Next program:{func.__name__} will start at {latency} sec later.')\n time.sleep(latency)\n return wrapper(*args, **kwargs)\n return wrapper\n return schedule", "async def _run(self) -> None:\n while True:\n try:\n await self._func()\n except asyncio.CancelledError:\n raise\n except Exception:\n logging.exception('Executing periodic task.')\n await asyncio.sleep(self._ival)", "def alarm(self, interval, call):", "def clockit(func):\n def new(*args, **kw):\n t = Timer()\n retval = func(*args, **kw)\n t.stop()\n print(\"{} in {}\".format(func.__name__, t))\n del t\n return retval\n return new", "async def cleanup_run(self, interval):\n # Reference workaround: when a Ratelimit object has been deleted,\n # this coroutine would still hold a reference to it.\n self = weakref.proxy(self)\n\n while True:\n try:\n await asyncio.sleep(interval)\n self.cleanup()\n except (ReferenceError, asyncio.CancelledError):\n break", "def setInterval(interval, times = -1):\n\n # Validate the parameters.\n if isinstance(interval, int):\n interval = float(interval)\n elif not isinstance(interval, float):\n raise TypeError(\"Expected int or float, got %r instead\" % type(interval))\n if not isinstance(times, int):\n raise TypeError(\"Expected int, got %r instead\" % type(times))\n\n # Code adapted from: http://stackoverflow.com/q/5179467\n\n # This will be the actual decorator,\n # with fixed interval and times parameter\n def outer_wrap(function):\n if not callable(function):\n raise TypeError(\"Expected function, got %r instead\" % type(function))\n\n # This will be the function to be\n # called\n def wrap(*args, **kwargs):\n\n stop = Event()\n\n # This is another function to be executed\n # in a different thread to simulate setInterval\n def inner_wrap():\n i = 0\n while i != times and not stop.isSet():\n stop.wait(interval)\n function(*args, **kwargs)\n i += 1\n\n t = Timer(0, inner_wrap)\n t.daemon = True\n t.start()\n\n return stop\n\n return wrap\n\n return outer_wrap", "def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):\n import threading\n\n class InterruptableThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = default\n\n def run(self):\n try:\n self.result = func(*args, **kwargs)\n except:\n pass\n\n it = InterruptableThread()\n it.start()\n it.join(timeout_duration)\n return it.result", "def func_with_timeout(*args, **kwargs):\n kwargs['timeout'] = next(timeouts)\n return func(*args, **kwargs)", "def timeout(func, args=(), kwargs={}, timeout_duration=10, default=None): \n import threading\n class InterruptableThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = default\n def run(self):\n self.result = func(*args, **kwargs)\n it = InterruptableThread()\n it.start()\n it.join(timeout_duration)\n if it.isAlive():\n return it.result\n else:\n return it.result", "def timer_thread_function():\n while True:\n for i, timer in 
enumerate(superglobals.timer_list):\n if timer.seconds - time.perf_counter() <= 0 and timer.bits & 0b01:\n superglobals.timer_list[i].bits &= 0b10\n for _ in range(10):\n curses.beep()\n time.sleep(0.05)\n for i, countdown in enumerate(superglobals.countdown_list):\n if countdown.seconds - time.perf_counter() <= 0 and \\\n countdown.bits & 0b01:\n superglobals.countdown_list[i].bits &= 0b00\n for _ in range(10):\n curses.beep()\n time.sleep(0.05)", "def cycle(self, interval, function, *args, **kwargs):\n if interval <= 0:\n raise ValueError(\"Interval must be greater than 0 seconds.\")\n\n cycle = functools.partial(function, *args, **kwargs)\n timer = _Timer(self, cycle, True, interval, self.latest_poll_time + interval)\n bisect.insort(self._deferreds, timer)\n\n return timer" ]
[ "0.67053866", "0.66891724", "0.64283514", "0.6361471", "0.63328284", "0.6324299", "0.6230495", "0.611252", "0.598195", "0.5951146", "0.59462386", "0.5921696", "0.59215885", "0.5894177", "0.5880573", "0.58379", "0.5750367", "0.57432234", "0.56841433", "0.5684028", "0.56767637", "0.566983", "0.56529313", "0.56467557", "0.56367505", "0.56204146", "0.56093454", "0.5604419", "0.5585623", "0.5538338" ]
0.7836953
0
Reads the light sensor data and changes user's color scheme if the reading is above a threshold
def read_sensor_data():
    global light_scheme_set, current_timeout
    # prevents very rapid changes of the color scheme
    if current_timeout is not 0:
        current_timeout -= 1
        return
    else:
        # call the shared library's sensor code
        reading = dll.readSensor()
        scheme = None
        # check if the scheme needs to be changed
        if reading >= settings.get('threshold') and light_scheme_set is not True:
            scheme = settings.get('light_color_scheme')
            light_scheme_set = True
        elif reading < settings.get('threshold') and light_scheme_set is not False:
            scheme = settings.get('dark_color_scheme')
            light_scheme_set = False
        # change user settings
        if scheme is not None:
            global_settings = sublime.load_settings('Preferences.sublime-settings')
            if global_settings.get('color_scheme') != scheme:
                global_settings.set('color_scheme', scheme)
                sublime.save_settings('Preferences.sublime-settings')
            current_timeout = settings.get('cycle_timeout')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down", "def check_light(light: pykulersky.Light):\n light.connect()\n light.get_color()", "def update(self):\n try:\n if not self._light.connected:\n self._light.connect()\n # pylint: disable=invalid-name\n r, g, b, w = self._light.get_color()\n except pykulersky.PykulerskyException as exc:\n if self._available:\n _LOGGER.warning(\"Unable to connect to %s: %s\", self._light.address, exc)\n self._available = False\n return\n if not self._available:\n _LOGGER.info(\"Reconnected to %s\", self.entity_id)\n self._available = True\n\n hsv = color_util.color_RGB_to_hsv(r, g, b)\n self._hs_color = hsv[:2]\n self._brightness = int(round((hsv[2] / 100) * 255))\n self._white_value = w", "def getLightSensor() -> int:\n pass", "def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))", "def update(self):\n # Light sensor reading: 16-bit integer\n self.light = self.envirophat.light.light()\n if self.use_leds:\n self.envirophat.leds.on()\n # the three color values scaled against the overall light, 0-255\n self.light_red, self.light_green, self.light_blue = self.envirophat.light.rgb()\n if self.use_leds:\n self.envirophat.leds.off()\n\n # accelerometer readings in G\n (\n self.accelerometer_x,\n self.accelerometer_y,\n self.accelerometer_z,\n ) = self.envirophat.motion.accelerometer()\n\n # raw magnetometer reading\n (\n self.magnetometer_x,\n self.magnetometer_y,\n self.magnetometer_z,\n ) = self.envirophat.motion.magnetometer()\n\n # temperature resolution of BMP280 sensor: 0.01°C\n self.temperature = round(self.envirophat.weather.temperature(), 2)\n\n # pressure resolution of BMP280 sensor: 0.16 Pa, rounding to 0.1 Pa\n # with conversion to 100 Pa = 1 hPa\n self.pressure = round(self.envirophat.weather.pressure() / 100.0, 3)\n\n # Voltage sensor, reading between 0-3.3V\n (\n self.voltage_0,\n self.voltage_1,\n self.voltage_2,\n self.voltage_3,\n ) = self.envirophat.analog.read_all()", "def light_standby():\n for led in leds:\n led.on()\n\n rgb_driver.pulse(on_color=(scale[\"R\"], scale[\"G\"], scale[\"B\"]), off_color=(0,0,0))", "def lightness(self):\n min_component = min(self.red, self.green, self.blue)\n max_component = max(self.red, self.green, self.blue)\n avg = (max_component + min_component) / 2\n light = avg / 255\n return light", "def light(brightness, filter):\n brightness = clamp(MIN_BRIGHTNESS, round(brightness), MAX_BRIGHTNESS)\n for col in range(DISPLAY_WIDTH):\n for row in range(DISPLAY_HEIGHT):\n if filter(col, row):\n microbit.display.set_pixel(col, row, brightness)", "def __init__(self, light: pykulersky.Light):\n self._light = light\n self._hs_color = None\n self._brightness = None\n self._white_value = None\n self._available = True", "def __init__(self, device: SensemeDevice) -> None:\n super().__init__(device, f\"{device.name} Light\")\n self._attr_supported_color_modes = {ColorMode.COLOR_TEMP}\n self._attr_color_mode = ColorMode.COLOR_TEMP\n self._attr_min_mireds = 
color_temperature_kelvin_to_mired(\n device.light_color_temp_max\n )\n self._attr_max_mireds = color_temperature_kelvin_to_mired(\n device.light_color_temp_min\n )", "def update_lights(self, light_data):\n self.current_brightness = self.brightness\n self.brightness = light_data.get('brightness')\n self.power_state = light_data.get('power_state')\n self._update_board()", "def read_light_bump(self, light_bump):\n data = self._read_packet(light_bump, Bump.LIGHT_DATA_BYTES)\n\n if len(data) == Bump.LIGHT_DATA_BYTES:\n return struct.unpack(\">h\", data)[0]\n else:\n return 0", "def traffic_light_cb(self, msg):\n\n # Save the traffic light array\n self.lights = msg.lights", "def lightness_correction(self):\n points = self.color_lookup_table_points\n lightness_max_value = math.sqrt(3 * (255**2))\n deadpool = list()\n for index, point in enumerate(points[0]):\n point = self.get_value_tuple(index)\n lightness = int(math.sqrt(point[0]**2 + point[1]**2 + point[2]**2) * 255 / lightness_max_value)\n if not self.to_dark < lightness < self.to_bright:\n deadpool.append(index)\n self.color_lookup_table_points = (np.delete(points[0], deadpool),\n np.delete(points[1], deadpool),\n np.delete(points[2], deadpool))\n self.point_count = len(self.color_lookup_table_points[0])", "def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[light])", "def light_row(row):\n def filter_row(col, rw):\n \"\"\"For a given pixel position, turn on if it matches our row\n \"\"\"\n return rw == row\n light(MAX_BRIGHTNESS, filter_row)", "def update(self) -> None:\n state = int(self._light.is_on())\n self._state = bool(state)\n self._brightness = to_hass_level(state)", "def determine_intensity_single_channel(pi, pin_light, i2c_multiplexer_handle, i2c_sensor_handle, channel_number):\n pi.write(pin_light, 1)\n i2c_multiplexer_select_channel(pi,\n i2c_multiplexer_handle, channel_number)\n intensity = i2c_sensor_handle.ch0_light\n timepoint = time.time()\n time.sleep(0.25)\n pi.write(pin_light, 0)\n return timepoint, intensity", "def read(self):\n self.pi.write(self.gpio, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.gpio, pigpio.INPUT)\n self.pi.set_watchdog(self.gpio, 200)\n time.sleep(0.2)", "def track_moisture_level():\n try:\n normal_level_init = 470\n low_level_init = 560\n\n global LIMIT_FLAG\n sensor_read = sensorData.read_moisture()\n generate_json.define_structure(\"moisture\", sensor_read)\n\n if sensor_read > low_level_init:\n if LIMIT_FLAG != 3:\n # When it is dry (Moisture Level Low)\n LIMIT_FLAG = 3\n blynk.notify('Moisture Level Low! Irrigation Needed')\n blynk.email('[email protected]', 'Alert: Moisture Level Low',\n 'Moisture Level Low! 
Irrigation Needed')\n logging_write()\n elif normal_level_init <= sensor_read <= low_level_init:\n if LIMIT_FLAG != 2:\n LIMIT_FLAG = 2\n logging_write()\n else:\n if LIMIT_FLAG != 1:\n LIMIT_FLAG = 1\n logging_write()\n return sensor_read\n\n except Exception as e:\n logging_write(e)", "def __lightness(self, color):\n hsv = color.toHsv()\n return hsv.valueF()", "def update(self) -> None:\n self._light.update()\n self._state = self._light.is_on()\n self._brightness = self._light.brightness", "def illuminance_sensor():\n\n\tsensor_name = \"illuminance\"\n\treg_addr = 26\n\tdata_len = 1\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Illuminance\", Illuminance, 2)\n\tresult = data.illuminance\n\n\tdelete_sensor(sensor_name)\n\treturn result", "def brightness(self):\n _LOGGER.error(\"inside brightness\")\n url = self.urlx + '/dimstate'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = int(int(json_data['dimState'])*1.5)\n\n # if int(self._dimmer) < 170:\n self._dimmer = state\n\n return self._dimmer", "def high_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri + 50 \r\n if bri > 255:\r\n bri = 255 \r\n self.b.set_light(light,'bri',bri)", "def lightness(color):\n\n strongest = max(color.red, color.green, color.blue)\n weakest = min(color.red, color.green, color.blue)\n return 0.5 * (strongest + weakest) / 255", "async def __async_process_light_packet(self, light_data, color_space):\n light_id = str(light_data[1] + light_data[2])\n light_conf = await self.config.async_get_light_config(light_id)\n\n # throttle command to light\n # TODO: can we send udp messages to supported lights such as esphome or native ZHA ?\n # For now we simply unpack the entertainment packet and forward\n # individual commands to lights by calling hass services.\n throttle_ms = light_conf.get(\"throttle\", DEFAULT_THROTTLE_MS)\n if not self.__update_allowed(light_id, light_data, throttle_ms):\n return\n\n entity_id = light_conf[\"entity_id\"]\n svc_data = {\"entity_id\": entity_id}\n if color_space == COLOR_TYPE_RGB:\n svc_data[HASS_ATTR_RGB_COLOR] = [\n int((light_data[3] * 256 + light_data[4]) / 256),\n int((light_data[5] * 256 + light_data[6]) / 256),\n int((light_data[7] * 256 + light_data[8]) / 256),\n ]\n svc_data[HASS_ATTR_BRIGHTNESS] = sum(svc_data[HASS_ATTR_RGB_COLOR]) / len(\n svc_data[HASS_ATTR_RGB_COLOR]\n )\n else:\n svc_data[HASS_ATTR_XY_COLOR] = [\n float((light_data[3] * 256 + light_data[4]) / 65535),\n float((light_data[5] * 256 + light_data[6]) / 65535),\n ]\n svc_data[HASS_ATTR_BRIGHTNESS] = int(\n (light_data[7] * 256 + light_data[8]) / 256\n )\n\n # update allowed within throttling, push to light\n if throttle_ms:\n svc_data[HASS_ATTR_TRANSITION] = throttle_ms / 1000\n else:\n svc_data[HASS_ATTR_TRANSITION] = 0\n await self.hue.hass.call_service(\"light\", \"turn_on\", svc_data)\n self.hue.hass.states[entity_id][\"attributes\"].update(svc_data)", "def test_light_sensor(self):\n with patch.dict(TYPES, {'LightSensor': self.mock_type}):\n state = State('sensor.light', '900',\n {ATTR_DEVICE_CLASS: 'illuminance'})\n get_accessory(None, state, 2, {})", "def traffic_cb(self, msg):\n prev_red_light_waypoint = self.red_light_waypoint\n self.red_light_waypoint = msg.data if msg.data >= 0 else None\n if prev_red_light_waypoint != 
self.red_light_waypoint:\n if debugging:\n rospy.loginfo(\"TrafficLight changed: %s\", str(self.red_light_waypoint))\n if publish_on_light_change:\n self.update_and_publish() # Refresh if next traffic light has changed" ]
[ "0.63356465", "0.6309102", "0.6276284", "0.6237545", "0.60673153", "0.606334", "0.59688765", "0.5941587", "0.59411585", "0.5939015", "0.59064615", "0.589552", "0.58872604", "0.583961", "0.5819576", "0.58051085", "0.5769483", "0.5767761", "0.5735046", "0.5708469", "0.5697487", "0.5684011", "0.5676918", "0.56627905", "0.56546307", "0.5627313", "0.5624116", "0.56178224", "0.5611094", "0.5609022" ]
0.7226849
0
Add a version to a template version manager.
def insert(template_version_manager, template, request):
    # save the template in database
    template_api.upsert(template)
    try:
        # insert the initial template in the version manager
        version_manager_api.insert_version(
            template_version_manager, template, request=request
        )
        # insert the version manager in database
        version_manager_api.upsert(template_version_manager, request=request)
        # get template display name
        display_name = get_latest_version_name(template_version_manager)
        # update saved template
        main_template_api.set_display_name(template, display_name, request=request)
        # return version manager
        return template_version_manager
    except Exception as e:
        main_template_api.delete(template, request=request)
        raise e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, bento_name, bento_version):", "def add_version(self, id, name, files):\n v = Version(self.backend, self.path, id, name, files)\n v.save()\n\n # Do not put duplicated version IDs into the index.\n for v in self.versions:\n if v['id'] == id:\n return v\n\n self.versions.append(dict(id=id, name=name))\n self.save_index()\n return v", "def command_new_version(self):\n repoinit.new_version(*self.args())", "def increment_version_on_insert(obj):\n history_model = obj.previous_version()\n\n if history_model is not None:\n obj.version = history_model.version + 1", "def add(self):\r\n self._svn('add')", "def upsert(version_manager, request):\n return version_manager.save_version_manager()", "def insert(version: str, file: str, force: bool, compute: bool):\n timeline.insert_version(version, file, force, compute)", "def CreateVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def create_initial_version_after_adding(context, event):\n\n pr = getToolByName(context, \"portal_repository\", None)\n if pr is None:\n # This can happen, e.g., when adding a Plone Site with versioning\n # and portal_repository is not yet created\n return\n\n if not pr.isVersionable(context):\n # object is not versionable\n return\n\n if not pr.supportsPolicy(context, \"at_edit_autoversion\"):\n # automatic versioning disabled for this portal type, so we don't\n # need to create an initial version\n return\n\n # get the change not\n default_changeNote = _(\"initial_version_changeNote\", default=\"Initial version\")\n if getattr(context, \"REQUEST\", None):\n changeNote = get_change_note(context.REQUEST, default_changeNote)\n else:\n changeNote = None\n\n changed = False\n if not base_hasattr(context, \"version_id\"):\n # no initial version, let's create one..\n changed = True\n\n else:\n try:\n changed = not pr.isUpToDate(context, context.version_id)\n except ArchivistUnregisteredError:\n # The object is not actually registered, but a version is\n # set, perhaps it was imported, or versioning info was\n # inappropriately destroyed\n changed = True\n\n if not changed:\n return\n\n try:\n context.portal_repository.save(obj=context, comment=changeNote)\n except FileTooLargeToVersionError:\n pass # the on edit save will emit a warning", "def do_create_version(**kwargs):\n version_params = {\n \"name\": kwargs['dag_run'].conf.get('model_version'),\n \"description\": 'Version 1',\n \"runtimeVersion\": kwargs['dag_run'].conf.get('tf_version'),\n \"deploymentUri\": 'gs://{}/{}'.format(COMPOSER_BUCKET_NAME, PREFIX_FINAL_MODEL)\n }\n\n ti = kwargs['ti']\n\n mle = MLEngineHook()\n\n model_name = kwargs['dag_run'].conf.get('model_name')\n model_versions = ti.xcom_pull(key='model_versions', task_ids='list_versions')\n\n version_path = 'projects/{}/models/{}/versions/{}'.format(PROJECT,\n model_name,\n version_params['name'])\n\n if version_path in [v['name'] for v in model_versions]:\n logging.info(\"Delete previously version of the model to overwrite.\")\n mle.delete_version(PROJECT, model_name, version_params['name'])\n\n mle.create_version(PROJECT, model_name, version_params)", "def new_version(self, latest_version_id: uplink.Path(name=\"id\")):\n pass", "def set_version(self, bundle, ctx, filename, version):", "def switch_to_version(self, version):\n self.current_version = version\n self.save()", "def _set_version(self, version):\n with self.db.atomic():\n JambiModel.delete().execute()\n JambiModel.create(ref=str(version))\n self.logger.debug('Set jambi version to 
{}'.format(version))", "def _set_database_version(db, version):\n if not isinstance(version, int):\n raise TypeError(\"Version must be integer, not %s : %s\" % (\n version, type(version)))\n create_metadata = \\\n \"CREATE TABLE %s (version INT)\" % METADATA_COLUMN_NAME\n execute_sql(db, create_metadata)\n insert_version = \\\n \"INSERT INTO %s VALUES (%s)\" % (METADATA_COLUMN_NAME, version)\n execute_sql(db, insert_version)", "def version(self, version):\n \n self._version = version", "def version(self, version):\n self._version = version", "def version(self, version):\n self._version = version", "def new(versionsPath, versionPattern=''):\n if not versionPattern:\n versionPattern = os.environ.get('KOMBI_VERSION_PATTERN', DEFAULT_VERSION_PATTERN)\n\n version = __queryLatest(versionsPath, versionPattern)\n return label(\n version + 1,\n versionPattern\n )", "def add_new_package_version(self, package, version,\n released=None, skip=''):\n with self._conn.begin():\n if released is None:\n released = datetime.now(tz=UTC)\n return self._conn.execute(\n \"VALUES (add_new_package_version(%s, %s, %s, %s))\",\n (package, version,\n released.astimezone(UTC).replace(tzinfo=None), skip)\n ).scalar()", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version" ]
[ "0.6088367", "0.58284575", "0.56480026", "0.5622442", "0.5511425", "0.5502313", "0.5485509", "0.5462675", "0.5435368", "0.5433973", "0.542946", "0.5405268", "0.5205288", "0.51675516", "0.5159601", "0.5157442", "0.5153125", "0.5153125", "0.51464003", "0.51427025", "0.51373017", "0.51373017", "0.51373017", "0.51373017", "0.51373017", "0.51373017", "0.51373017", "0.51373017", "0.51373017", "0.51373017" ]
0.6161083
0
Test that mineral list with no pk will redirect
def test_mineral_list_redirect(self):
    resp = self.client.get(reverse('minerals:list'))
    self.assertEqual(resp.status_code, 302)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_show_shoplists_without_login_redirects(self):\n tester = app.test_client(self)\n response = tester.post('/show_shoplists', follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def test_show_activities_without_login_redirects(self):\n User.users = {}\n tester = app.test_client(self)\n response = tester.post('/show_shoplists', follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def test_shoplists_dashboard_without_login_redirects(self):\n tester = app.test_client(self)\n response = tester.get('/show_shoplists', follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def test_list_none(self):\n self.model.objects.all().delete()\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertEquals(response.context['object_list'].count(), 0)", "def test_mineral_detail_view_404(self):\n resp = self.client.get(reverse(\n 'minerals:detail',\n kwargs={'name': 'nothing'}))\n self.assertEqual(resp.status_code, 404)", "def test_list_view(self):\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.htsv.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_empty_list(self, client):\n url = reverse('users:list')\n response = client.get(url)\n assert response.status_code == 200\n assert 'There is no users yet' in str(response.content)", "def test_post_head_unauthorized(self):\n url = reverse('post-list')\n response = self.client.head(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_no_listings(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context[\"listings\"], [])", "def test_mineral_list_view(self):\n resp = self.client.get(reverse('minerals:list'))\n self.assertEqual(resp.status_code, 200)\n for mineral in self.minerals:\n self.assertIn(self.mineral, resp.context['minerals'])\n self.assertContains(resp, self.mineral.short_name)\n self.assertTemplateUsed(resp, 'minerals/mineral_list.html')", "def test_list_login():\n assert_redirect_to_login('/')\n assert_not_redirect_to_login('/')", "def test_url_vessel_empty_list(self):\n url = reverse('vessel-list')\n response = self.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_lesson_list_empty(client, auth_user, init_database, add_data):\n response = client.post(url_for('root.index'),data=dict(email='[email protected]',password='password'))\n # try to get home\n response = client.get(url_for('lessons.list'))\n assert response.status_code == 200\n assert b'list-group-item-action disabled' in response.data #part of the table with lessons", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_redirect_register_home(self):\n with self.client as c:\n\n res = c.get(\"/\")\n self.assertEqual(res.status_code, 302)\n\n res = 
c.get(\"/users/tester1\")\n self.assertEqual(res.status_code, 302)\n\n res = c.get(\"/lists/111111\")\n self.assertEqual(res.status_code, 200)", "def test_get_list_or_404(self):\n self.Person.drop_collection()\n test_person = self.Person(name='Test')\n test_person.save()\n t2 = get_list_or_404(self.Person)\n self.assertEqual(len(t2), 1)\n self.Person.drop_collection()", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_invalid_resource_list_404(self):\n url = reverse(\"resources:resources\", (\"invalid\",))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def default(self, tg_errors=None):\n raise redirect('list')", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_list_not_admin2(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(\n reverse('retreat:waitqueuenotification-list'),\n format='json',\n )\n\n content = {\n 'count': 0,\n 'next': None,\n 'previous': None,\n 'results': [],\n }\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_01_admin_index_anonymous(self):\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.data)\r\n assert \"Please sign in to access this page\" in res.data, err_msg" ]
[ "0.6804747", "0.6639353", "0.648068", "0.6388541", "0.63525784", "0.6332835", "0.630934", "0.6214183", "0.617335", "0.61362755", "0.6120044", "0.60898554", "0.6088404", "0.6053614", "0.6034676", "0.6034676", "0.6034676", "0.6029755", "0.6013406", "0.60053384", "0.60053384", "0.60053384", "0.6002563", "0.5997434", "0.5981985", "0.5981985", "0.5981985", "0.5981985", "0.5979893", "0.5951142" ]
0.7354163
0
Train the network for ``max_steps`` steps. After each training epoch, validation performance is measured and visualizations are computed and logged to tensorboard.
def train(self, max_steps: int = 1, max_runtime=3600 * 24 * 7) -> None:
    self.start_time = datetime.datetime.now()
    self.end_time = self.start_time + datetime.timedelta(seconds=max_runtime)
    while not self.terminate:
        try:
            # --> self.train()
            self.model.train()

            # Scalar training stats that should be logged and written to tensorboard later
            stats: Dict[str, float] = {'tr_loss': 0.0}
            # Other scalars to be logged
            misc: Dict[str, float] = {}
            # Hold image tensors for real-time training sample visualization in tensorboard
            images: Dict[str, torch.Tensor] = {}

            running_acc = 0
            running_mean_target = 0
            running_vx_size = 0
            timer = Timer()
            for inp, target in self.train_loader:
                inp, target = inp.to(self.device), target.to(self.device)

                # forward pass
                out = self.model(inp)
                loss = self.criterion(out, target)
                if torch.isnan(loss):
                    logger.error('NaN loss detected! Aborting training.')
                    raise NaNException

                # update step
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                # Prevent accidental autograd overheads after optimizer step
                inp.detach_()
                target.detach_()
                out.detach_()
                loss.detach_()

                # get training performance
                stats['tr_loss'] += float(loss)
                acc = metrics.bin_accuracy(target, out)  # TODO
                mean_target = target.to(torch.float32).mean()
                print(f'{self.step:6d}, loss: {loss:.4f}', end='\r')
                self._tracker.update_timeline([self._timer.t_passed, float(loss), mean_target])

                # Preserve training batch and network output for later visualization
                images['inp'] = inp
                images['target'] = target
                images['out'] = out

                # this was changed to support ReduceLROnPlateau which does not implement get_lr
                misc['learning_rate'] = self.optimizer.param_groups[0]["lr"]  # .get_lr()[-1]
                # update schedules
                for sched in self.schedulers.values():
                    # support ReduceLROnPlateau; doc. uses validation loss instead
                    # http://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.ReduceLROnPlateau
                    if "metrics" in inspect.signature(sched.step).parameters:
                        sched.step(metrics=float(loss))
                    else:
                        sched.step()

                running_acc += acc
                running_mean_target += mean_target
                running_vx_size += inp.numel()

                self.step += 1
                if self.step >= max_steps:
                    logger.info(f'max_steps ({max_steps}) exceeded. Terminating...')
                    self.terminate = True
                    break
                if datetime.datetime.now() >= self.end_time:
                    logger.info(f'max_runtime ({max_runtime} seconds) exceeded. Terminating...')
                    self.terminate = True
                    break

            stats['tr_accuracy'] = running_acc / len(self.train_loader)
            stats['tr_loss'] /= len(self.train_loader)
            misc['tr_speed'] = len(self.train_loader) / timer.t_passed
            misc['tr_speed_vx'] = running_vx_size / timer.t_passed / 1e6  # MVx
            mean_target = running_mean_target / len(self.train_loader)
            if self.valid_dataset is None:
                stats['val_loss'], stats['val_accuracy'] = float('nan'), float('nan')
            else:
                valid_stats = self.validate()
                stats.update(valid_stats)

            # Update history tracker (kind of made obsolete by tensorboard)
            # TODO: Decide what to do with this, now that most things are already in tensorboard.
            if self.step // len(self.train_dataset) > 1:
                tr_loss_gain = self._tracker.history[-1][2] - stats['tr_loss']
            else:
                tr_loss_gain = 0
            self._tracker.update_history([
                self.step, self._timer.t_passed, stats['tr_loss'], stats['val_loss'],
                tr_loss_gain, stats['tr_accuracy'], stats['val_accuracy'],
                misc['learning_rate'], 0, 0
            ])  # 0's correspond to mom and gradnet (?)
            t = pretty_string_time(self._timer.t_passed)
            loss_smooth = self._tracker.loss._ema

            # Logging to stdout, text log file
            text = "%05i L_m=%.3f, L=%.2f, tr_acc=%05.2f%%, " % (self.step, loss_smooth, stats['tr_loss'], stats['tr_accuracy'])
            text += "val_acc=%05.2f%s, prev=%04.1f, L_diff=%+.1e, " % (stats['val_accuracy'], "%", mean_target * 100, tr_loss_gain)
            text += "LR=%.2e, %.2f it/s, %.2f MVx/s, %s" % (misc['learning_rate'], misc['tr_speed'], misc['tr_speed_vx'], t)
            logger.info(text)

            # Plot tracker stats to pngs in save_path
            self._tracker.plot(self.save_path)

            # Reporting to tensorboard logger
            if self.tb:
                self.tb_log_scalars(stats, 'stats')
                self.tb_log_scalars(misc, 'misc')
                if self.previews_enabled:
                    self.tb_log_preview()
                self.tb_log_sample_images(images, group='tr_samples')
                self.tb.writer.flush()

            # Save trained model state
            self.save_model()
            if stats['val_loss'] < self.best_val_loss:
                self.best_val_loss = stats['val_loss']
                self.save_model(suffix='_best')
        except KeyboardInterrupt:
            IPython.embed(header=self._shell_info)
            if self.terminate:
                return
        except Exception as e:
            traceback.print_exc()
            if self.ignore_errors:
                # Just print the traceback and try to carry on with training.
                # This can go wrong in unexpected ways, so don't leave the training unattended.
                pass
            elif self.ipython_on_error:
                print("\nEntering Command line such that Exception can be "
                      "further inspected by user.\n\n")
                IPython.embed(header=self._shell_info)
                if self.terminate:
                    return
            else:
                raise e
    self.save_model(suffix='_final')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, max_steps=100_000, instances=1, visualize=False, plot=None, max_subprocesses=0):\n self.agent.training = True\n if max_subprocesses == 0:\n # Use single process implementation\n self._sp_train(max_steps, instances, visualize, plot)\n elif max_subprocesses is None or max_subprocesses > 0:\n # Use multiprocess implementation\n self._mp_train(max_steps, instances, visualize, plot, max_subprocesses)\n else:\n raise HkException(f\"Invalid max_subprocesses setting: {max_subprocesses}\")", "def train_epoch(self, print_frequency=1, max_steps=0):\n ldx, ldz, lgx, lgz, lrx, lrz = 0, 0, 0, 0, 0, 0\n eps = 1e-9\n for batch, (real_samples, _) in enumerate(self.dataloader):\n real_samples = real_samples.to(self.device)\n ldx_, ldz_ = self.train_step_discriminators(real_samples)\n ldx += ldx_\n ldz += ldz_\n lgx_, lgz_, lrx_, lrz_ = self.train_step_generators(real_samples)\n lgx += lgx_\n lgz += lgz_\n lrx += lrx_\n lrz += lrz_\n if print_frequency and (batch+1) % print_frequency == 0:\n print(f\"{batch+1}/{len(self.dataloader)}:\"\n f\" G={lgx / (eps + (batch+1) * ALPHA_DISCRIMINATE_IMAGE):.3f},\"\n f\" E={lgz / (eps + (batch+1) * ALPHA_DISCRIMINATE_LATENT):.3f},\"\n f\" Dx={ldx / (eps + (batch+1)):.3f},\"\n f\" Dz={ldz / (eps + (batch+1)):.3f}\",\n f\" Rx={lrx / (eps + (batch+1) * ALPHA_RECONSTRUCT_IMAGE):.3f}\",\n f\" Rz={lrz / (eps + (batch+1) * ALPHA_RECONSTRUCT_LATENT):.3f}\",\n end='\\r',\n flush=True)\n if max_steps and batch == max_steps:\n break\n if print_frequency:\n print()\n lgx /= batch\n lgz /= batch\n ldx /= batch\n ldz /= batch\n lrx /= batch\n lrz /= batch\n return lgx, lgz, ldx, ldz, lrx, lrz", "def train(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n while self.episodes_done < num_episodes:\n self.trainOneEpisode(num_episodes, max_episode_steps, save_freq, render)\n self.saveCheckpoint()", "def train(self, max_epochs: int=100) \\\n -> Generator[Tuple[float, float, int], bool, None]:\n assert self.tf_init_done, \"Must call .init_tf() first!\"\n\n tr = tqdm.trange(max_epochs, desc='epoch', leave=True)\n mean_loss = None\n\n for epoch_num in tr:\n # only extend replay by a bit each time\n succ_rates = self._extend_replays(max(25 // len(self.problems), 1))\n succ_rate = np.mean(succ_rates)\n replay_sizes = self._get_replay_sizes()\n replay_size = sum(replay_sizes)\n tr.set_postfix(\n succ_rate=succ_rate, net_loss=mean_loss, states=replay_size)\n self._log_op_value('succ-rate', succ_rate)\n self._log_op_value('replay-size', replay_size)\n # do a few batches of SGD (should keep us close to convergence)\n mean_loss = self._optimise(300)\n tr.set_postfix(\n succ_rate=succ_rate, net_loss=mean_loss, states=replay_size)\n keep_going = yield succ_rate, mean_loss, replay_size\n if not keep_going:\n print('.train() terminating early')\n break", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def _sp_train(self, max_steps, 
instances, visualize, plot):\n # Keep track of rewards per episode per instance\n episode_reward_sequences = [[] for i in range(instances)]\n episode_step_sequences = [[] for i in range(instances)]\n episode_rewards = [0] * instances\n\n # Create and initialize environment instances\n envs = [self.create_env() for i in range(instances)]\n envs[0].render(mode='human')\n states = [env.reset()['observation'][0] for env in envs] # get the image\n\n for step in range(max_steps):\n for i in range(instances):\n if visualize: envs[i].render()\n action, angle_index, action_index = self.agent.act(states[i], i)\n\n next_state, reward, done, _ = envs[i].step(action)\n (next_image, next_depth) = next_state['observation']\n self.agent.push(\n Transition(states[i], [angle_index, action_index], reward, None if done else next_image), i)\n episode_rewards[i] += reward\n if done:\n episode_reward_sequences[i].append(episode_rewards[i])\n episode_step_sequences[i].append(step)\n episode_rewards[i] = 0\n if plot: plot(episode_reward_sequences, episode_step_sequences)\n (image, depth) = envs[i].reset()['observation']\n states[i] = image\n else:\n states[i] = next_image\n # Perform one step of the optimization\n self.agent.train(step)\n\n if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)", "def train(self,\n step_size=0.01,\n max_steps=50001,\n b_=0,\n verbose=True,\n optimizer=None,\n log_training = False,\n batch_size = 0):\n\n\n self.model_loaded = True\n if self.graph is None:\n self.graph = tf.Graph()\n build_graph = True\n else:\n build_graph = False\n\n with self.graph.as_default():\n\n if self.sess == None:\n sess = tf.Session()\n self.sess = sess\n else:\n sess = self.sess\n\n for n in self.nodes:\n n.set_prefactors()\n\n # Build all the required tensors\n logits_list = self.build_network()\n cost_list = self.get_cost(logits_list)\n train_feed_dict, valid_feed_dict = self.get_feed('train')\n\n\n\n cost = 0\n for c in cost_list:\n cost += c\n\n # Create regularization parameters for every distinct namescope\n b = {}\n if build_graph:\n for ns in self.distinct_namescopes():\n b[ns] = tf.placeholder(tf.float32, name = '{}/b'.format(ns))\n else:\n for ns in self.distinct_namescopes():\n b[ns] = self.graph.get_tensor_by_name('{}/b:0'.format(ns))\n\n # L2-loss\n loss = 0\n with tf.variable_scope(\"\", reuse = True):\n for n in self.nodes:\n if not isinstance(n, Subnetnode): continue\n\n for l, layer in enumerate(n.layers):\n name = n.name\n loss += tf.nn.l2_loss(tf.get_variable(\"{}/W{}\".format(name, l+1))) * \\\n b[name]/layer\n\n cost += loss\n\n if b_ == 0:\n b_ = [0] * len(self.distinct_namescopes())\n\n for i, ns in enumerate(self.distinct_namescopes()):\n train_feed_dict['{}/b:0'.format(ns)] = b_[i]\n valid_feed_dict['{}/b:0'.format(ns)] = 0\n\n\n if self.optimizer == None:\n if optimizer == None:\n self.optimizer = tf.train.AdamOptimizer(learning_rate = step_size)\n else:\n self.optimizer = optimizer\n\n train_step = self.optimizer.minimize(cost)\n\n # Workaround to load the AdamOptimizer variables\n if not self.checkpoint_path == None:\n saver = tf.train.Saver()\n saver.restore(self.sess,self.checkpoint_path)\n self.checkpoint_path = None\n\n ml.initialize_uninitialized(self.sess)\n\n self.initialized = True\n\n train_writer = tf.summary.FileWriter('./log/',\n self.graph)\n\n old_cost = 1e8\n\n statistics = {}\n statistics['time_trained'] = time()\n statistics['total_cost'] = []\n statistics['loss'] = []\n statistics['partial_cost'] = {}\n for t in self.find_targetnodes():\n 
statistics['partial_cost'][t.name] = []\n\n for _ in range(0,max_steps):\n\n if batch_size > 0:\n start = 0\n while(start != -1):\n batch_feed_dict, start = ml.get_batch_feed(train_feed_dict,\n start, batch_size)\n sess.run(train_step, feed_dict = batch_feed_dict)\n else:\n sess.run(train_step, feed_dict=train_feed_dict)\n\n # if _%int(max_steps/100) == 0 and adaptive_rate == True:\n # new_cost = sess.run(tf.sqrt(cost),\n # feed_dict=train_feed_dict)\n #\n # if new_cost > old_cost:\n # step_size /= 2\n # print('Step size decreased to {}'.format(step_size))\n # train_step = tf.train.GradientDescentOptimizer(step_size).minimize(cost)\n # old_cost = new_cost\n\n # Log training process\n if _%int(max_steps/100) == 0 and log_training:\n statistics['total_cost'].append(sess.run(tf.sqrt(cost),\n feed_dict=valid_feed_dict))\n statistics['loss'].append(sess.run(loss,\n feed_dict=valid_feed_dict))\n if len(cost_list) > 1:\n for t, c in zip(self.find_targetnodes(), cost_list):\n statistics['partial_cost'][t.name].append(sess.run(tf.sqrt(c),\n feed_dict=valid_feed_dict))\n\n # Print training process\n if _%int(max_steps/10) == 0 and verbose:\n print('Step: ' + str(_))\n print('Training set loss:')\n if len(cost_list) > 1:\n for t, c in zip(self.find_targetnodes(), cost_list):\n print('{}: {}'.format(t.name,sess.run(tf.sqrt(c),\n feed_dict=train_feed_dict)))\n print('Total: {}'.format(sess.run(tf.sqrt(cost-loss),\n feed_dict=train_feed_dict)))\n print('Validation set loss:')\n if len(cost_list) > 1:\n for t, c in zip(self.find_targetnodes(), cost_list):\n print('{}: {}'.format(t.name, sess.run(tf.sqrt(c),\n feed_dict=valid_feed_dict)))\n print('Total: {}'.format(sess.run(tf.sqrt(cost),\n feed_dict=valid_feed_dict)))\n print('--------------------')\n print('L2-loss: {}'.format(sess.run(loss,\n feed_dict=train_feed_dict)))\n\n # Final log entry\n\n statistics['total_cost'].append(sess.run(tf.sqrt(cost),\n feed_dict=valid_feed_dict))\n statistics['loss'].append(sess.run(loss,\n feed_dict=valid_feed_dict))\n if len(cost_list) > 1:\n for t, c in zip(self.find_targetnodes(), cost_list):\n statistics['partial_cost'][t.name].append(sess.run(tf.sqrt(c),\n feed_dict=valid_feed_dict))\n statistics['time_trained'] = time() - statistics['time_trained']\n return statistics", "def train(self, t_max):\n # Initialize session, model, variables.\n tf.global_variables_initializer().run()\n self.stat.load_model()\n self.target_network.run_copy()\n self.stat.t_start = self.stat.get_t()\n\n # Burn in.\n self.burn_in()\n\n print(\" [*] Training.\")\n\n # Progress display mechanism.\n if self.chtc:\n self.t_range = range(self.stat.t_start, t_max)\n else:\n self.t_range = trange(self.stat.t_start, t_max)\n\n # Start a new game.\n observation, reward, terminal = self.new_game()\n\n # Initialize history.\n for _ in range(self.history_length):\n self.history.add(observation)\n\n try:\n for self.t in self.t_range:\n # Linearly decaying exploration factor.\n epsilon = self.ep_end + max(0., (self.ep_start - self.ep_end) * (self.t_ep_end - max(0., self.t - self.t_learn_start)) / self.t_ep_end)\n\n # 1. Predict.\n action = self.predict(self.history.get(), epsilon)\n # 2. Act.\n observation, reward, terminal, _ = self.env.step(action, is_training=True)\n # 3. Observe.\n self.observe(observation, reward, action, terminal)\n # 4. Update.\n _, _, is_update = self.update()\n # 5. Test. 
\n terminal = self.test() or terminal\n\n # Notify the statistic module of the new iteration number, in case it needs to save the model.\n self.stat.on_step(self.t, is_update)\n \n # If the game has terminated, reset.\n if terminal:\n observation, reward, terminal = self.new_game()\n for _ in range(self.history_length):\n self.history.add(observation)\n\n except KeyboardInterrupt:\n print(\"\\n [!] Keyboard interrupt registered. Exiting!\")\n # The model is typically saved every t_test iterations, but if the training needs to be paused, we can save immediately before quitting.\n self.stat.save_model(self.t, self.stat.latest_saver)\n\n except Exception as e:\n print(\" [!] Unhandled exception encountered:\", e, \"\\nExiting!\")\n self.stat.save_model(self.t, self.stat.latest_saver)\n\n\n self.stat.zip_data(False)", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def step_train(self, max_iter):\n nzx, nzy = self.trn_graph.nonzero()\n n = len(self.trn_x_index)\n n_pos = len(nzx)\n for _ in range(max_iter):\n Y_pred, loss, grad_norm = self.train_fn(self.gX, self.hX, self.sym_g, self.sym_h,\n self.trn_graph, self.trn_x_index, self.trn_y_index)\n return Y_pred, loss, grad_norm", "def train(\n self, num_episodes, max_episode_length, reward_network=None,\n ):\n\n for _ in range(num_episodes):\n self.train_episode(max_episode_length)\n\n if self.training_i % self.play_interval == 0:\n self.play(\n max_episode_length,\n self.render,\n reward_network=reward_network,\n )", "def train(self, num_epochs=None, num_steps=None):\r\n if num_steps is None:\r\n num_entries = np.min([s.num_entries for s in list(self._train_data.values())])\r\n num_steps = int(num_epochs * num_entries / self._batch_size)\r\n self.initialize_if_not(training=True)\r\n \r\n # ==================================== alonsag monitoring ====================================\r\n heatmap_monitor = []\r\n radius_monitor = []\r\n # ============================================================================================\r\n\r\n try:\r\n initial_step = self.checkpoint.load_all()\r\n current_step = initial_step\r\n \r\n print(\"-I-\", num_steps-initial_step, \"steps are planned\")\r\n for current_step in range(initial_step, num_steps):\r\n # Extra operations defined in implementation of this base class\r\n self.train_loop_pre(current_step)\r\n\r\n # Select loss terms, optimize operations, and metrics tensors to evaluate\r\n fetches = {}\r\n schedule_id = current_step % len(self._learning_schedule)\r\n schedule = self._learning_schedule[schedule_id]\r\n fetches['optimize_ops'] = self._optimize_ops[schedule_id]\r\n loss_term_keys, _ = zip(*list(schedule['loss_terms_to_optimize'].items()))\r\n fetches['loss_terms'] = [self.loss_terms['train'][k] for k in loss_term_keys]\r\n summary_op = self.summary.get_ops(mode='train')\r\n if len(summary_op) > 0:\r\n fetches['summaries'] = summary_op\r\n\r\n # Run one optimization iteration and retrieve calculated loss values\r\n self.time.start('train_iteration', average_over_last_n_timings=100)\r\n outcome = self._tensorflow_session.run(\r\n fetches=fetches,\r\n feed_dict={\r\n self.is_training: True,\r\n self.use_batch_statistics: True,\r\n }\r\n 
)\r\n self.time.end('train_iteration')\r\n\r\n # Print progress\r\n to_print = '%07d> ' % current_step\r\n to_print += ', '.join(['%s = %g' % (k, v)\r\n for k, v in zip(loss_term_keys, outcome['loss_terms'])])\r\n self.time.log_every('train_iteration', to_print, seconds=2)\r\n \r\n # ==================================== alonsag monitoring ====================================\r\n heatmap_monitor.append(outcome['loss_terms'][0])\r\n radius_monitor.append(outcome['loss_terms'][1])\r\n # ============================================================================================\r\n # Trigger copy weights & concurrent testing (if not already running)\r\n if self._enable_live_testing:\r\n self._tester.trigger_test_if_not_testing(current_step)\r\n\r\n # Write summaries\r\n if 'summaries' in outcome:\r\n self.summary.write_summaries(outcome['summaries'], current_step)\r\n\r\n # Save model weights\r\n if self.time.has_been_n_seconds_since_last('save_weights', 300) \\\r\n and current_step > initial_step:\r\n self.checkpoint.save_all(current_step)\r\n\r\n # Extra operations defined in implementation of this base class\r\n self.train_loop_post(current_step) \r\n except KeyboardInterrupt:\r\n # Handle CTRL-C graciously\r\n self.checkpoint.save_all(current_step)\r\n sys.exit(0)\r\n \r\n fig, (ax1, ax2) = plt.subplots(1, 2)\r\n ax1.plot(heatmap_monitor)\r\n ax1.set_title('Heatmap MSE Over Time')\r\n ax2.plot(radius_monitor)\r\n ax2.set_title('Radius MSE Over Time')\r\n plt.show()\r\n # Stop live testing, and run final full test\r\n if self._enable_live_testing:\r\n self._tester.do_final_full_test(current_step)\r\n\r\n # Save final weights\r\n if current_step > initial_step:\r\n self.checkpoint.save_all(current_step)", "def train(self, batch_size=64, n_episodes=100, max_episode_length=3000, save_path=\"last_save.h5\",\n load_path=None):\n\n self.explore = True # Explore if needed\n\n self._play_through(n_episodes=n_episodes, max_episode_length=max_episode_length, save_path=save_path,\n callbacks=self._train_callbacks_factory())", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' 
+ str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def train(self):\n best_loss = math.inf\n for _ in range(self.epoch, self.end_epoch):\n self.summary_writer.add_scalar('epoch', self.epoch, self.total_steps)\n epoch_loss, _ = self.run_epoch(self.dataloader)\n if epoch_loss < best_loss:\n best_loss = epoch_loss\n # save best module as onnx format\n dummy_input = torch.randn((10, 3, self.image_dim, self.image_dim))\n module_path = os.path.join(self.models_dir, 'resnet.onnx')\n self.save_module(\n self.resnet.module, module_path, save_onnx=True, dummy_input=dummy_input)\n self.save_checkpoint('resnet_e{}_state.pth'.format(self.epoch))\n\n # validate step\n val_loss, _ = self.validate()\n\n # update learning rates\n self.lr_scheduler.step(val_loss)\n self.save_learning_rate(self.summary_writer, self.optimizer, self.total_steps)\n self.epoch += 1\n self.test()", "def _train_epoch(self, train_batches, data, max_metric_value, metric_save, patience, step_pbar):\n evaluate = True\n exit_tag = False\n num_steps = self.args.num_steps\n check_point, batch_size = self.args.check_point, self.args.batch_size\n save_dir, save_prefix = self.args.save_dir, self.args.algo\n\n for bitx, batch in enumerate(train_batches):\n if evaluate and self.global_step % self.eval_freq == 0:\n if data.dev_set is not None:\n dev_batches = data.gen_mini_batches('dev', 31928, shuffle=False)\n dev_loss, dev_perplexity, dev_perplexity_at_rank = self.evaluate(dev_batches, data)\n #print('dev loss=%s' % dev_loss, 'dev ppl=%s' % dev_perplexity, 'dev ppl at rank=', dev_perplexity_at_rank)\n\n test_batches = data.gen_mini_batches('test', 41405, shuffle=False)\n test_loss, test_perplexity, test_perplexity_at_rank = self.evaluate(test_batches, data)\n #print('test loss=%s' % test_loss, 'dev ppl=%s' % test_perplexity, 'dev ppl at rank=' , test_perplexity_at_rank)\n\n self.writer.add_scalar(\"dev/loss\", dev_loss, self.global_step)\n self.writer.add_scalar(\"dev/perplexity\", dev_perplexity, self.global_step)\n self.writer.add_scalar(\"test/loss\", test_loss, self.global_step)\n self.writer.add_scalar(\"test/perplexity\", test_perplexity, self.global_step)\n\n for trunc_level in self.trunc_levels:\n ndcg_version1, ndcg_version2 = self.relevance_estimator.evaluate(self, data, self.relevance_queries, trunc_level)\n self.writer.add_scalar(\"NDCG_version1/{}\".format(trunc_level), ndcg_version1, self.global_step)\n self.writer.add_scalar(\"NDCG_version2/{}\".format(trunc_level), ndcg_version2, self.global_step)\n\n if dev_loss < metric_save:\n metric_save = dev_loss\n patience = 0\n else:\n patience += 1\n # Trick: do not decay d_lr help convergence\n if patience >= self.patience:\n #self.adjust_learning_rate(self.discrim_optimizer, self.args.lr_decay)\n self.adjust_learning_rate(self.policy_optimizer, self.args.lr_decay)\n self.g_lr *= self.args.lr_decay\n #self.d_lr *= 
self.args.lr_decay\n self.writer.add_scalar('train/g_lr', self.g_lr, self.global_step)\n #self.writer.add_scalar('train/d_lr', self.d_lr, self.global_step)\n metric_save = dev_loss\n patience = 0\n self.patience += 1\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n self.global_step += 1\n step_pbar.update(1)\n QIDS = Variable(torch.from_numpy(np.array(batch['qids'], dtype=np.int64)))\n UIDS = Variable(torch.from_numpy(np.array(batch['uids'], dtype=np.int64)))\n VIDS = Variable(torch.from_numpy(np.array(batch['vids'], dtype=np.int64)))\n PRE_CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, :-1]))\n CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, 1:]))\n\n # generate trajectories\n for __ in range(self.args.d_step):\n actor_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n critic_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n CLICK_ = torch.zeros(QIDS.shape[0], 1, dtype=CLICKS.dtype)\n logits = torch.zeros(QIDS.shape[0], 0, 2)\n values = torch.zeros(QIDS.shape[0], 0)\n CLICKS_ = Variable(torch.zeros((QIDS.shape[0], 0), dtype=CLICKS.dtype))\n if self.use_cuda:\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS = QIDS.cuda(), UIDS.cuda(), VIDS.cuda(), PRE_CLICKS.cuda(), CLICKS.cuda()\n actor_rnn_state, critic_rnn_state, CLICK_ = actor_rnn_state.cuda(), critic_rnn_state.cuda(), CLICK_.cuda()\n logits, values, CLICKS_ = logits.cuda(), values.cuda(), CLICKS_.cuda()\n self.policy.eval()\n for i in range(self.max_d_num + 1):\n logit, value, actor_rnn_state, critic_rnn_state = self.policy(QIDS[:, i:i+1], \n UIDS[:, i:i+1], \n VIDS[:, i:i+1], \n CLICK_, \n actor_rnn_state, \n critic_rnn_state)\n if i > 0:\n CLICK_ = torch.distributions.Categorical(logit).sample()\n logits = torch.cat([logits, logit], dim=1)\n values = torch.cat([values, value], dim=1)\n CLICKS_ = torch.cat([CLICKS_, CLICK_], dim=1)\n\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n\n '''update discriminator'''\n for _ in range(self.args.k):\n self.discrim.train()\n self.discrim_optimizer.zero_grad()\n g_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS_)\n g_o_target = torch.ones((QIDS.shape[0], g_o.shape[1]))\n e_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS)\n e_o_target = torch.zeros((QIDS.shape[0], e_o.shape[1]))\n if self.use_cuda:\n g_o_target, e_o_target = g_o_target.cuda(), e_o_target.cuda()\n \n discrim_loss = self.discrim_criterion(g_o, g_o_target) + self.discrim_criterion(e_o, e_o_target)\n discrim_loss.backward()\n self.discrim_optimizer.step()\n self.writer.add_scalar('train/d_loss', discrim_loss.data, self.global_step)\n\n '''estimate advantage'''\n with torch.no_grad():\n self.discrim.eval()\n rewards = -torch.log(self.discrim(QIDS, UIDS, VIDS, CLICKS_)[0])\n # print(rewards.shape, values.shape)\n #print(tensor_type)\n #exit(0)\n deltas = torch.zeros(rewards.shape)\n advantages = torch.zeros(rewards.shape)\n prev_value = torch.zeros(rewards.shape[0])\n prev_advantage = torch.zeros(rewards.shape[0])\n if self.use_cuda:\n deltas, advantages = deltas.cuda(), advantages.cuda()\n prev_value, prev_advantage = prev_value.cuda(), prev_advantage.cuda()\n '''print(deltas)\n print(advantages)\n print(prev_value)\n print(prev_advantage)\n exit(0)'''\n\n for i in 
reversed(range(rewards.size(1))):\n deltas[:, i] = rewards[:, i] + self.gamma * prev_value - values[:, i]\n advantages[:, i] = deltas[:, i] + self.gamma * self.tau * prev_advantage\n prev_value = values[:, i]\n prev_advantage = advantages[:, i]\n\n returns = values + advantages\n advantages = (advantages - advantages.mean()) / (advantages.std() + MINF)\n # advantages = (returns - returns.mean())/returns.std()\n\n fixed_log_probs = torch.distributions.Categorical(logits).log_prob(CLICKS_[:, 1:])\n\n '''PPO update'''\n self.policy.train()\n optim_batchsize = 512\n optim_iter_num = int(math.ceil(QIDS.shape[0] / optim_batchsize))\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n for _ in range(self.args.g_step):\n perm = np.arange(QIDS.shape[0])\n np.random.shuffle(perm)\n\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS, CLICKS_, advantages, returns, fixed_log_probs = \\\n QIDS[perm].clone(), UIDS[perm].clone(), VIDS[perm].clone(), PRE_CLICKS[perm].clone(), \\\n CLICKS[perm].clone(), CLICKS_[perm].clone(), advantages[perm].clone(), returns[perm].clone(), fixed_log_probs[perm].clone()\n\n #print(QIDS)\n #exit(0)\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batchsize, min((i + 1) * optim_batchsize, QIDS.shape[0]))\n qids_b, uids_b, vids_b, pclicks_b, clicks_b, clicks__b, advantage_b, returns_b, fixed_log_probs_b = \\\n QIDS[ind], UIDS[ind], VIDS[ind], CLICKS_[ind, :-1], CLICKS[ind], CLICKS_[ind, 2:], \\\n advantages[ind], returns[ind], fixed_log_probs[ind]\n\n logits, values_pred, _, _ = self.policy(qids_b, uids_b, vids_b, pclicks_b)\n dist = torch.distributions.Categorical(logits)\n\n\n '''update critic'''\n value_loss = (values_pred - returns_b).pow(2).mean()\n '''optimizer policy'''\n log_probs_b = dist.log_prob(clicks__b)\n ratio = torch.exp(log_probs_b - fixed_log_probs_b)\n surr1 = ratio * advantage_b\n surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantage_b\n policy_surr = -torch.min(surr1, surr2).mean()\n pe = dist.entropy().mean()\n loss = value_loss + self.alpha * policy_surr - self.beta * pe\n\n self.policy_optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 40)\n self.policy_optimizer.step()\n g_loss, _ = self.compute_loss(logits, clicks_b)\n\n self.writer.add_scalar('train/g_loss', g_loss.data, self.global_step)\n self.writer.add_scalar('train/g_valueloss', value_loss.data, self.global_step)\n self.writer.add_scalar('train/g_policysurr', policy_surr.data, self.global_step)\n self.writer.add_scalar('train/g_entropy', pe.data, self.global_step)\n\n if check_point > 0 and self.global_step % check_point == 0:\n self.save_model(save_dir, save_prefix)\n if self.global_step >= num_steps:\n exit_tag = True\n\n return max_metric_value, exit_tag, metric_save, patience", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n 
training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target_seg, target_class) in enumerate(self.data_loader):\n data, target_seg, target_class = data.to(self.device), target_seg.to(self.device), target_class.to(self.device)\n\n self.optimizer.zero_grad()\n output_seg, output_class = self.model(data)\n loss = self.criterion((output_seg, output_class), target_seg, target_class, epoch)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n if met.__name__ == \"accuracy\":\n self.train_metrics.update(met.__name__, met(output_class, target_class))\n else:\n self.train_metrics.update(met.__name__, met(output_seg, target_seg))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n\n self._visualize_input(data.cpu())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return log", "def train(self, max_episodes= 1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n success = False\n i_episode = 0\n eps = eps_start\n \n print('Training in progress...')\n for i in range(max_episodes):\n score = self.run_training_episode(eps=eps)\n \n self.score_window.append(score)\n self.score_record.append(np.mean(self.score_window))\n \n i_episode += 1\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n\n if i_episode%100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n \n if i_episode>100:\n if np.mean(self.score_window)>self.criteria:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n success = True\n break\n\n if success:\n print('Criteria reached after {} episodes'.format(i_episode))\n else:\n print('Failed to reach Criteria after {} episodes'.format(i_episode))\n\n self.plot_training_progress()\n return success", "def train(self, data_set, learning_rate=0.01, steps=100):\n loss_network = LossNetwork(self.network, data_set)\n for s in range(steps):\n print('epoch: {}'.format(s))\n loss_network.forward()\n self._loss_by_step.append(loss_network.value)\n loss_network.set_utop_gradient(-1)\n loss_network.backward()\n loss_network.pull_weights(learning_rate)", "def train(self):\n args = self.args\n mnist = self.mnist\n feed_valid = {self.x: mnist.validation.images, self.y: mnist.validation.labels}\n feed_test = {self.x: mnist.test.images, self.y: mnist.test.labels}\n print('------------------------')\n print(\"epoch | l2_loss (v) | ce_loss (v) | valid_err (s) | valid_err (m) | test_err (s) | test_err (m)\")\n\n for ep in range(args.num_epochs):\n num_mbs = int(args.num_train / args.batch_size)\n for _ in range(num_mbs):\n batch = mnist.train.next_batch(args.batch_size)\n feed = {self.x: batch[0], self.y: batch[1]}\n self.sess.run(self.train_step, feed)\n valid_stats 
= self.sess.run(self.stats, feed_valid)\n test_stats = self.sess.run(self.stats, feed_test)\n\n valid_err_single = 100*(1.0-valid_stats['accuracy'])\n valid_err_model = self.eval_valid.eval(valid_stats['y_softmax'])\n test_err_single = 100*(1.0-test_stats['accuracy'])\n test_err_model = self.eval_test.eval(test_stats['y_softmax'])\n\n print(\"{:5} {:9.4f} {:9.4f} {:10.3f} {:10.3f} {:10.3f} {:10.3f}\".format(ep,\n valid_stats['l2_loss'], valid_stats['cross_entropy'],\n valid_err_single, valid_err_model,\n test_err_single, test_err_model))", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n\n if self.config[\"amp\"]:\n # AMP!\n with autocast():\n output = self.model(data)\n loss = self.criterion(output, target)\n else:\n output = self.model(data)\n loss = self.criterion(output, target)\n\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(\n \"Train Epoch: {} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), loss.item()\n )\n )\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "def fit(self, nsteps=50000, nmaxsteps=200):\n warnings.simplefilter(action=\"ignore\", category=DeprecationWarning)\n return self.agent.fit(self.env, nb_steps=nsteps, visualize=False, verbose=1, nb_max_episode_steps=nmaxsteps)", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def train_online(self, epochs, iterations_per_epoch, batch_size, **kwargs):\n \n losses = dict()\n for ep in range(1, epochs+1):\n losses[ep] = []\n with tqdm(total=iterations_per_epoch, desc='Training epoch {}'.format(ep)) as p_bar:\n for it in range(iterations_per_epoch):\n\n # Generate model indices and data on-the-fly\n model_indices, sim_data = self._forward_inference(batch_size, **kwargs)\n\n # One step backprop\n loss = self._train_step(model_indices, sim_data)\n\n # Store loss into dict\n losses[ep].append(loss)\n\n # Update progress bar\n p_bar.set_postfix_str(\"Epoch {0},Iteration {1},Loss: {2:.3f},Running Loss: {3:.3f}\"\n .format(ep, it, loss, np.mean(losses[ep])))\n p_bar.update(1)\n\n # Store after each epoch, if specified\n if self.manager is not None:\n self.manager.save()\n return losses", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self, episodes=2000, max_steps=99):\n\n for episode in range(episodes):\n state = self.env.reset()\n\n for step in range(max_steps):\n explore_eploit_tradeoff = np.random.uniform()\n\n if 
explore_eploit_tradeoff > self.epsilon:\n action = np.argmax(self.q_table[state, :])\n else:\n action = self.env.action_space.sample()\n\n new_state, reward, done, info = self.env.step(action)\n\n self.q_table[state, action] = self.q_table[state, action] \\\n + self.lr * (reward + self.gamma * np.amax(\n self.q_table[new_state, :]\n ) - self.q_table[state, action]\n )\n\n state = new_state\n if done:\n break\n exp_ = np.exp(-self.decay_rate * episode)\n self.epsilon = self.min_eps + exp_ * (self.max_eps - self.min_eps)", "def train(self, training_steps=10):", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: self.model.global_step_tensor.eval(self.sess) + 1})" ]
[ "0.6947871", "0.685724", "0.6677812", "0.6537507", "0.6392748", "0.638029", "0.6369513", "0.6317855", "0.6273792", "0.6248912", "0.6072411", "0.607182", "0.60491323", "0.6042872", "0.59877187", "0.59860224", "0.59725696", "0.5950437", "0.5927182", "0.5913687", "0.5912271", "0.59019417", "0.5897647", "0.589674", "0.5894141", "0.5890914", "0.58847904", "0.5863887", "0.586275", "0.58570796" ]
0.75708795
0
Save/serialize trained model state to files. If the model uses a parallel wrapper like ``torch.nn.DataParallel``, this is automatically detected and the underlying ``model.module`` is saved directly to make later deserialization easier. This can be disabled by setting ``unwrap_parallel=False``.
def save_model(self, suffix: str = '', unwrap_parallel: bool = True) -> None:
    # TODO: Logging
    model = self.model
    # We do this awkward check because there are too many different
    # parallel wrappers in PyTorch and some of them have changed names
    # in different releases (DataParallel, DistributedDataParallel{,CPU}).
    is_wrapped = (
        hasattr(model, 'module') and
        'parallel' in str(type(model)).lower() and
        isinstance(model.module, torch.nn.Module)
    )
    if is_wrapped and unwrap_parallel:
        # If a parallel wrapper was used, the only thing we should save
        # is the model.module, which contains the actual model and params.
        # If we saved the wrapped module directly, deserialization would
        # get unnecessarily difficult.
        model = model.module
    state_dict_path = os.path.join(self.save_path, f'state_dict{suffix}.pth')
    model_path = os.path.join(self.save_path, f'model{suffix}.pt')
    torch.save(model.state_dict(), state_dict_path)
    torch.save(model, model_path)
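For orientation, a minimal sketch of how files written with this naming scheme could be loaded back later; the class name ``MyModel`` and the directory value are assumed placeholders, not part of the code above:

import os
import torch

save_path = '/tmp/run01'  # assumed directory, standing in for self.save_path
state_dict_path = os.path.join(save_path, 'state_dict.pth')  # suffix='' assumed
model_path = os.path.join(save_path, 'model.pt')

# Restore parameters into a freshly constructed module (MyModel is hypothetical).
model = MyModel()
model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))
model.eval()

# Alternatively, load the fully pickled module; this requires the defining
# class to be importable under the same name as when it was saved.
model = torch.load(model_path, map_location='cpu')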
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()", "def save_model(self, path=\"/model\"):\n state = {\n 'epoch': self.epoch_counter,\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n torch.save(state, path)", "def save_model(self, filename='model.pt'):\n checkpoint = {\n 'input_size': self.linear_layers[0].in_features,\n 'output_size': self.linear_layers[-1].out_features,\n 'hidden_layers': [layer.out_features for layer in self.linear_layers[:-1]],\n 'state_dict': self.state_dict()}\n torch.save(checkpoint, filename)", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def save_model(self, name): \n torch.save(dict(params=self.model.encoder.state_dict()), osp.join(self.args.save_path, name + '.pth'))", "def save( self ):\n try:\n state_dict = {\n 'epoch': self.epoch,\n 'epoch_loss': self.epoch_loss,\n 'global_step': self.global_step,\n 'mechanism_weights': self.mechanism_weights, # Save row.\n 'router_state': self.router.state_dict(), # Save router state.\n 'nucleus_state': self.nucleus.state_dict(), # Save nucleus state.\n 'optimizer_state': self.optimizer.state_dict(), # Save optimizer.\n }\n torch.save( state_dict, \"{}/model.torch\".format( self.config.neuron.full_path, self.epoch_loss ) )\n bittensor.logging.success(prefix='Saved model', sufix='<blue>{}/model.torch</blue>'.format( self.config.neuron.full_path ) )\n except Exception as e:\n logger.exception('Failed to save model with error:{}', e)", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def save_checkpoint(self, model):\n # print(f\"save model {self.save_model_path}\")\n torch.save(model.state_dict(), self.save_model_path)", "def save(self, path=\"./trained_model.checkpoint\"):\n torch.save({\"state_dict\":self.working_q.state_dict}, path)", "def _save_model_and_checkpoint(self, save_model_class=False):\n import os\n\n try:\n import cloudpickle\n except ImportError:\n cloudpickle = None\n\n logger.info(\"Saving model...\")\n output_dir = os.path.join(\n self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n )\n\n # save model parameters\n self._save_checkpoint(self.model, trial=None, metrics=None)\n # save the serialized model\n if save_model_class:\n # TODO : fix serialization of DatasetSchema object\n if cloudpickle is None:\n raise ValueError(\"cloudpickle is required to save model class\")\n\n with open(os.path.join(output_dir, \"model_class.pkl\"), \"wb\") as out:\n cloudpickle.dump(self.model.module, out)", "def save(self, path=None):\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with 
zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)", "def save_model(file_name, ep, model, optimizer):\n\n torch.save({\n 'epoch': ep,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, file_name) \n \n return", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n print(\"MODEL NAME = {}\".format(model_name))\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.height\n to_save['width'] = self.width\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)", "def save_trained_model(self):\n save_keras_sequential(self.model, self.RELATIVE_DATA_DIRECTORY, self.get_name())\n logger.info(f\"DQL Trader: Saved trained model\")", "def save_model(model, model_index, args):\n logger.info(\"saving local model-{}\".format(model_index))\n with open(args.modeldir+\"trained_local_model\"+str(model_index), \"wb\") as f_:\n torch.save(model.state_dict(), f_)\n return", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save(self):\n try:\n torch.save(self.model.state_dict(), os.path.join(self.save_path, \"save_point.pth\"))\n except:\n print(\"Unable to save the model\")", "def maybe_save_model(savedir, container, state):\n if savedir is None:\n return\n start_time = time.time()\n model_dir = \"model-{}\".format(state[\"num_iters\"])\n U.save_state(os.path.join(savedir, model_dir, \"saved\"))\n if container is not None:\n container.put(os.path.join(savedir, model_dir), model_dir)\n relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)\n if container is not None:\n container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')\n relatively_safe_pickle_dump(state[\"monitor_state\"], os.path.join(savedir, 'monitor_state.pkl'))\n if container is not None:\n container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')\n logger.log(\"Saved model in {} seconds\\n\".format(time.time() - start_time))", "def save(config: PyTextConfig, model: Model, meta: CommonMetadata) -> None:\n save_path = config.save_snapshot_path\n print(f\"Saving pytorch model to: {save_path}\")\n model.save_modules(base_path=config.modules_save_dir)\n state = OrderedDict(\n [\n (DATA_STATE, meta),\n (CONFIG_JSON, config_to_json(PyTextConfig, config)),\n (MODEL_STATE, model.state_dict()),\n ]\n ) # type: OrderedDict\n torch.save(state, save_path)", "def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n 
os.makedirs(save_folder)\n\n for model_name in [\"encoder\", \"decoder\"]:\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n if model_name == 'encoder':\n to_save = self.encoder.state_dict()\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.opt.height\n to_save['width'] = self.opt.width\n else:\n to_save = self.decoder.state_dict()\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.optimizer.state_dict(), save_path)", "def save_model(trainer, full_path):\n print(\"Writing model to disk...\")\n model = trainer.model.cpu()\n torch.save(model.state_dict(), full_path)\n if trainer.device is not None:\n trainer.model.cuda(trainer.device)", "def save(self, model_path: Union[Path, str]) -> None:\n torch.save(self.model.state_dict(), model_path)", "def save_model(self, output_model: ModelEntity):\n logger.info(\"called save_model\")\n buffer = io.BytesIO()\n hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True))\n labels = {label.name: label.color.rgb_tuple for label in self._labels}\n model_ckpt = torch.load(self._model_ckpt)\n modelinfo = {\n \"model\": model_ckpt,\n \"config\": hyperparams_str,\n \"labels\": labels,\n \"VERSION\": 1,\n }\n\n torch.save(modelinfo, buffer)\n output_model.set_data(\"weights.pth\", buffer.getvalue())\n output_model.set_data(\n \"label_schema.json\",\n label_schema_to_bytes(self._task_environment.label_schema),\n )\n output_model.precision = self._precision", "def save_model(self, file_name: str):\n os.makedirs(self.checkpoint_path, exist_ok=True)\n to_save = {'params': self.params, 'opt_state': self.opt_state}\n path = os.path.join(self.checkpoint_path, file_name)\n with open(path, 'wb') as f:\n pickle.dump(to_save, f)", "def save_trained_model(self, model_trained):\n raise NotImplementedError", "def save(self):\r\n # torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))\r\n torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model_INN.pt'))", "def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)", "def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)", "def save(self,\n filename):\n\n if self.model is None:\n raise ValueError('No model -- train or load model before saving!')\n\n # Check paths\n create_missing_folders([os.path.dirname(filename)])\n\n # Save settings\n logging.info('Saving settings to %s_settings.json', filename)\n\n settings = {'method': self.method,\n 'method_type': self.method_type,\n 'n_observables': self.n_observables,\n 'n_parameters': self.n_parameters,\n 'n_hidden': list(self.n_hidden),\n 'activation': self.activation}\n\n with open(filename + '_settings.json', 'w') as f:\n json.dump(settings, f)\n\n # Save state dict\n logging.info('Saving state dictionary to %s_state_dict.pt', filename)\n torch.save(self.model.state_dict(), filename + '_state_dict.pt')" ]
[ "0.7230332", "0.7162599", "0.7139393", "0.70704466", "0.7013741", "0.6979095", "0.6976652", "0.69272524", "0.69247466", "0.69072735", "0.6906031", "0.6898889", "0.6883414", "0.68668175", "0.6859346", "0.6856698", "0.68370855", "0.6805313", "0.68041414", "0.6796176", "0.6757067", "0.67344105", "0.6724654", "0.6695023", "0.66854054", "0.6682248", "0.6676001", "0.6668398", "0.6668398", "0.66668475" ]
0.8036562
0
Defines ``batch2img`` function dynamically, depending on tensor shapes. ``batch2img`` slices a 4D or 5D tensor to (C, H, W) shape, moves it to host memory and converts it to a numpy array. By arbitrary choice, the first element of a batch is always taken here. In the 5D case, the D (depth) dimension is sliced at z_plane. This function is useful for plotting image samples during training.
def _get_batch2img_function( batch: torch.Tensor, z_plane: Optional[int] = None ) -> Callable[[torch.Tensor], np.ndarray]: if batch.dim() == 5: # (N, C, D, H, W) if z_plane is None: z_plane = batch.shape[2] // 2 assert z_plane in range(batch.shape[2]) return lambda x: x[0, :, z_plane].cpu().numpy() elif batch.dim() == 4: # (N, C, H, W) return lambda x: x[0, :].cpu().numpy() else: raise ValueError('Only 4D and 5D tensors are supported.')
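A minimal usage sketch for this helper, with tensor shapes chosen arbitrarily for illustration:

import torch

batch_5d = torch.randn(8, 1, 32, 64, 64)             # (N, C, D, H, W)
batch2img = _get_batch2img_function(batch_5d)         # z_plane defaults to the middle slice (16)
img = batch2img(batch_5d)                             # numpy array of shape (1, 64, 64)

batch_4d = torch.randn(8, 3, 64, 64)                  # (N, C, H, W)
img_2d = _get_batch2img_function(batch_4d)(batch_4d)  # numpy array of shape (3, 64, 64)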
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_batch():\n return np.zeros((2, 1, 4, 4))", "def get_batches_fn(batch_size):\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def make_grid(batch_img: torch.Tensor,\n batch_mask: torch.Tensor,\n img_denormalize_fn: Callable,\n mask_palette: Optional[Sequence] = default_palette,\n batch_gt_mask: Optional[torch.Tensor] = None):\n assert isinstance(batch_img, torch.Tensor) and isinstance(batch_mask, torch.Tensor)\n assert len(batch_img) == len(batch_mask)\n\n if batch_gt_mask is not None:\n assert isinstance(batch_gt_mask, torch.Tensor)\n assert len(batch_mask) == len(batch_gt_mask)\n\n b = batch_img.shape[0]\n h, w = batch_img.shape[2:]\n\n le = 3 if batch_gt_mask is None else 3 + 2\n out_image = np.zeros((h * le, w * b, 3), dtype='uint8')\n\n for i in range(b):\n img = batch_img[i]\n mask = batch_mask[i]\n\n img = img_denormalize_fn(img)\n img = tensor_to_numpy(img)\n img = render_image(img)\n mask = mask.cpu().numpy()\n mask = render_mask(mask, mask_palette)\n\n out_image[0:h, i * w:(i + 1) * w, :] = img\n out_image[1 * h:2 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n mask,\n blend_alpha=0.4)\n out_image[2 * h:3 * h, i * w:(i + 1) * w, :] = mask\n\n if batch_gt_mask is not None:\n gt_mask = batch_gt_mask[i]\n gt_mask = gt_mask.cpu().numpy()\n gt_mask = render_mask(gt_mask, mask_palette)\n out_image[3 * h:4 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n gt_mask,\n blend_alpha=0.4)\n out_image[4 * h:5 * h, i * w:(i + 1) * w, :] = gt_mask\n\n return out_image", "def process_batch(self, image_batch):\n images = []\n for image_data in image_batch:\n image_resize = cv2.resize(image_data, (0,0), fx=0.5, fy=0.5) #NOTE\n images.append(image_resize)\n\n return np.array(images)", "def get_batch(self, augmentor, batch_size=1):\n\n batch_image = []\n\n for _ in range(batch_size):\n image = scipy.misc.imread(name=random.choice(self.dataset), mode='RGB')\n\n if max(image.shape) > 1800.:\n image = scipy.misc.imresize(image, size=1800. / max(image.shape))\n if max(image.shape) < 800:\n # Resize the smallest side of the image to 800px\n alpha = 800. 
/ float(min(image.shape))\n if alpha < 4.:\n image = scipy.misc.imresize(image, size=alpha)\n image = np.expand_dims(image, axis=0)\n else:\n image = scipy.misc.imresize(image, size=[800, 800])\n\n if augmentor:\n batch_image.append(augmentor(image).astype(np.float32))\n else:\n batch_image.append((image).astype(np.float32))\n # Now return a batch in correct form\n batch_image = np.asarray(batch_image)\n return {\"image\": batch_image}", "def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):\n if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')\n\n if torch.is_tensor(tensor):\n tensor = [tensor]\n result = []\n for _tensor in tensor:\n _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)\n _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])\n\n n_dim = _tensor.dim()\n if n_dim == 4:\n img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()\n img_np = img_np.transpose(1, 2, 0)\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 3:\n img_np = _tensor.numpy()\n img_np = img_np.transpose(1, 2, 0)\n if img_np.shape[2] == 1: # gray image\n img_np = np.squeeze(img_np, axis=2)\n else:\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 2:\n img_np = _tensor.numpy()\n else:\n raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')\n if out_type == np.uint8:\n # Unlike MATLAB, numpy.unit8() WILL NOT round by default.\n img_np = (img_np * 255.0).round()\n img_np = img_np.astype(out_type)\n result.append(img_np)\n if len(result) == 1:\n result = result[0]\n return result", "def gen_batch_function(self, data_folder, image_shape):\n\n\t\tdef get_batches_fn(batch_size):\n\t\t\t#\n\t\t\timage_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n\t\t\t#\n\t\t\tlabel_paths = {\tre.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n\t\t\t\tfor path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n\t\t\t#\n\t\t\tbackground_color = np.array([255, 0, 0])\n\t\t\t#\n\t\t\trandom.shuffle(image_paths)\n\t\t\t#\n\t\t\tfor batch_i in range(0, len(image_paths), batch_size):\n\t\t\t\t#\n\t\t\t\timages = []\n\t\t\t\t#\n\t\t\t\tgt_images = []\n\t\t\t\t#\n\t\t\t\tfor image_file in image_paths[batch_i:batch_i+batch_size]:\n\t\t\t\t\t#\n\t\t\t\t\tgt_image_file = label_paths[os.path.basename(image_file)]\n\t\t\t\t\t#\n\t\t\t\t\timage = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = np.all(gt_image == background_color, axis=2)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\t\t\t\t\t#\n\t\t\t\t\timages.append(image)\n\t\t\t\t\t#\n\t\t\t\t\tgt_images.append(gt_image)\n\t\t\t\t#\n\t\t\t\tyield np.array(images), np.array(gt_images)\n\t\t#\n\t\treturn get_batches_fn", "def get_batches_fn(batch_size):\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = 
scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def __call__(self, name, batch):\n\n def _check_img(tag_img):\n tag, img = tag_img\n\n assert img.ndim == 2 or img.ndim == 3, 'Only 2D (HW) and 3D (CHW) images are accepted for display'\n\n if img.ndim == 2:\n img = np.expand_dims(img, axis=0)\n else:\n C = img.shape[0]\n assert C == 1 or C == 3, 'Only (1, H, W) or (3, H, W) images are supported'\n\n return tag, img\n\n tagged_images = self.process_batch(name, batch)\n\n return list(map(_check_img, tagged_images))", "def get_batches_fn(batch_size):\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n other_road_color = np.array([0,0,0])\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n image_flip = np.flip(image, axis=1)\n \n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n gt_image_flip = np.flip(gt_image, axis=1)\n \n #---------- classification : single road---------------------\n #gt_bg = np.all(gt_image == background_color, axis=2)\n #gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n #gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n #------------------------------------------------------------\n \n \n #---------- classification : multi road----------------------\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg_flip = np.all(gt_image_flip == background_color, axis=2)\n \n # road segment\n road_mask = ((gt_image == other_road_color) | (gt_image == background_color))\n gt_road = np.invert(np.all(road_mask, axis=2))\n \n # flip of road segment\n road_mask_flip = ((gt_image_flip == other_road_color) | (gt_image_flip == background_color))\n gt_road_flip = np.invert(np.all(road_mask_flip, axis=2))\n \n # other_road segment\n oher_road_mask = (gt_image == other_road_color)\n gt_other_road = np.all(oher_road_mask, axis=2)\n \n # flip of other_road segment\n other_road_mask_flip = (gt_image_flip == other_road_color)\n gt_oher_road_flip = np.all(other_road_mask_flip, axis=2)\n\n # reshaping segments\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_other_road = gt_other_road.reshape(*gt_other_road.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n \n # reshaping flip segments\n gt_bg_flip = gt_bg_flip.reshape(*gt_bg_flip.shape, 1)\n gt_oher_road_flip = gt_oher_road_flip.reshape(*gt_oher_road_flip.shape, 1)\n gt_road_flip = gt_road_flip.reshape(*gt_road_flip.shape, 1)\n \n # concatenating classes bg, road, other_road\n gt_image = np.concatenate((gt_bg, gt_road, gt_other_road), axis=2)\n gt_image_flip = np.concatenate((gt_bg_flip, gt_road_flip, gt_oher_road_flip), axis=2)\n \n images.append(image)\n images.append(image_flip)\n \n gt_images.append(gt_image)\n 
gt_images.append(gt_image_flip)\n\n yield np.array(images), np.array(gt_images)", "def get_images_batch(self, batch_size):\n images = []\n labels = []\n num_classes = len(self.samples_per_class.keys())\n if batch_size < num_classes:\n raise Exception(\"Batch smaller than the number of classes!\")\n rest = batch_size % num_classes\n idxs = []\n if rest == 0:\n num_samples_per_class = batch_size // num_classes\n for key in self.samples_per_class.keys():\n idxs = np.hstack((\n idxs,\n np.random.choice(self.samples_per_class[key], num_samples_per_class)\n ))\n else:\n num_samples_per_class = np.hstack((\n np.full(rest, 1 + (batch_size // num_classes)),\n np.full(num_classes - rest, batch_size // num_classes)\n ))\n for ikey, key in enumerate(self.samples_per_class):\n idxs = np.hstack((\n idxs,\n np.random.choice(self.samples_per_class[key], [num_samples_per_class[ikey]])\n ))\n for idx in idxs:\n imgFilename = os.path.join(os.path.dirname(\n self.summary_manager.current_labelgui_summary_filepath),\n idx)\n images.append(self.image_preprocessor(imageio.imread(imgFilename)))\n labels.append(\n tuple(self.dataframe_labeled_samples.loc[idx][self.class_names].values.astype('float')))\n\n images = np.asarray(images)\n labels = np.asarray(labels, 'int')\n return images, labels", "def tiled_images(self, name, tensor, data_format='channels_last', **kwargs):\r\n with tf.name_scope('viz_featuremaps'):\r\n if data_format == 'channels_first':\r\n # N x C x H x W\r\n tensor = tf.transpose(tensor, perm=(0, 2, 3, 1))\r\n # N x H x W x C\r\n tensor = tf.transpose(tensor, perm=(1, 2, 3, 0))\r\n # H x W x C x N\r\n self._4d_tensor(name, tensor, **kwargs)", "def batch_image_from_GPU_tensor(tensor):\n image = tensor.cpu()\n N,C,H,W = image.size()\n # to deal with batch images\n image_list =[]\n for idx in range(N):\n one_image = image[idx,:,:,:]\n one_image = torch.squeeze(one_image,dim=0)\n one_image = 0.5 * image + 0.5 # [-1, 1] --> [0, 1]\n one_image = transforms.ToPILImage()(image) # [0, 1] --> [0, 255]\n image_list.append(one_image)\n return image_list", "def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)", "def adapt_batch(batch):\n image_arrays, labellings = batch\n\n current_batch_size = len(labellings)\n\n images = np.array(image_arrays).reshape(current_batch_size, *image_arrays[0].shape)\n\n padded_labellings = pad_labellings(labellings)\n\n labels = np.array(padded_labellings, dtype=np.int32).reshape(current_batch_size, -1)\n\n input_lengths = compute_input_lengths(image_arrays)\n\n label_lengths = np.array([len(labelling) for labelling in labellings],\n dtype=np.int32).reshape(current_batch_size, 1)\n\n return [images, labels, input_lengths, label_lengths], labels", "def get_batches_fn(batch_size):\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, 
:, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def preds(self, input_batch):\n raw_output = self._create_network(tf.cast(input_batch, tf.float32), dropout = self.dropout, is_training = self.is_training)\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(input_batch)[1:3, ])\n raw_output = tf.argmax(raw_output, axis=3)\n raw_output = tf.expand_dims(raw_output, axis=3) # Create 4D-tensor.\n return tf.cast(raw_output, tf.uint8)", "def get_batch(self,batch_size,s=\"train\"):\n X=self.data[s]\n n_classes, n_examples, w, h, col = X.shape\n\n #randomly sample several classes to use in the batch\n #categories = rng.choice(n_classes,size=(batch_size,),replace=False)\n categories = rng.choice(n_classes,size=(batch_size,))\n #initialize 2 empty arrays for the input image batch\n pairs=[np.zeros((batch_size, h, w,3)) for i in range(2)]\n #initialize vector for the targets, and make one half of it '1's, so 2nd half of batch has same class\n targets=np.zeros((batch_size,))\n targets[batch_size//2:] = 1\n for i in range(batch_size):\n category = categories[i]\n idx_1 = rng.randint(0, n_examples)\n pairs[0][i,:,:,:] = X[category, idx_1].reshape(w, h, 3)\n idx_2 = rng.randint(0, n_examples)\n #pick images of same class for 1st half, different for 2nd\n if i >= batch_size // 2:\n category_2 = category \n else: \n #add a random number to the category modulo n classes to ensure 2nd image has\n # ..different category\n category_2 = (category + rng.randint(1,n_classes)) % n_classes\n pairs[1][i,:,:,:] = X[category_2,idx_2].reshape(w, h,3)\n return pairs, targets", "def get_batch(batch_size, s=\"train\"):\n if s == \"train\":\n X = Xtrain\n categories = train_classes\n else:\n X = Xtest\n categories = test_classes\n\n n_classes, n_examples, w, h = X.shape\n\n # randomly sample several classes to use in the batch\n categories = rng.choice(n_classes, size=(batch_size), replace=False)\n\n # Initial 2 empty arrays for the input image_batch\n pairs = [np.zeros((batch_size, h, w, 1)) for i in range(2)]\n # initialize vector fo the targets\n targets = np.zeros((batch_size, 1))\n\n # make one half of it \"1\"s so 2nd half of batch has same class\n targets[batch_size // 2:] = 1\n\n\n for i in range(batch_size):\n category = categories[i]\n idx_1 = rng.randint(0, n_examples)\n pairs[0][i, :, :, :] = X[category, idx_1].reshape(w, h, 1)\n idx_2 = rng.randint(0, n_examples)\n\n # pick images of same class for 1st half, different for 2nd\n if i >= batch_size // 2:\n category_2 = category\n else:\n # add a random number to the category modulo n classes to ensure 2nd image has a different category\n category_2 = (category + rng.randint(1, n_classes)) % n_classes\n\n pairs[1][i, :, :, :] = X[category_2, idx_2].reshape(w, h, 1)\n\n return pairs, targets", "def generate_batch(model, batch_size, test_data=False):\n if model == 'cnn':\n as_image = True\n else:\n as_image = False\n\n image = _read_images(test_data=test_data, as_image=as_image)\n label = 
_read_labels(test_data=test_data)\n\n images_batch, labels_batch = tf.train.batch([image, label],\n batch_size = batch_size,\n num_threads = 1,\n capacity = batch_size * 8)\n\n return images_batch, tf.reshape(labels_batch, [batch_size])", "def get_batch(batch_size,s=\"train\"):\n\n if s == 'train':\n X = Xtrain # X training input\n categories = train_classes # y categories\n else:\n X = Xval # X validation input\n categories = val_classes # y categories\n\n n_classes, n_examples, w, h = X.shape[0], X.shape[1], X.shape[2], X.shape[3]\n\n # randomly sample several classes to use in the batch of size n\n categories = rng.choice(n_classes,size=(batch_size,),replace=False)\n \n # initialize 2 empty arrays for the input image batch\n pairs=[np.zeros((batch_size, h, w,1)) for i in range(2)]\n \n # initialize vector for the targets\n targets=np.zeros((batch_size,))\n \n # one half of is full of '1's and 2nd half of batch has same class\n\n targets[batch_size//2:] = 1\n for i in range(batch_size):\n category = categories[i]\n idx_1 = rng.randint(0, n_examples)\n pairs[0][i,:,:,:] = X[category, idx_1].reshape(w, h, 1)\n idx_2 = rng.randint(0, n_examples)\n \n # pick images of same class for 1st half, different for 2nd\n if i >= batch_size // 2:\n category_2 = category \n else: \n # add a random number to the category modulo n classes to ensure 2nd image has a different category\n category_2 = (category + rng.randint(1,n_classes)) % n_classes\n \n pairs[1][i,:,:,:] = X[category_2,idx_2].reshape(w, h,1)\n\n \n return pairs, targets", "def prepare_batch(batch, device=None, non_blocking=False):\n\timages, target = batch\n\treturn [convert_tensor(image, device=device, non_blocking=non_blocking) for image in images], \\\n\t convert_tensor(target, device=device, non_blocking=non_blocking)", "def _batchify(batch):\n im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9 = zip(*batch)\n im0 = nd.stack(*im0)\n im1 = nd.stack(*im1)\n im2 = nd.stack(*im2)\n im3 = nd.stack(*im3)\n im4 = nd.stack(*im4)\n im5 = nd.stack(*im5)\n im6 = nd.stack(*im6)\n im7 = nd.stack(*im7)\n im8 = nd.stack(*im8)\n im9 = nd.stack(*im9)\n return im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9", "def batch_generate(self, inputs, labels, batch_size=64):\n inputs_image, inputs, labels = check_inputs_labels(inputs, labels)\n arr_x = inputs\n arr_y = labels\n len_x = inputs_image.shape[0]\n batch_size = check_int_positive('batch_size', batch_size)\n batches = int(len_x / batch_size)\n rest = len_x - batches*batch_size\n res = []\n for i in range(batches):\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[i*batch_size: (i + 1)*batch_size] for sub_items in arr_x])\n else:\n x_batch = arr_x[i*batch_size: (i + 1)*batch_size]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[i*batch_size: (i + 1)*batch_size] for sub_labels in arr_y])\n else:\n y_batch = arr_y[i*batch_size: (i + 1)*batch_size]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n if rest != 0:\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[batches*batch_size:] for sub_items in arr_x])\n else:\n x_batch = arr_x[batches*batch_size:]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[batches*batch_size:] for sub_labels in arr_y])\n else:\n y_batch = arr_y[batches*batch_size:]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] 
if isinstance(adv_x, tuple) else adv_x)\n\n adv_x = np.concatenate(res, axis=0)\n return adv_x", "def preprocess_batch(images_batch, preproc_func=None):\n if preproc_func is None:\n return images_batch\n\n with tf.variable_scope('preprocess'):\n images_list = tf.split(images_batch, int(images_batch.shape[0]))\n result_list = []\n for img in images_list:\n reshaped_img = tf.reshape(img, img.shape[1:])\n processed_img = preproc_func(reshaped_img)\n result_list.append(tf.expand_dims(processed_img, axis=0))\n result_images = tf.concat(result_list, axis=0)\n return result_images", "def load_images(input_dir, batch_shape=[2000,299,299,3]):\n \n filenames = []\n idx = 0\n filepaths=tf.gfile.Glob(os.path.join('./', '*.png'))\n print(len(filepaths))\n print(filepaths)\n batch_shape[0]=len(filepaths)\n batch_size = batch_shape[0]\n print(batch_shape)\n print(\"ZZZ\")\n images = np.zeros(batch_shape, dtype=np.float32)\n \n for filepath in filepaths:\n# with tf.gfile.Open(filepath) as f:\n# image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n image = np.array(scipy.misc.imresize(scipy.misc.imread(filepath),(299,299)),dtype=np.float32)/255\n \n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image -0.5 #* 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n return filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n return filenames, images", "def my_generator(batch_size, img_dir):\n cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.height, config.width, 3 * 5))\n output_images = np.zeros((batch_size, config.height, config.width, 3))\n random.shuffle(cat_dirs)\n if (counter+batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-4]*\") \n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[5-7]*\")\n imgs = [Image.open(img) for img in sorted(output_imgs)]\n output_images[i] = np.concatenate(imgs, axis=1)\n input_images[i] /= 255.\n output_images[i] /= 255.\n yield (input_images, output_images)\n counter += batch_size", "def generate_images(generator_model, output_dir, epoch, n_class, batch_size=128):\n label = np.random.randint(0, n_class, size=(batch_size, 1))\n label = np_utils.to_categorical(label, num_classes=n_class)\n test_image_stack = generator_model.predict([np.random.rand(10, 100), label])\n test_image_stack = (test_image_stack * 127.5) + 127.5\n test_image_stack = np.squeeze(np.round(test_image_stack).astype(np.uint8))\n tiled_output = tile_images(test_image_stack)\n tiled_output = Image.fromarray(tiled_output, mode='L') # L specifies greyscale\n outfile = os.path.join(output_dir, 'epoch_{}.png'.format(epoch))\n tiled_output.save(outfile)", "def next_batch(self, batch_size):\n # Get next batch of image (path) and labels\n paths = self.train_images[self.train_pointer:self.train_pointer + batch_size]\n labels = self.train_labels[self.train_pointer:self.train_pointer + batch_size]\n # update train_pointer\n self.train_pointer += batch_size\n\n # Read images\n images = np.ndarray([batch_size, self.scale_size[0], self.scale_size[1], 3])\n for i in range(len(paths)):\n #print(paths[i])\n img = utils.load_image(paths[i])\n #img = cv2.imread(paths[i])\n # flip image at random if flag is selected\n 
if self.horizontal_flip and np.random.random() < 0.5:\n img = cv2.flip(img, 1)\n # rescale image\n #img = cv2.resize(img, (self.scale_size[0], self.scale_size[1]))\n #utils.load_image()\n #img = img.astype(np.float32)\n\n # subtract mean\n #img -= self.mean\n\n images[i] = img\n\n # Expand labels to one hot encoding\n one_hot_labels = np.zeros((batch_size, self.n_classes))\n for i in range(len(labels)):\n one_hot_labels[i][labels[i]] = 1\n\n # return array of images and labels\n return images, one_hot_labels", "def gen_batch_function(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n other_road_color = np.array([0,0,0])\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n image_flip = np.flip(image, axis=1)\n \n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n gt_image_flip = np.flip(gt_image, axis=1)\n \n #---------- classification : single road---------------------\n #gt_bg = np.all(gt_image == background_color, axis=2)\n #gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n #gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n #------------------------------------------------------------\n \n \n #---------- classification : multi road----------------------\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg_flip = np.all(gt_image_flip == background_color, axis=2)\n \n # road segment\n road_mask = ((gt_image == other_road_color) | (gt_image == background_color))\n gt_road = np.invert(np.all(road_mask, axis=2))\n \n # flip of road segment\n road_mask_flip = ((gt_image_flip == other_road_color) | (gt_image_flip == background_color))\n gt_road_flip = np.invert(np.all(road_mask_flip, axis=2))\n \n # other_road segment\n oher_road_mask = (gt_image == other_road_color)\n gt_other_road = np.all(oher_road_mask, axis=2)\n \n # flip of other_road segment\n other_road_mask_flip = (gt_image_flip == other_road_color)\n gt_oher_road_flip = np.all(other_road_mask_flip, axis=2)\n\n # reshaping segments\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_other_road = gt_other_road.reshape(*gt_other_road.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n \n # reshaping flip segments\n gt_bg_flip = gt_bg_flip.reshape(*gt_bg_flip.shape, 1)\n gt_oher_road_flip = gt_oher_road_flip.reshape(*gt_oher_road_flip.shape, 1)\n gt_road_flip = gt_road_flip.reshape(*gt_road_flip.shape, 1)\n \n # concatenating classes bg, road, other_road\n gt_image = np.concatenate((gt_bg, gt_road, gt_other_road), axis=2)\n gt_image_flip = np.concatenate((gt_bg_flip, gt_road_flip, gt_oher_road_flip), axis=2)\n \n images.append(image)\n images.append(image_flip)\n \n gt_images.append(gt_image)\n gt_images.append(gt_image_flip)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn" ]
[ "0.5889001", "0.5834307", "0.5763898", "0.5548405", "0.54911685", "0.5404486", "0.5354863", "0.53391266", "0.52845913", "0.52194244", "0.50857246", "0.5081543", "0.50766444", "0.50407875", "0.50406784", "0.5035221", "0.5017781", "0.5017628", "0.50161916", "0.50143474", "0.50106484", "0.5008644", "0.49978873", "0.49782214", "0.49336508", "0.49336243", "0.4916725", "0.49045512", "0.48961982", "0.4876616" ]
0.7126705
0
Archiving the source folder, the training script and environment info. The training script is saved with the prefix '0' to distinguish from regular scripts.
def archive_backup(self): # Archiving the Training script shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path)) os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755) # Archiving the src folder pkg_path = os.path.dirname(arch_src) backup_path = os.path.join(self.save_path, 'src_backup') shutil.make_archive(backup_path, 'gztar', pkg_path) # Archiving the Environment Info env_info = collect_env.get_pretty_env_info() with open(self.save_path + '/env_info.txt', 'w') as f: f.write(env_info)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_gen(self):\r\n current_path = os.path.join(self.settings.save_path, 'current.json')\r\n current_folder_path = os.path.join(self.settings.save_path, 'current')\r\n history_path = os.path.join(self.settings.save_path, 'history')\r\n archive_folder_path = os.path.join(history_path, f'gen{self.generation}')\r\n archive_path = os.path.join(archive_folder_path, 'current') # no ending allowed\r\n archive_json_path = os.path.join(archive_folder_path, 'current.json')\r\n\r\n\r\n if not os.path.exists(current_path):\r\n raise FileNotFoundError\r\n if not os.path.exists(current_folder_path):\r\n raise FileNotFoundError\r\n\r\n os.makedirs(history_path, exist_ok=True)\r\n os.makedirs(archive_folder_path)\r\n\r\n cwd = os.getcwd()\r\n shutil.make_archive(archive_path, 'zip', current_folder_path)\r\n os.chdir(cwd)\r\n shutil.rmtree(current_folder_path, onerror=_ignore_del_dir_failure)\r\n os.chdir(cwd)\r\n\r\n os.rename(current_path, archive_json_path)", "def create_zip_file():\n shutil.make_archive(os.path.join(DIST_DIR, \"build\"), \"zip\", BUILD_DIR)", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)", "def dumpf(self, gzip=False):\n if 0 != len(self.sources):\n os.mkdir(self.name)\n filename = os.path.join(self.name, 'bootstrap.sh')\n f = codecs.open(filename, 'w', encoding='utf-8')\n elif gzip:\n filename = '{0}.sh.gz'.format(self.name)\n f = gziplib.open(filename, 'w')\n else:\n filename = '{0}.sh'.format(self.name)\n f = codecs.open(filename, 'w', encoding='utf-8')\n f.write(self.comment)\n f.write('cd \"$(dirname \"$0\")\"\\n')\n for filename2, content in sorted(self.sources.iteritems()):\n f2 = open(os.path.join(self.name, filename2), 'w')\n f2.write(content)\n f2.close()\n for out in self.out:\n f.write(out)\n f.close()\n if gzip and 0 != len(self.sources):\n filename = 'sh-{0}.tar.gz'.format(self.name)\n tarball = tarfile.open(filename, 'w:gz')\n tarball.add(self.name)\n tarball.close()\n return filename\n return filename", "def archive_experiment(experiment_dir: str,\n dst_dir: str,\n save_extensions: Union[str, Sequence[str]]='py',\n exclude_dirs: Union[str, Sequence[str]]='output',\n archive_format: str='zip',\n base_name: Optional[str]=None):\n # Format save_extensions for consistency\n # Make into a sequence\n if isinstance(save_extensions, str):\n save_extensions = [save_extensions]\n # Drop any .'s\n save_extensions = [s.strip('.') for s in save_extensions]\n # Format exclude_dirs for consistency\n if isinstance(exclude_dirs, str):\n exclude_dirs = [exclude_dirs]\n # Get default base name\n if base_name is None:\n experiment_path = os.path.abspath(experiment_dir)\n base_name = [p for p in experiment_path.split('/') if p][-1]\n\n # Full name of the archive name uses a time stamp\n timestamp = time.strftime('%b%d%Y_%H%M%S')\n archive_name = f'{base_name}_{timestamp}'\n\n # Use a temporary folder to create the archive\n tmp_folder = f'/tmp/{str(uuid.uuid4())}'\n if os.path.exists(tmp_folder):\n shutil.rmtree(tmp_folder)\n os.makedirs(tmp_folder)\n tmp_experiment = os.path.join(tmp_folder, archive_name)\n os.makedirs(tmp_experiment)\n\n # Recurse through the 
experiment directory and non-'output' subdirectories,\n # saving files to the temporary folder\n dirs_to_check = [experiment_dir]\n while len(dirs_to_check) > 0:\n # A directory to check (DTC), relative to the experiment_dir\n dtc = dirs_to_check.pop(0)\n # Full path to the DTC\n full_dtc = dtc if dtc == experiment_dir \\\n else os.path.join(experiment_dir, dtc)\n # List of all files and folders in the DTC\n dlist = os.listdir(full_dtc)\n # List of all files in the DTC\n files = [d for d in dlist\n if os.path.isfile(os.path.join(full_dtc, d))]\n # Check each file to see if it should be archived.\n for f in files:\n if f.split('.')[-1] in save_extensions:\n # Recreate the file structure inside experiment_dir, up to\n # the folder containing f\n tmp_save_dir = tmp_experiment if dtc == experiment_dir \\\n else os.path.join(tmp_experiment, dtc)\n os.makedirs(tmp_save_dir, exist_ok=True)\n # Save a copy of f\n shutil.copy2(os.path.join(full_dtc, f), tmp_save_dir)\n\n # Get non-excluded subdirectories\n subdirs = [d for d in dlist\n if os.path.isdir(os.path.join(full_dtc, d))\n and d not in exclude_dirs]\n # Track subdirectories as paths relative to the experiment dir\n if dtc != experiment_dir and len(subdirs) > 0:\n subdirs = [os.path.join(dtc, d) for d in subdirs]\n\n dirs_to_check += subdirs\n\n # At this point, all archivable files and folders are saved in tmp_folder.\n # Create an archive, coincidentally the same name as tmp_experiment's path\n tmp_archive = tmp_experiment[:]\n shutil.make_archive(tmp_archive, archive_format, tmp_folder, archive_name)\n # Get the full name of the archive. There should only be one file in\n # tmp_experiment\n tmp_archive_full = [f for f in os.listdir(tmp_folder)\n if os.path.isfile(os.path.join(tmp_folder, f))][0]\n # Copy the archive to its destination\n os.makedirs(dst_dir, exist_ok=True)\n shutil.move(os.path.join(tmp_folder, tmp_archive_full),\n os.path.join(dst_dir, tmp_archive_full),\n copy_function=shutil.copyfile)\n # Remove the temporary folder\n shutil.rmtree(tmp_folder)\n\n pass", "def extract_to_disk(self):\n archive_name, extension = os.path.splitext(os.path.basename(self.file.name))\n if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):\n os.mkdir(archive_name)\n os.chdir(archive_name)\n for filename, data in self.extract().items():\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()", "def main(training_file_name):\n attribute, inverse = build_classifier(training_file_name)\n trained_file = open(TRAINED_FILE_NAME, mode='w')\n prolog(trained_file)\n write_body(trained_file, attribute, inverse)\n epilog(trained_file)", "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def generate_test_dataset_archive(filepath, dataset):\n\n # 'file:///some/path' to '/some/path'\n if filepath[:7] == 'file://':\n filepath = filepath[7:]\n\n # Check if the dataset exists.\n # When not been generate it.\n if not os.path.isfile(filepath):\n\n print(\"Generating\", filepath)\n data = get_test_dataset(dataset)\n \n ensure_dir(os.path.dirname(filepath))\n idxgz.save(filepath, data)", "def test_archive_run(self):\n pass", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in 
files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def _archive(self):\n # LOG: change this to something archive specific\n self.set_property('processing_type', 'archive')\n self.should_copy = False\n self.is_recursive = True", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)", "def upload_resources(self):\n try:\n script_name, script_type, script_content = experiment_utils.get_source_script(self._context_symbol_table)\n\n if script_name:\n self.exp_metadata.script_name = script_name\n\n if script_content:\n file_utils.save_string(os.path.join(self.output_path, script_name), script_content)\n\n if script_type:\n self.exp_metadata.script_type = script_type\n\n except Exception as e:\n self.log.info(\"Failed to get script: \" + str(e))\n\n # TODO zip git directory here as well?\n\n if not self._env.is_connected():\n self.log.warning(\"Environment is not connected to Lab. Experiment data cannot be uploaded.\")\n return\n\n if self._tensorboard_path and os.path.isdir(self._tensorboard_path):\n remote_path = os.path.join(self.key,\n file_utils.get_folder_name(self._tensorboard_path) + \".zip\")\n tensorboard_key = self._env.upload_folder(self._tensorboard_path,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n if tensorboard_key:\n self.exp_metadata.resources.tensorboard_logs = tensorboard_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(tensorboard_key)\n\n if os.path.isfile(self.stdout_path):\n remote_path = os.path.join(self.key, file_utils.get_filename(self.stdout_path, exclude_extension=False))\n stdout_key = self._env.upload_file(self.stdout_path,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n\n if stdout_key:\n self.exp_metadata.resources.stdout = stdout_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(stdout_key)\n\n if self.upload_code_script:\n # upload script file if available -> if file name was set and file exists in local folder\n if self.exp_metadata.script_name:\n script_path = os.path.join(self.output_path, self.exp_metadata.script_name)\n if os.path.isfile(script_path):\n remote_path = os.path.join(self.key, file_utils.get_filename(script_path, exclude_extension=False))\n script_file_key = self._env.upload_file(script_path,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n\n if script_file_key:\n self.exp_metadata.resources.source_script = script_file_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(script_file_key)\n\n if self.upload_code_repo:\n # upload git repository if available\n git_root_dir = experiment_utils.get_git_root(self._exp_script_dir)\n if git_root_dir:\n # zip git repository with all files under 50 MB and ignore .git and environment folder\n zipped_repo = file_handler_utils.zip_folder(git_root_dir, max_file_size=50,\n excluded_folders=[\"environment\", \".git\"])\n if zipped_repo:\n remote_path = os.path.join(self.key,\n self._SOURCE_CODE_PACKAGE_NAME) # use original folder name?\n source_code_key = self._env.upload_file(zipped_repo,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n\n if source_code_key:\n self.exp_metadata.resources.source_code = 
source_code_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(source_code_key)", "def start(self):\n self.dst = self.dst or os.path.basename(os.path.normpath(self.src))\n\n if not os.path.isdir(self.src):\n raise Exception(\"{} is not a folder\".format(self.src))\n\n self.output = open(self.dst, \"wb\")\n self.build()\n\n print \"Save application to {}\".format(self.dst)", "def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')", "def main():\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)", "def save_arch(model, save_folder):\n with open(save_folder + '/architecture.txt','w') as a_save:\n model.summary(print_fn=lambda x: a_save.write(x + '\\n'))", "def save(self, directory='saves/'):\n # Create dirpath for temporary dir\n if directory[-1] != '/':\n directory += '/'\n dirpath = directory + self.name + '/'\n\n if not os.path.exists(dirpath): \n os.makedirs(dirpath)\n else:\n raise Exception(f'Path {dirpath} already exists.')\n\n # DQNs & Optimizer\n torch.save(self.policy_net.state_dict(), f'{dirpath}dqn.pth')\n torch.save(self.optimizer.state_dict(), f'{dirpath}optimizer.pth')\n\n # Trainer pamameters\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n\n with open(f'{dirpath}trainer_parameters.pick', 'wb') as file:\n pickle.dump(params, file)\n\n # Zip the saves in one .zip archive\n zippath = f'{directory}{self.name}'\n shutil.make_archive(zippath, 'zip', dirpath)\n\n # Remove the directory dirpath and files inside\n shutil.rmtree(dirpath)\n\n # Display\n print(f'Model saved at {zippath}.zip')", "def archive(self):\n suffixStart = len(self._filename)\n suffixEnd = suffixStart + 4\n archiveFile = osp.join(self.currentRecordingPath, self._filename + \".zip\")\n with ZipFile(archiveFile, 'w') as recarchive:\n for recfile in listdir(self.currentRecordingPath):\n if recfile.endswith(\".rec\", suffixStart, suffixEnd):\n recarchive.write(osp.join(self.currentRecordingPath, recfile), recfile)\n self._logger.info(\"Archive file has been created {}\".format(archiveFile))", "def save_deployed(platform='default'):\n print('***DEPLOY started at {}'.format(ctime(time())))\n version_num = input(\n 'Version number to save to. Don\\'t use parens, like 1.2(a). '\n 'Use just numbers, letters, and dots, like: 1.2.4a.: ')\n\n from_base_dir = join(exported_dir, project_name, platform)\n to_base_dir = join(deployed_dir, project_name, platform, str(version_num))\n\n if isdir(to_base_dir):\n resp = None\n while not resp:\n resp = input('Directory {} already exists. 
Delete it (y/n): '\n .format(to_base_dir)).lower()\n if resp not in ['y', 'n']:\n resp = None\n if resp == 'y':\n with settings(warn_only=True):\n result = local(' '.join(['rm -r', to_base_dir]))\n if result.return_code <= 1:\n pass\n else:\n print(result)\n raise SystemExit()\n else:\n quit()\n\n local(' '.join(['mkdir', to_base_dir]))\n local(' '.join(['mkdir', join(to_base_dir, 'obfuscated')]))\n local(' '.join(['mkdir', join(to_base_dir, 'unobfuscated')]))\n local(' '.join(['mkdir', join(to_base_dir, 'db')]))\n\n # Copy obfuscated program\n with lcd(join(to_base_dir, 'obfuscated')):\n local(' '.join(['cp -R', join(from_base_dir, 'obfuscated', '*'), '.']))\n\n # Copy unobfuscated program\n with lcd(join(to_base_dir, 'unobfuscated')):\n local(' '.join(['cp -R',\n join(from_base_dir, 'unobfuscated', '*'), '.']))\n\n # Copy db\n with lcd(join(to_base_dir, 'db')):\n local(' '.join(['cp -R', join(from_base_dir, 'db', '*'), '.']))\n\n print('***DEPLOY ended at {}'.format(ctime(time())))\n return True", "def compress_experiment(self, exp_id):\n exp_folder = self.um.experiment_path(str(exp_id))[:-1]\n exp_folder = os.path.join(os.path.dirname(\n os.path.realpath(__file__)), exp_folder)\n archive_name = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"backup\", str(exp_id)+\".zip\")\n\n print exp_folder, archive_name\n retval = self.um.compress_folder_zip(exp_folder, archive_name)\n if retval:\n return \"Success\"\n else:\n return \"Failure\"", "def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)", "def finalise(self):\n self.logger.info(\"Saving final versions of model...\")\n self.save_checkpoint(filename='final.pth.tar')", "def run(self):\n self.archive_bash_inits()\n self.create_paths()\n self.copy_files()\n self.make_git_config()\n self.ensure_bash_history()", "def create_backup_file(self, source_dir, archive_file):\n tar_file = tarfile.open(archive_file, 'w|gz')\n try:\n tar_file.add(source_dir)\n finally:\n tar_file.close()", "def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def copy_output_to_archive(wcl, jobfiles, fileinfo, level, task_label, exitcode):\n # fileinfo[filename] = {filename, fullname, sectname}\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n putinfo = {}\n\n\n # check each output file definition to see if should save file\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"Checking for save_file_archive\")\n\n for (filename, fdict) in fileinfo.items():\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"filename %s, fdict=%s\" % (filename, fdict))\n (filename, compression) = miscutils.parse_fullname(fdict['fullname'],\n miscutils.CU_PARSE_FILENAME|miscutils.CU_PARSE_COMPRESSION)\n\n putinfo[filename] = {'src': fdict['fullname'],\n 'compression': compression,\n 'filename': filename,\n 'filetype': 
fdict['filetype'],\n 'filesave': fdict['filesave'],\n 'filecompress': fdict['filecompress'],\n 'path': fdict['path']}\n\n transfer_job_to_archives(wcl, jobfiles, putinfo, level, task_label, exitcode)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")" ]
[ "0.55677986", "0.5472406", "0.53718376", "0.53715956", "0.5341127", "0.53302145", "0.5305708", "0.52769345", "0.52551335", "0.5242333", "0.523944", "0.52085453", "0.5205941", "0.51825994", "0.51786375", "0.5160358", "0.5140444", "0.5090497", "0.50718933", "0.50677264", "0.506256", "0.5060551", "0.5053968", "0.50437355", "0.5032511", "0.5027427", "0.5018795", "0.50137234", "0.5011542", "0.5008494" ]
0.78881186
0
Returns [opcode, immediate1, 2, 3].
def decode_opcode(self, packed_opcode): packed_opcode = str(packed_opcode) while(len(packed_opcode) != 5): packed_opcode = '0' + packed_opcode return [int(packed_opcode[3:]), packed_opcode[2] == '1', packed_opcode[1] == '1', packed_opcode[0] == '1']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_ops(opcodes):\n output = []\n for op in opcodes:\n if op in [\"+\", \"*\"]:\n b = output.pop(-1)\n a = output.pop(-1)\n value = ops[op](a, b)\n output.append(value)\n else:\n output.append(op)\n\n assert len(output) == 1\n return output[0]", "def return_middle_registers(self, opcode):\n registers = (opcode & 0x0FF0) >> 4\n register_x = (registers & 0xF0) >> 4\n register_y = registers & 0x0F\n return (register_x, register_y)", "def locctr_increamenter(opcode, operand):\n if opcode == \"base\" or opcode == \"nobase\":\n return [0, True]\n if opcode == \"resw\":\n try:\n value = int(operand)\n except ValueError:\n print(\"operand is not a number\")\n return False\n temp = int(operand) * 3\n return [temp, True]\n if opcode == \"resb\":\n try:\n \n value = int(operand)\n \n except ValueError:\n print(\"operand is not a number\")\n return False\n return [value, True]\n if opcode == \"word\":\n return [3, True]\n if opcode == \"byte\":\n value = operand.partition(\"'\")[-1].rpartition(\"'\")[0]\n temp = len(value)\n if operand[0].lower() == 'x':\n if temp % 2 == 0:\n return [temp // 2, True]\n else:\n return [(temp + 1) // 2, True]\n elif operand[0].lower() == 'c':\n return [temp, True]\n else:\n no_of_bits = math.ceil(math.log(int(operand),2))\n bytes = math.ceil(no_of_bits / 8)\n return [bytes, True]\n\n if opcode == \"rsub\":\n return [3, False]\n if opcode.find('+') != -1:\n return [4, False]\n if operand.find(\",\") != -1:\n str = operand.split(\",\")\n if str[1].lower() == 'x':\n return [3, False]\n return [2, False]\n if opcode == 'clear' or opcode == 'svc' or opcode == 'tixr':\n return [2, False]\n if operand.isspace():\n return [1, False]\n return [3, False]", "def opcode_list(self, script):\n opcodes = []\n new_pc = 0\n try:\n for opcode, data, pc, new_pc in self.get_opcodes(script):\n opcodes.append(self.disassemble_for_opcode_data(opcode, data))\n except ScriptError:\n opcodes.append(binascii.hexlify(script[new_pc:]).decode(\"utf8\"))\n\n return opcodes", "def read_opcode(self) -> int:\n\n offset = self.current_address - self.STARTING_ADDRESS\n\n if self.insight:\n self.insight.opcode(self.rom_data, offset)\n\n return self.rom_data[offset] << 8 | self.rom_data[offset + 1]", "def GetInstructionList():\n return [i[0] for i in ida_idp.ph_get_instruc() if i[0]]", "def decodeOpcode(opcode):\n\tresult = []\n\n\t# mnemonic\n\tid = opcode & ((1 << OPCODE_BITS) - 1)\n\tif id not in inverse:\n\t\traise ValueError('Unknown mnemonic')\n\n\tmnemonic = inverse[id]\n\tresult.append(mnemonic)\n\tmeta = mnemonics[mnemonic]\n\n\t# destination\n\tresult.append([None, operand.Register][mnemonics[mnemonic][0]])\n\n\t# arguments\n\tbit = 0x80\n\tfor i in xrange(1, len(meta)):\n\t\t# No params\n\t\tif len(meta[i]) == 0:\n\t\t\tcontinue\n\n\t\t# only one type\n\t\tif len(meta[i]) == 1:\n\t\t\tresult.append(meta[i][0])\n\t\t\tcontinue\n\n\t\t# two possible types\n\t\twhich = (opcode & bit) != 0\n\t\tresult.append(meta[i][which])\n\t\tbit >>= 1\n\n\treturn result", "def opcode(self):\n return struct.unpack('<H', self.pkt.payload[0:2])[0]", "def get_opcode(self, code):\r\n opcode = int(str(code)[-2:])\r\n return opcode", "def disassemble(self, script):\n return ' '.join(self.opcode_list(script))", "def read_opcode_info(filename):\r\n \r\n # Open file and read it into a list of lines.\r\n fin = open(filename, \"r\")\r\n lines = fin.readlines()\r\n fin.close()\r\n \r\n # We'll build a table with 256 entries, one per opcode.\r\n info = [[]] * 256;\r\n\r\n for line in lines:\r\n [opcode, _, line] = 
line.partition('\\t')\r\n [nbytes, _, line] = line.partition('\\t')\r\n [mnemonic, _, operands] = line.partition('\\t')\r\n i = int(opcode, 16)\r\n info[i] = [mnemonic.strip(), operands.strip(), int(nbytes)]\r\n \r\n return info", "def __get_actions(self, state, next_states):\r\n val_tok_mov = np.zeros((4, 4))\r\n for token_id in range(4):\r\n val_tok_mov[token_id] = self.__valid_token_moves(state, next_states[token_id], token_id)\r\n\r\n actions = np.logical_or.reduce((val_tok_mov[0,:], val_tok_mov[1,:], val_tok_mov[2,:], val_tok_mov[3,:]))\r\n\r\n return actions, val_tok_mov", "def get_opcode_mode(op):\n op_str = f\"{op:05}\"\n DE = int(op_str[-2:])\n C = int(op_str[2])\n B = int(op_str[1]) \n A = int(op_str[0]) \n\n return A, B, C, DE", "def GetIndex():\n\n global Asm\n\n index = assem.NowChar(True).upper()\n\n if index == ' ':\n # Mmm, missing operand\n errors.DoError('missoper', False)\n return (0, 'Z', 0) # Return dummy values\n\n incdec = 0\n if index == '-':\n # Pre-decrement\n incdec = 2\n index = assem.NowChar(True).upper()\n\n if index not in ('X', 'Y', 'Z'):\n # It's none of the legal index registers\n errors.DoError('badoper', False)\n return (0, 'Z', 0) # Return dummy values\n\n if incdec == 0:\n # It's not pre-decrement, so it may be post-incrment\n if assem.NowChar() == '+':\n incdec = 1\n assem.NowChar(True)\n\n offset = 0\n if dec.Asm.Mnemonic in ('LDD', 'STD') and incdec == 1:\n # An offset q must follow\n if index == 'X':\n # LDD X+q doesn't exist\n errors.DoError('badoper', False)\n offset = assem.EvalExpr()[0]\n\n if dec.Asm.Pass == 2 and (offset < 0 or offset > 63):\n # Check range only in pass 2\n errors.DoError('range', False)\n offset = 0\n\n # Split all the bits to fit the opcode gaps.\n offset = (offset & 7) + ((offset & 0x18) << 7) + ((offset & 0x20) << 8)\n\n return (incdec, index, offset)", "def __add_instruction(self, tokens : List[Union[str,int]]) -> List[Union[str,int]]:\n if tokens:\n return tokens[:syntaxParametersDict.get(tokens[0])+1]\n return []", "def _decode_insn(self, bitvec76, start, pos):\n insn_wnd = bitvec76[start:start + 0xC] # Opcode is 12 bits long. 
\n opcode = int(insn_wnd, 2) \n\n # print(f'[+] Instruction Window at {pos:2X}h: {insn_wnd} ~> Opcode: {opcode:03X}h')\n \n # Check if opcode is valid\n if opcode & 0x80 == 0:\n if (opcode & 0x3F) + 0xD != pos:\n raise IndexError(\"instruction and immediate size mismatch\")\n \n if opcode & 0x40 != 0:\n insn_wnd2 = bitvec76[start + 0xC:]\n operand = int(insn_wnd2, 2)\n else:\n insn_wnd2 = bitvec76[start + 0xC:]\n operand = int(insn_wnd2[::-1], 2)\n\n width = (opcode & 0x3F) + 1\n else:\n if opcode & 0xFF00 == 0x400 or opcode & 0xFF00 == 0xF00:\n raise IndexError(\"inappropriate combination of mnemonic and operand type\")\n if pos != 12:\n raise IndexError(\"inappropriate combination of operand type and immediate data\")\n\n insn_wnd2, operand = None, None\n width = (opcode & 0x3F) + 1\n \n return opcode >> 8, operand, width", "def read_all_command_registers(self):\n return self.COMMAND_REGISTERS", "def gprog():\n return [('ip','ip','u')]+[k for x in ['c','1','2','3'] for k in gcopy(\"op\"+x, 'ip')]", "def apply_opcode(opcode, instruction, registers):\n a = int(instruction[1])\n b = int(instruction[2])\n output_reg = int(instruction[3])\n\n if opcode == 'addr':\n output = registers[a] + registers[b]\n elif opcode == 'addi':\n output = registers[a] + b\n elif opcode == 'mulr':\n output = registers[a] * registers[b]\n elif opcode == 'muli':\n output = registers[a] * b\n elif opcode == 'banr':\n output = registers[a] & registers[b]\n elif opcode == 'bani':\n output = registers[a] & b\n elif opcode == 'borr':\n output = registers[a] | registers[b]\n elif opcode == 'bori':\n output = registers[a] | b\n\n elif opcode == 'setr':\n output = registers[a]\n elif opcode == 'seti':\n output = a\n\n elif opcode == 'gtir':\n if a > registers[b]:\n output = 1\n else:\n output = 0\n elif opcode == 'gtri':\n if registers[a] > b:\n output = 1\n else:\n output = 0\n elif opcode == 'gtrr':\n if registers[a] > registers[b]:\n output = 1\n else:\n output = 0\n\n elif opcode == 'eqir':\n if a == registers[b]:\n output = 1\n else:\n output = 0\n elif opcode == 'eqri':\n if registers[a] == b:\n output = 1\n else:\n output = 0\n elif opcode == 'eqrr':\n if registers[a] == registers[b]:\n output = 1\n else:\n output = 0\n\n registers[output_reg] = output", "def read_operation(self, opcode: int) -> int:\n\n if self.insight:\n self.insight.operation(opcode)\n\n return opcode & 0xF000", "def get_disasm_line( ea ):\r\n\top1 = ua_outop2( ea, 0, 0 )\t\r\n\top2 = ua_outop2( ea, 1, 0 )\r\n\top3 = ua_outop2( ea, 2, 0 )\r\n\tif op1 == None:\r\n\t\top1 = \"\"\r\n\telse:\r\n\t\top1 = idaline_to_string( op1 )\r\n\tif op2 == None:\r\n\t\top2 = \"\"\r\n\telse:\r\n\t\top2 = idaline_to_string( op2 )\r\n\tif op3 == None:\r\n\t\top3 = \"\"\r\n\telse:\r\n\t\top3 = idaline_to_string( op3 )\r\n\tret = [ ea, ua_mnem( ea ), op1, op2, op3 ]\r\n\treturn ret", "def get_ops():\n li = [\"EOF\",\"ADD\",\"SUB\",\"MUL\",\"DIV\",\"POW\",\"BITAND\",\"BITOR\",\"CMP\",\"GET\", \\\n \"SET\",\"NUMBER\",\"STRING\",\"GGET\",\"GSET\",\"MOVE\",\"DEF\",\"PASS\", \\\n \"JUMP\",\"CALL\",\"RETURN\",\"IF\",\"DEBUG\",\"EQ\",\"LE\",\"LT\",\"DICT\", \\\n \"LIST\",\"NONE\",\"LEN\",\"LINE\",\"PARAMS\",\"IGET\",\"FILE\",\"NAME\", \\\n \"NE\",\"HAS\",\"RAISE\",\"SETJMP\",\"MOD\",\"LSH\",\"RSH\",\"ITER\",\"DEL\", \\\n \"REGS\",\"BITXOR\", \"IFN\", \"NOT\", \"BITNOT\"]\n dic = {}\n for i in li:\n dic[i] = li.index(i)\n return dic", "def getOperandsRead(self):\n # pylint: disable=no-self-use\n return []", "def _extract_commands(self, data):\n version = data[6]\n if version 
> 1:\n raise CarError('only version 1 is supported')\n if data[8] == 1:\n if self.inputastext is None:\n self.inputastext = True\n if data[9] == 1:\n if self.outputastext is None:\n self.outputastext = True\n data = data[10:]\n data = struct.unpack('<' + 'I' * (len(data) // 4), data)\n commands = tuple((data[i], data[i + 1]) for i in range(3, len(data), 2))\n for x, a in filter(lambda x: x[0] in (GOTO, IF), commands):\n if a >= len(commands):\n raise CarError('code position out of scope')\n return commands, data[:3]", "def regPllQuery(cls):\n regs = np.zeros(cls.REG_PACKET_LEN, dtype='<u1')\n regs[0] = 0 # No sequence start\n regs[1] = 1 # Readback after 2us\n return regs", "def read_instr(self):\n label = self.nr\n opcode = self.read_word()\n if opcode not in Instrs:\n raise ValueError(\"Unknown opcode %s\" % opcode)\n\n name, arg_names = Instrs[opcode]\n if opcode == Opcode.SWITCH:\n n = self.read_word()\n size_tag = n >> 16\n size_long = n & 0xFFFF\n size = size_tag + size_long\n tab = []\n for _ in range(size):\n tab.append(self.read_word())\n args = [n, tab]\n elif opcode == Opcode.CLOSUREREC:\n f = self.read_word()\n v = self.read_word()\n o = self.read_word()\n t = []\n for _ in range(f - 2):\n t.append(self.read_word())\n args = [f, v, o, t]\n else:\n # Normal opcode:\n args = []\n for arg_name in arg_names:\n # if arg_name in ['n', 's', 'ofs', 's', 't', 'p']:\n if True:\n arg = self.read_word()\n else:\n raise NotImplementedError(arg_name)\n args.append(arg)\n # print(label, name, args)\n ins = Instruction(opcode, name, args)\n ins.label = label\n return ins", "def cinsn_details(self):\n\n if self.op not in self.op_to_typename:\n raise RuntimeError('unknown item->op type')\n\n opname = self.opname\n if opname == 'empty':\n return self\n\n if opname in ['break', 'continue']:\n return None\n\n return getattr(self, 'c' + opname)", "def get_actions(self):\r\n return -4,4", "def read_idcode_opcode(device, idcode_opcode):\n opcode_length = len(idcode_opcode)\n data = bytearray()\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b11111)) # go to reset\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b00110)) # go to shift-ir\n data.extend((WRITE_BITS_NVE_LSB, opcode_length - 2, int(idcode_opcode))) # shift IDCODE opcode\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b00111)) # go to shift-dr\n data.extend((READ_BYTES_NVE_LSB, 3, 0)) # read command\n device.write(bytes(data)) # send off MPSSE commands\n idcode = device.read(4)[::-1]\n return \"\".join(format(byte, \"08b\") for byte in idcode)", "def process(opcode):\n opcode.process()" ]
[ "0.6129944", "0.57926476", "0.5783177", "0.56775135", "0.56415033", "0.56394535", "0.5629857", "0.5604485", "0.54920286", "0.54577243", "0.5452047", "0.5442101", "0.5421196", "0.53791213", "0.53581613", "0.53458023", "0.5313317", "0.5296601", "0.52932817", "0.5269778", "0.52652967", "0.5249738", "0.5240731", "0.5210264", "0.52016675", "0.519065", "0.51815915", "0.5163782", "0.5158708", "0.51275903" ]
0.5846213
1
Steps the machine 1 instruction, returning True if halted.
def step(self): start_pc = self.pc decoded_opcode = self.decode_opcode(self.eat_pc(True)) print('Step from PC=%d (%s)' % (start_pc, decoded_opcode)) opcode = decoded_opcode[0] if 1 == opcode or 2 == opcode: self.add_multiply_instruction(decoded_opcode) elif 3 == opcode: self.input_instruction(decoded_opcode) elif 4 == opcode: self.output_instruction(decoded_opcode) elif 5 == opcode or 6 == opcode: self.jump_instruction(decoded_opcode) elif 7 == opcode or 8 == opcode: self.compare_instruction(decoded_opcode) elif 99 == opcode: self.halt_instruction(decoded_opcode) else: print('Unknown opcode: ', opcode) raise AssertionError('!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self) -> bool:\n raise NotImplementedError()", "def step(self):\n # parse opcode and parameter mode(s) from instruction\n # (convert integer into 5-digit string with zeroes before parsing)\n instruction = str(self.program[self.index]).zfill(5)\n opcode = int(instruction[-2:])\n param1_mode = int(instruction[-3])\n param2_mode = int(instruction[-4])\n param3_mode = int(instruction[-5])\n\n # opcode to halt program\n if opcode == 99:\n self.halted = True\n return\n\n # opcodes for addition or multiplication\n if opcode in (1, 2):\n val1 = self.get_value(self.index+1, param1_mode)\n val2 = self.get_value(self.index+2, param2_mode)\n\n if opcode == 1:\n total = val1 + val2\n elif opcode == 2:\n total = val1 * val2\n\n self.set_value(self.index+3, param3_mode, total)\n self.index += 4\n return\n\n # opcode for input\n if opcode == 3:\n try:\n inputval = self.inputs.pop(0)\n except IndexError:\n # no input available, halt program until external process\n # adds input and restarts the process\n self.halted = True\n return\n\n self.set_value(self.index+1, param1_mode, inputval)\n self.index += 2\n return\n\n # opcode for output\n if opcode == 4:\n self.outputs += [self.get_value(self.index+1, param1_mode)]\n self.index += 2\n return\n\n # opcodes for jump-if-true / jump-if-false\n if opcode in (5, 6):\n val1 = self.get_value(self.index+1, param1_mode)\n val2 = self.get_value(self.index+2, param2_mode)\n\n # Should jump; update instruction pointer directly\n if (opcode == 5 and val1 != 0) or (opcode == 6 and val1 == 0):\n self.index = val2\n return\n\n # No action, continue to next instruction\n self.index += 3\n return\n\n # opcode for less than / equal to\n if opcode in (7, 8):\n val1 = self.get_value(self.index+1, param1_mode)\n val2 = self.get_value(self.index+2, param2_mode)\n\n # Default 0 (False), set to 1 if True\n result = 0\n if opcode == 7 and val1 < val2:\n result = 1\n elif opcode == 8 and val1 == val2:\n result = 1\n\n self.set_value(self.index+3, param3_mode, result)\n self.index += 4\n return\n\n # opcode for relative base offset\n if opcode == 9:\n self.relative_base += self.get_value(self.index+1, param1_mode)\n self.index += 2\n return\n\n raise Exception(\"unknown opcode, something went wrong\")", "def run_program(self) -> bool:\n try:\n while instruct := next(self.__instruction()):\n self.__execute(instruct)\n except ExecutionFinished:\n # Program finished\n return True\n # Program finished without halt\n return False", "def step(self):\n try:\n self.instructions[self.program[self.instruction_pointer]]()\n self.instruction_pointer+=1\n except tape.TapeError:\n #print \"Tape underflow, instruction number: \"+str(self.instruction_pointer)\n raise", "def step(self):\r\n if self.pc < 0 or self.pc >= len(self.instructions):\r\n raise Exception('Index out of bounds for pc.')\r\n ins, x, *optionals = self.instructions[self.pc].split()\r\n y = optionals[0] if len(optionals) > 0 else None\r\n if ins == 'set':\r\n self.registers[x] = self._get_value(y)\r\n elif ins == 'sub':\r\n self.registers[x] -= self._get_value(y)\r\n elif ins == 'mul':\r\n self.registers[x] *= self._get_value(y)\r\n self.mul_counter += 1\r\n elif ins == 'jnz':\r\n if self._get_value(x) != 0:\r\n # -1 since we always add 1 to self.pc later in the method.\r\n self.pc += self._get_value(y) - 1\r\n else:\r\n raise Exception('Unable to parse instruction: ' + self.instructions[self.pc])\r\n self.pc += 1", "def step(self):\n try:\n current_symbol = self._tape[self._head]\n next_symbol, direction, 
self._current_state = self._transitions.get(\n self._current_state\n ).get(current_symbol)\n except:\n return True\n\n self._tape[self._head] = next_symbol\n self._head += 1 if direction else -1\n\n if self._head < 0:\n self._tape.insert(0, self._blank_symbol)\n self._head = 0\n elif self._head >= len(self._tape):\n self._tape.append(self._blank_symbol)\n self._head = len(self._tape) - 1\n\n return False", "def run_until_stop(self):\n while self.commands[self.pointer] != END:\n # Get the cmd\n cmd = self.commands[self.pointer]\n opcode = cmd % 100\n modes = cmd // 100\n \n vals, locs, self.pointer = get_vals_and_locs(opcode, modes, self.pointer, self.commands)\n \n if opcode == ADD:\n self.commands[locs[2]] = vals[0] + vals[1]\n elif opcode == MUL:\n self.commands[locs[2]] = vals[0] * vals[1]\n elif opcode == INP:\n if self.inputs:\n self.commands[locs[0]] = self.inputs.pop(0)\n else:\n # Put the pointer back, so we run this opcode again\n self.pointer -= 2\n return False\n elif opcode == OUT:\n self.outputs.append(vals[0])\n elif opcode == JIT:\n if vals[0] != 0:\n self.pointer = vals[1]\n elif opcode == JIF:\n if vals[0] == 0:\n self.pointer = vals[1]\n elif opcode == LT:\n self.commands[locs[2]] = 1 if vals[0] < vals[1] else 0\n elif opcode == EQ:\n self.commands[locs[2]] = 1 if vals[0] == vals[1] else 0\n else:\n print(\"FAIL????\")\n\n return True", "def run(self):\n\n self.publisher.publish('run-start', vm=self)\n\n self.halt = False\n\n while not self.halt:\n self.publisher.publish('step', vm=self)\n\n if self.exec_ptr > (len(self.memory) - 1):\n raise OverflowError(\"Execution pointer has overran memory: \" + str(self.exec_ptr))\n\n a = b = c = None\n\n instruction = self.memory[self.exec_ptr]\n\n if instruction == HALT:\n self.halt = True\n continue\n elif instruction == SET:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_set(a, b)\n\n self.exec_ptr += 3\n elif instruction == PUSH:\n a = self.get_a_param()\n\n self.instruction_push(a)\n self.exec_ptr += 2\n elif instruction == POP:\n a = self.get_a_param()\n\n self.instruction_pop(a)\n\n self.exec_ptr += 2\n elif instruction == EQ:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_eq(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == GT:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_gt(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == JMP:\n a = self.get_a_param()\n\n self.instruction_jmp(a)\n elif instruction == JT:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_jt(a, b)\n elif instruction == JF:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_jf(a, b)\n elif instruction == ADD:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_add(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == MULT:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_mult(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == MOD:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_mod(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == AND:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_and(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == OR:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_or(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == 
NOT:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_not(a, b)\n\n self.exec_ptr += 3\n elif instruction == RMEM:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_rmem(a, b)\n\n self.exec_ptr += 3\n elif instruction == WMEM:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_wmem(a, b)\n\n self.exec_ptr += 3\n elif instruction == CALL:\n a = self.get_a_param()\n\n self.instruction_call(a)\n elif instruction == RET:\n self.instruction_ret()\n elif instruction == OUT:\n a = self.get_a_param()\n\n self.instruction_out(a)\n\n self.exec_ptr += 2\n elif instruction == IN:\n a = self.get_a_param()\n\n self.instruction_in(a)\n\n self.exec_ptr += 2\n elif instruction == NOOP:\n self.exec_ptr += 1\n continue\n else:\n raise ValueError(\"Unknown instruction \" + str(instruction))\n\n self.publisher.publish('run-end', vm=self)", "def step(a=0):\n\n global simulator, recorder\n if simulator is None:\n print \"Program is not started\"\n return\n __record(pc(), step, a)\n try:\n simulator.step(a)\n except:\n simulation_error()\n exec_hooks(step)\n arrows()", "def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass", "def do_step(self) -> None:", "def execute(self):\n while len(self.program)>(self.instruction_pointer):\n self.step()", "def run_one_step(self):\n pass", "def next_step(self, p1, p2):\n\n self.index += 1\n out('Protocol step' + str(self.index))\n\n if self.index == 1:\n return prepare(p1, p2)\n\n elif self.index == 2:\n return share_graphs(p1, p2)\n\n elif self.index == 3:\n if turn(p1, p2):\n # The game is over\n\n return False\n else:\n # It's a draw. The game needs a new turn\n self.index -= 1\n return False\n\n elif self.index == 4:\n if play_again(p1, p2):\n self.index = 2\n return False\n else:\n return True", "def step(self):\r\n cmd = struct.pack('>B', 54)\r\n self.send(cmd)", "def finished(self, step: t.Optional[int] = None) -> bool:\n if step is None:\n return self.status() == self.expectedstatus()\n return self.status() & (1 << step)", "def step(self, timeout: Optional[float] = None) -> bool:\n self.go()\n return self.wait_done(timeout=timeout)", "def _run_next_automation_action(self, sequence_iterator: Iterator[AutomationTick]) -> bool:\n automation_tick = next(sequence_iterator, None)\n\n if not automation_tick:\n return True\n\n action = automation_tick.get_action()\n\n current_action_key_id: KeyID = action.value\n\n # sleep for a random amount of time between each tick\n time.sleep(self._get_tick_random_time_buffer())\n\n self._tick(current_action_key_id, automation_tick.get_duration())\n\n return False", "def test_run_loop_success(self):\n found = False\n pyint = Interpreter(limit=15)\n try:\n pyint.run(code=BF_CODE_LOOP_TWICE)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def step(self):\n if self.op_state != Turing_Machine.RUNNING:\n return\n if self.end_time and time.time() >= self.end_time:\n self.op_state = Turing_Machine.TIME_OUT\n return\n\n if self.compute_steps:\n self.old_step_num = self.step_num\n # Note: We increment the number of loops early to take care of all the\n # places step() could early-return.\n self.num_loops += 1\n if self.prover:\n # Log the configuration and see if we can apply a rule.\n cond, new_tape, num_steps, replace_vars = self.prover.log(self.tape, self.state, self.step_num, self.num_loops-1)\n\n # If the prover has been printing, give us a newline and 
remind us\n # what the current configuration is.\n # TODO(sligocki): Figure out how to get this to happen only when prover\n # actually printed something (it doesn't if it just logged).\n #if self.prover.verbose:\n # print\n # self.num_loops -= 1 # Kludgey :/\n # self.verbose_print()\n # self.num_loops += 1\n\n # Proof system says that machine will repeat forever\n if cond == Turing_Machine.INF_REPEAT:\n self.op_state = Turing_Machine.INF_REPEAT\n self.inf_reason = PROOF_SYSTEM\n self.verbose_print()\n return\n # Proof system says that we can apply a rule\n elif cond == Turing_Machine.RUNNING:\n # TODO(shawn): This seems out of place here and is the only place in\n # the Simulator where we distinguish Algebraic_Expressions.\n # We should clean it up in some way.\n if replace_vars:\n assert self.options.allow_collatz\n # We don't want the update below to overwrite things.\n assert not frozenset(self.replace_vars.keys()).intersection(\n frozenset(replace_vars.keys()))\n self.replace_vars.update(replace_vars)\n # Update all instances of old variable (should just be in steps).\n assert isinstance(self.step_num, Algebraic_Expression)\n self.step_num = self.step_num.substitute(replace_vars)\n assert isinstance(self.old_step_num, Algebraic_Expression)\n self.old_step_num = self.old_step_num.substitute(replace_vars)\n assert not isinstance(self.num_loops, Algebraic_Expression)\n self.tape = new_tape\n self.num_rule_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_rule += num_steps\n self.verbose_print()\n return\n # Get current symbol\n cur_symbol = self.tape.get_top_symbol()\n # Lookup TM transition rule\n cond, (symbol2write, next_state, next_dir), num_steps = \\\n self.machine.get_transition(cur_symbol, self.state, self.dir)\n # Test condition\n self.op_state = cond[0]\n self.op_details = cond[1:]\n # Apply transition\n # Chain move\n if next_state == self.state and next_dir == self.dir and \\\n self.op_state == Turing_Machine.RUNNING:\n num_reps = self.tape.apply_chain_move(symbol2write)\n if num_reps == Tape.INF:\n self.op_state = Turing_Machine.INF_REPEAT\n self.inf_reason = CHAIN_MOVE\n self.verbose_print()\n return\n # Don't need to change state or direction\n self.num_chain_moves += 1\n if self.compute_steps:\n self.step_num += num_steps*num_reps\n self.steps_from_chain += num_steps*num_reps\n # Simple move\n else:\n self.tape.apply_single_move(symbol2write, next_dir)\n self.state = next_state\n self.dir = next_dir\n self.num_macro_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_macro += num_steps\n if self.op_state == Turing_Machine.INF_REPEAT:\n self.inf_reason = REPEAT_IN_PLACE\n if self.op_state != Turing_Machine.UNDEFINED:\n self.verbose_print()", "def halt(self):\n res = int(self._dll.JLINKARM_Halt())\n if res == 0:\n time.sleep(1)\n return True\n return False", "def try_advance(self):\n if not self.step.toclick:\n self.step.finished = True\n return True\n return False", "def solveOneStep(self):\n ### Student code goes here\n return True", "def _step(self) -> bool:\n\n total_run_time_begin = time.time()\n\n try:\n self.compiled_code_run_time += CC3DPy.call_step(self._current_step)\n except CompuCellSetup.CC3DCPlusPlusError as cc3d_cpp_err:\n self._error_message = cc3d_cpp_err.message\n return False\n\n CC3DPy.store_sim_step_data(self._current_step)\n if not self._check_cc3d():\n self.status = SimStatus.SIM_FAILED\n return False\n\n try:\n CC3DPy.call_steer(self._current_step) # Need an interface to write XML-based 
data in Python\n self._check_cc3d() # Test if this is necessary\n except CompuCellSetup.CC3DCPlusPlusError as cc3d_cpp_err:\n self._error_message = cc3d_cpp_err.message\n self.status = SimStatus.SIM_FAILED\n return False\n\n for frame_c in self._graphics_frames:\n if self._current_step % frame_c.plot_freq == 0:\n frame_c.frame.draw(blocking=frame_c.blocking)\n\n total_run_time_end = time.time()\n self.total_run_time += (total_run_time_end - total_run_time_begin) * 1000\n\n return True", "def step(self):\r\n for row in self.program:\r\n if row.state == self.state and row.symbol == self.memory[self.pointer]:\r\n self.steps += 1\r\n self.memory[self.pointer] = row.write\r\n self.state=row.new_state\r\n if row.direction in \">rR\":\r\n self.pointer += 1\r\n if self.pointer >= self.mem_max:\r\n self.error = \"Memory limit exceeded.\"\r\n return 0\r\n elif self.pointer >= len(self.memory):\r\n self.memory += [\"#\" for x in range(len(self.memory))]\r\n return 1\r\n elif row.direction in \"<lL\":\r\n self.pointer -= 1\r\n if self.pointer < 0:\r\n self.error = \"Pointer negative.\"\r\n return 0\r\n return 1\r\n return 1\r\n self.error = \"No row matches state and symbol\"\r\n return 0", "def test_conditional_1bit(self):\n shots = 100\n circuits = ref_conditionals.conditional_circuits_1bit(final_measure=True)\n targets = ref_conditionals.conditional_counts_1bit(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def solveOneStep(self):\n if self.currentState.state == self.victoryCondition:\n return True\n\n movables = self.gm.getMovables()\n self.visited[self.currentState] = True\n\n for move in movables:\n self.gm.makeMove(move)\n gs = GameState(self.gm.getGameState(), self.currentState.depth + 1, move)\n if gs in self.visited:\n self.gm.reverseMove(move)\n continue\n self.queue.insert(0, gs)\n gs.parent = self.currentState\n self.gm.reverseMove(move)\n\n while self.queue:\n gs = self.queue.pop()\n if gs in self.visited:\n continue\n self.moveGameState(gs)\n self.currentState = gs\n return False", "def step(self, state):", "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def _step(self) -> None:" ]
[ "0.6205759", "0.6091664", "0.5830088", "0.5799696", "0.5696338", "0.56625956", "0.5456521", "0.5393628", "0.53446066", "0.5337814", "0.5318763", "0.52655756", "0.5245858", "0.5241452", "0.5228499", "0.5224713", "0.5211338", "0.52051055", "0.5181581", "0.5164398", "0.51551384", "0.5142853", "0.51425654", "0.5134006", "0.51139224", "0.5103248", "0.51009244", "0.5100411", "0.50621825", "0.5057336" ]
0.6639182
0
Calculates the kriging weights for all of the points in the grid
def _buildWeights(self): # Compute the spatial tree kd = spatial.cKDTree(self.XYin) # Perform query on all of the points in the grid dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear) self.Nc = np.size(self.ind,axis=0) print '%d interpolation points.'%self.Nc # Now loop through and get the weights for each point self.W = np.zeros((self.NNear,self.Nc)) # Print percentages p0=0 pstep=5 for ii in range(0,self.Nc): if self.verbose: pfinish = float(ii)/float(self.Nc)*100.0 if pfinish> p0: print '%3.1f %% complete...'%pfinish p0+=pstep W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1]) self.W[:,ii] = W.T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weights(self):", "def grid_weights(self):\n return angular_average_nd(\n field=np.ones((len(self.uvgrid),) * 2),\n coords=[self.uvgrid, self.uvgrid],\n bins=self.u_edges, n=self.ps_dim, bin_ave=False,\n average=False)[0]", "def __weights(self):\n r, c = np.mgrid[:self.size, :self.size] + 0.5\n rad = np.sqrt((r - self.size/2)**2 + (c - self.size/2)**2)\n img = np.zeros((self.size, self.size))\n rmin = np.sqrt(2) * 0.5 * self.damp * rad.max()\n rmax = np.sqrt(2) * 0.5 * rad.max()\n zone = np.logical_and(rad > rmin, rad < rmax)\n img[rad < rmin] = 1.0\n img[rad > rmax] = 0.0\n img[zone] = (rmax - rad[zone]) / (rmax - rmin)\n return img", "def zonal_avg2(data,Log=False):\n print 'setting up the destination grid'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n print 'computing weights for grid cell'\n ilist = []\n jlist = []\n wghts2D = []\n wghts3D = []\n for i in range(nlat):\n for j in range(nlon):\n i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)\n ilist.append(i_inds)\n jlist.append(j_inds)\n dist = gc_dist(lon_t[i], lat_t[i], tlon, tlat)\n # make weights=0 on land\n work2D = 1./MA.array(dist,mask=data[0,...].mask)\n wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)\n ,0))\n\n work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)\n wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)\n ,0))\n\n #print 'computing zonal average'\n return lon_t, lat_t, ilist, jlist, wghts2D, wghts3D", "def compute_geom_weights(self):\n weights = np.zeros([np.size(self._triangles, 0), 3])\n tris_pts = self._tris_pts\n for ipt in range(3):\n p0 = tris_pts[:, (ipt) % 3, :]\n p1 = tris_pts[:, (ipt+1) % 3, :]\n p2 = tris_pts[:, (ipt-1) % 3, :]\n alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])\n alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])\n # In the below formula we could take modulo 2. but\n # modulo 1. is safer regarding round-off errors (flat triangles).\n angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))\n # Weight proportional to angle up np.pi/2 ; null weight for\n # degenerated cases 0. and np.pi (Note that `angle` is normalized\n # by np.pi)\n weights[:, ipt] = 0.5 - np.abs(angle-0.5)\n return weights", "def lat_weights_regular_grid(lat): \n dlat = np.abs(np.diff(lat))\n np.testing.assert_almost_equal(dlat, dlat[0])\n w = np.abs(np.sin(np.radians(lat + dlat[0] / 2.)) - np.sin(np.radians(lat - dlat[0] / 2.)))\n\n if np.abs(lat[0]) > 89.9999: \n w[0] = np.abs(1. - np.sin(np.radians(np.pi / 2 - dlat[0])))\n\n if np.abs(lat[-1]) > 89.9999:\n w[-1] = np.abs(1. 
- np.sin(np.radians(np.pi / 2 - dlat[0])))\n\n return w", "def weights(self) -> List[float]:", "def calculate_weighted_results():\n pass", "def _compute_weights(self):\n with tf.name_scope('compute_weights'):\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * self.g", "def gaussian_weights(self, pad, feather):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt = np.zeros_like(dist)\n i_feather = (dist >= W/2 - pad - feather) & ( dist <= W/2 -pad )\n wt_feather = np.exp(-((xy[i_feather]-xy0)/(feather/2.))**2)\n wt[ i_feather ] = wt_feather\n wt[ dist <= W/2 - pad - feather ] = 1\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])", "def compute_weights(self):\n\n # Init lists\n weights, weights_k_idx = [], []\n for i_order in range(self.n_orders): # For each orders\n\n weights_n, k_idx_n = self.get_w(i_order) # Compute weights\n\n # Convert to sparse matrix\n # First get the dimension of the convolved grid\n n_kc = np.diff(self.i_bounds[i_order]).astype(int)[0]\n\n # Then convert to sparse\n weights_n = atoca_utils.sparse_k(weights_n, k_idx_n, n_kc)\n weights.append(weights_n), weights_k_idx.append(k_idx_n)\n\n return weights, weights_k_idx", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self, beta=1):\n N = self.__len__()\n # Allocate memory\n W = np.zeros((N,N), dtype='float')\n \n for i in range(N):\n for j in range(N):\n if i > j:\n W[i,j] = self._kernel(self.flat_image[i],\n self.flat_image[j], \n beta)\n W += W.T \n for i in range(N):\n W[i,i] = 1\n \n return W", "def computesWingsMeshPoints(self):\n self.userAskedNNodesWings = np.zeros(self.nWings)\n self.ws_me_points = []\n self.ws_ma_points = []\n self.ws_me_distances = []\n self.ws_sg_lengths = []\n self.ws_ma_distance = []\n self.ws_me_pointsName = []\n self.ws_me_pointsInitArea = []\n for i in range(self.nWings):\n\n # Basic wing input check\n self.userAskedNNodesWings[i] = self.settings[\"wing\"+str(i+1)][\"FEM\"][\"nodesFEM\"]\n w_m_N_nodes = int(self.userAskedNNodesWings[i])\n if w_m_N_nodes < 2:\n logger.error(\"Not enough points for wing\"+str(i+1)+\" (min 2)\")\n sys.exit()\n\n logger.debug(\"Number of wing nodes asked: \"+str(w_m_N_nodes))\n # distance from leading edge to the elastic axis\n ##################################################################\n # Be very careful with what verion of TiGl you are using! 
It looks\n # like the order is inverted in last version at least I have\n # eperianced some issues between home and CFSE computer.\n ##################################################################\n # xsiEl = 1 - self.settings['wing' + str(i+1)]['elasticAxis']\n xsiEl = self.settings['wing' + str(i+1)]['elasticAxis']\n \n # distance between the mass axis and the elastic axis\n xsiMa = self.settings['wing' + str(i+1)]['massAxis']\n logger.debug(\"Wing\"+str(i+1)+\" Elastic center is: \"+str(xsiEl))\n wingIndex = i+1\n\n # Gets the number of segment and sections for each wing\n w_N_sg = self.tigl.wingGetSegmentCount(i+1)\n w_N_sc = self.tigl.wingGetSectionCount(i+1)\n logger.debug(\"Wing\"+str(i+1)+\" has \"+str(w_N_sg)+\" segments\")\n logger.debug(\"Wing\"+str(i+1)+\" has \"+str(w_N_sc)+\" sections\")\n if w_m_N_nodes < w_N_sc:\n logger.warning(\"Wing mesh underdetermined, less points than actual CPACS sections\")\n\n # Gets each segments starting and ending points\n w_sg_points = np.empty((w_N_sg+1,3))\n for j in range(w_N_sg):\n w_sg_points[j] = self.getWingCamberLinePoint(wingIndex,j+1,0,xsiEl)\n w_sg_points[-1] = self.getWingCamberLinePoint(wingIndex,j+1,1,xsiEl)\n logger.debug(\"Wing\"+str(wingIndex)+\" segment points:\\n\"+str(w_sg_points))\n\n # Gets each segments length\n w_sg_length = np.empty(w_N_sg)\n w_sg_relativePosition = np.empty(w_N_sg+1)\n w_length = 0\n for j in range(w_N_sg):\n w_sg_relativePosition[j] = w_length\n length = np.linalg.norm(w_sg_points[j] - w_sg_points[j+1])\n w_sg_length[j] = length\n w_length += length\n w_sg_relativePosition[-1] = w_length\n logger.debug(\"Wing\"+str(wingIndex)+\" segments lengths are:\\n\"+str(w_sg_length))\n logger.debug(\"Wing\"+str(wingIndex)+\" segments relative positions are:\\n\"+str(w_sg_relativePosition))\n logger.debug(\"Wing\"+str(wingIndex)+\" length is:\"+str(w_length))\n\n # Computes mesh relative points\n w_m_relativePoints = np.linspace(0, w_length, w_m_N_nodes)\n logger.debug(\"Wing\"+str(wingIndex)+\" relative mesh points:\\n\"+str(w_m_relativePoints))\n\n # If the user askes more points that there sections in the CPACS\n # file definitions the program automatically changes the position\n # to the closest known point to the center of the section. 
This\n # features ensures that the simulations will be made with maximal\n # fidelity to the definintion.\n #\n # WARNING:\n # After some testing it looks like this feature induces errors\n # instead of erasing them.\n #\n # logger.debug(\"+\"*20)\n # logger.debug(\"wing relative pos:\\n\"+str(w_sg_relativePosition))\n # logger.debug(\"mesh relative pos:\\n\"+str(w_m_relativePoints))\n # if w_N_sc <= w_m_N_nodes:\n # for j in range(w_N_sc):\n # diff = np.abs(w_m_relativePoints - w_sg_relativePosition[j])\n # index = np.argmin(diff)\n # w_m_relativePoints[index] = w_sg_relativePosition[j]\n\n logger.debug(\"mesh relative pos:\\n\"+str(w_m_relativePoints))\n\n # Computes the eta for each segment in order to get the mesh point\n # from tigl\n w_me_points = np.empty((w_m_N_nodes,3))\n w_ma_points = np.empty((w_m_N_nodes,3))\n w_me_distances = np.empty((w_m_N_nodes-1))\n w_ma_distance = np.empty((w_m_N_nodes,3))\n w_me_pointsName = []\n w_me_pointsInitArea = np.empty(w_m_N_nodes)\n for j in range(w_m_N_nodes):\n # finds in which segment the mesh point will be\n relativePosition = w_m_relativePoints[j]\n dist = w_sg_relativePosition - relativePosition\n segmentIndex = np.argmin(np.abs(dist))+1\n # o--x-------o situations\n if dist[segmentIndex-1] < 0:\n case = 1\n eta = w_m_relativePoints[j] - w_sg_relativePosition[segmentIndex-1]\n eta = (eta/w_sg_length[segmentIndex-1])\n # o--x-------o situation\n elif dist[segmentIndex-1] > 0:\n case = 2\n eta = w_sg_relativePosition[segmentIndex-1] - w_m_relativePoints[j]\n segmentIndex = segmentIndex - 1\n eta = 1 - (eta/w_sg_length[segmentIndex-1])\n elif dist[segmentIndex-1] == 0.0 and segmentIndex == 1:\n case = 3\n eta = 0\n elif dist[segmentIndex-1] == 0.0 and segmentIndex != 1:\n case = 4\n eta = 1\n segmentIndex -= 1\n else:\n logger.error(\"Something wrong with CPACS file\")\n sys.exit()\n # logger.debug()\n logger.debug(\"case \"+str(case)+\" eta = \"+str(eta))\n\n # Gets the wing mesh points. 
Theses points will be always on\n # the camber line.\n w_me_points[j] = self.getWingCamberLinePoint(wingIndex,segmentIndex,eta,xsiEl)\n w_ma_points[j] = self.getWingCamberLinePoint(wingIndex,segmentIndex,eta,xsiEl-xsiMa)\n if j > 0:\n length = np.linalg.norm(w_me_points[j] - w_me_points[j-1])\n w_me_distances[j-1] = length\n # Distance from elastic axis\n w_ma_distance[j] = w_me_points[j] - w_ma_points[j]\n name = \"w_\"+str(i+1)+\"_n_\"+str(j)\n if self.nFuselage == 0:\n if np.abs(w_me_points[j][1]) < 1e-2:\n name = \"w_n_clamped\"\n w_me_pointsName.append(name)\n # Computes section area\n area = self.computePointSectionArea(wingIndex,segmentIndex,eta,xsiEl)\n w_me_pointsInitArea[j] = area\n \n logger.debug(w_me_points)\n logger.debug(w_ma_points)\n # sys.exit()\n # For reference, in tigl3wrapper.py the symmetry is defined as such:\n #\n # class TiglSymmetryAxis(object):\n # TIGL_NO_SYMMETRY = 0\n # TIGL_X_Y_PLANE = 1\n # TIGL_X_Z_PLANE = 2\n # TIGL_Y_Z_PLANE = 3\n symmetry = self.tigl.wingGetSymmetry(i+1)\n if symmetry > 0:\n w_me_points_copy = np.copy(w_me_points)\n w_ma_points_copy = np.copy(w_ma_points)\n w_ma_distance_copy = np.copy(w_ma_distance)\n w_me_pointsName_copy = w_me_pointsName.copy()\n w_me_pointsInitArea_c = np.copy(w_me_pointsInitArea)\n if symmetry == 1:\n index = 2\n elif symmetry == 2:\n index = 1\n elif symmetry == 3:\n index = 0\n\n # Computes symmetric points\n for k in range(w_m_N_nodes):\n w_me_points_copy[k][index] = - w_me_points[k,index]\n w_ma_points_copy[k][index] = - w_ma_points[k,index]\n w_ma_distance_copy[k][index] = - w_ma_distance[k,index]\n w_me_pointsName_copy[k] = w_me_pointsName_copy[k] + \"sym\"\n # The -1 avoids copying two times the \"same\" point\n w_me_points = np.concatenate((np.flip(w_me_points_copy[1:],axis=0),w_me_points))\n w_ma_points = np.concatenate((np.flip(w_ma_points_copy[1:],axis=0),w_ma_points))\n w_me_distances = np.concatenate((np.flip(w_me_distances), w_me_distances))\n w_ma_distance = np.concatenate((np.flip(w_ma_distance_copy[1:],axis=0), w_ma_distance))\n rev = w_me_pointsName_copy[::-1]\n w_me_pointsName = rev[:-1] + w_me_pointsName\n # logger.debug(w_m_pointsInitArea)\n # logger.debug(np.flip(w_me_pointsInitArea_c))\n w_me_pointsInitArea = np.concatenate((np.flip(w_me_pointsInitArea_c[1:],axis=0),w_me_pointsInitArea))\n\n logger.debug(\"Wing mesh points:\\n\"+str(w_me_points))\n self.ws_me_points.append(w_me_points)\n self.ws_ma_points.append(w_ma_points)\n\n # me_distance is the distance betweent two points of the strcutral\n # mesh size\n self.ws_me_distances.append(w_me_distances)\n # self.ws_sg_lengths.append(w_sg_length)\n\n # mass distance is the distance between the elastic line and the\n # mass line\n self.ws_ma_distance.append(w_ma_distance)\n self.ws_me_pointsInitArea.append(w_me_pointsInitArea)\n self.ws_me_pointsName.append(w_me_pointsName)", "def calcweighted(store):\n nobs = store['yvec'].shape[0]\n store['Upper'].put(-store['rho'], range(0, nobs - 1), range(1, nobs))\n store['Upper'].matvec(store['yvec'], store['yvectil'])\n for i in xrange(store['xmat'].shape[1]):\n store['Upper'].matvec(store['xmat'][:, i], store['xmattil'][:, i])", "def test_weighting_implementation():\n\n # generate two locusts of points\n npts = 100\n epsilon = 0.05\n # cluster 1\n coords1 = generate_locus_of_3d_points(npts, 0.1, 0.1, 0.1, epsilon=epsilon)\n # cluster 2\n coords2 = generate_locus_of_3d_points(npts, 0.9, 0.9, 0.9, epsilon=epsilon)\n\n # generate orientation vectors for cluster 1\n vectors1 = 
generate_aligned_vectors(len(coords1))\n\n # generate a random index value to check for each cluster\n idx = np.random.randint(npts)\n idx2 = np.random.randint(npts)\n\n # calculate dot product between vectors1 and cluster 2\n r = np.sqrt((0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2)\n # s, vector between coords1 and cluster2\n s = np.zeros((3))\n s[0] = coords2[idx2, 0] - coords1[idx, 0]\n s[1] = coords2[idx2, 1] - coords1[idx, 1]\n s[2] = coords2[idx2, 2] - coords1[idx, 2]\n\n # calculate dot product between orientation and direction between cluster 1 and 2\n angles = angles_between_list_of_vectors(vectors1[idx], s)\n costheta = np.cos(angles) # dot product between vectors\n\n idx_costheta = costheta\n\n # define radial bins\n rbins = np.array([0.0, 0.1, r + 2.0 * epsilon])\n\n # define weights appropiate for weighting function\n weights1 = np.zeros((npts, 4))\n weights1[idx] = 1.0\n weights1[:, 1] = vectors1[:, 0]\n weights1[:, 2] = vectors1[:, 1]\n weights1[:, 3] = vectors1[:, 2]\n weights2 = np.zeros(npts)\n weights2[idx2] = 1.0\n\n # calculate weighted counts\n\n # weighting 1\n # calculate weighted counts\n weighted_counts, counts = positional_marked_npairs_3d(\n coords1,\n coords2,\n rbins,\n period=None,\n weights1=weights1,\n weights2=weights2,\n weight_func_id=1,\n num_threads=1,\n )\n\n msg = \"weighted counts do not match expected result given the weighting function\"\n assert np.isclose(weighted_counts[-1], idx_costheta, rtol=0.01 / npts), msg", "def _compute_weights(self, X, target_X, method='l2'):\n if method == 'proj_l2' or method == 'proj_l2_nonorm':\n #\n # At first calculate unrestricted weights: (X.T)^-1\n # Then project answer onto Unit simplex\n # \n target_center = np.mean(target_X, axis=0) # * X.shape[0]\n # Solve the system\n w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]\n weights = project_onto_simplex(w, normalize=True if method == 'proj_l2' else False)\n print(f\"Weights sum: {np.sum(weights)}\")\n weights /= np.sum(weights)\n return weights\n\n #\n # Pure solution, which make unrestricted weights\n #\n # Compute target center multiplied by number of source rows\n # target_center = np.mean(target_X, axis=0) * X.shape[0]\n # Solve the system\n # print(\"X^T shape: ({}), target_center^T shape: ({})\".format(X.T.shape, target_center.T.shape))\n # w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]\n # print(w)\n # return w.T\n if method == 'dist' or method == 'dist2':\n print(\"Using distance weighting\")\n target_center = np.mean(target_X, axis=0)\n residuals = X - target_center\n norm = np.linalg.norm(residuals, axis=1)\n print(f\"Max norm: {np.max(norm)}\")\n if method == 'dist':\n weights = np.max(norm) - norm # inverse weights\n elif method == 'dist2':\n small_eps = 1e-9\n weights = 1.0 / (norm + small_eps)\n weights = np.exp(weights) # softmax\n print(f\"Weights sum: {np.sum(weights)}\")\n weights /= np.sum(weights)\n return weights\n\n # Compute target center multiplied by number of source rows\n target_center = np.mean(target_X, axis=0) # * X.shape[0]\n # Solve the system\n q = cp.Constant(value=target_center.flatten())\n x_ = cp.Constant(value=X)\n\n w = cp.Variable(X.shape[0])\n # lam = self.optimization_lambda # 0.001\n # M = len(J)\n M = np.linalg.norm(X) ** 2 # target_X)\n print(\"M:\", M)\n lam = self.reg_lambda # 0.1\n if lam == 0:\n print(\"No regularization\")\n # cp.norm2(cp.matmul(X, beta) - Y)**2\n objective = cp.Minimize(cp.sum_squares(q.T - w.T @ x_)) # cp.Minimize(cp.sum_squares(q - x_ * w))\n else:\n objective 
= cp.Minimize(cp.sum_squares(q.T - w.T @ x_) / M + lam * cp.norm2(w)) # + lam * cp.norm2(w))\n constraints = [w >= 0, cp.sum_entries(w) == 1] #, w >= self.simplex_lower_boundary]\n prob = cp.Problem(objective, constraints)\n\n print(\"Problem is prepared\")\n\n try:\n result = prob.solve()\n except Exception as ex:\n print(\"Exception occurred: {}\".format(ex))\n print(\"Using SCS solver\")\n result = prob.solve(solver=cp.SCS, verbose=False)\n print(\"Problem status: {}\".format(prob.status))\n try:\n weights = w.value.A.flatten()\n except Exception as ex:\n print(\"Can't compute weights, use uniform distribution\")\n weights = np.ones((X.shape[0],)) / X.shape[0]\n print(weights)\n weights[weights < 0] = 0\n weights_sum = np.sum(weights)\n print(\"Weights sum: {}\".format(weights_sum))\n if weights_sum != 1.0: # probably always true\n weights /= weights_sum\n return weights", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def optimal_weights(self,n_points,er,cov):\n \n target_rs = np.linspace(er.min(), er.max(), n_points)\n weights = [self.minimize_vol(target_return, er, cov) for target_return in target_rs]\n \n return weights", "def get_weights(self):\n return self.weights\n #print(W)", "def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente", "def weights(self):\n \n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n \n c = .5 / (n + lambda_)\n Wc = np.full(2*n + 1, c)\n Wm = np.full(2*n + 1, c)\n Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n Wm[0] = lambda_ / (n + lambda_)\n \n return Wm, Wc", "def get_weights(self):\n\t\treturn self.V", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def weights(self):\r\n\t\treturn None", "def get_w(self, i_order):\n\n log.debug('Computing weights and k.')\n\n # Get needed attributes\n wave_grid, mask = self.get_attributes('wave_grid', 'mask')\n\n # ... order dependent attributes\n attrs = ['wave_p', 'wave_m', 'mask_ord', 'i_bounds']\n wave_p, wave_m, mask_ord, i_bnds = self.get_attributes(*attrs, i_order=i_order)\n\n # Use the convolved grid (depends on the order)\n wave_grid = wave_grid[i_bnds[0]:i_bnds[1]]\n\n # Compute the wavelength coverage of the grid\n d_grid = np.diff(wave_grid)\n\n # Get lo hi\n lo, hi = self._get_lo_hi(wave_grid, i_order) # Get indexes\n\n # Compute only valid pixels\n wave_p, wave_m = wave_p[~mask], wave_m[~mask]\n ma = mask_ord[~mask]\n\n # Number of used pixels\n n_i = len(lo)\n i = np.arange(n_i)\n\n # Define first and last index of wave_grid\n # for each pixel\n k_first, k_last = -1 * np.ones(n_i), -1 * np.ones(n_i)\n\n # If lowest value close enough to the exact grid value,\n # NOTE: Could be approximately equal to the exact grid\n # value. It would look like that.\n # >>> lo_dgrid = lo\n # >>> lo_dgrid[lo_dgrid==len(d_grid)] = len(d_grid) - 1\n # >>> cond = (grid[lo]-wave_m)/d_grid[lo_dgrid] <= 1.0e-8\n # But let's stick with the exactly equal\n cond = (wave_grid[lo] == wave_m)\n\n # special case (no need for lo_i - 1)\n k_first[cond & ~ma] = lo[cond & ~ma]\n wave_m[cond & ~ma] = wave_grid[lo[cond & ~ma]]\n\n # else, need lo_i - 1\n k_first[~cond & ~ma] = lo[~cond & ~ma] - 1\n\n # Same situation for highest value. 
If we follow the note\n # above (~=), the code could look like\n # >>> cond = (wave_p-grid[hi])/d_grid[hi-1] <= 1.0e-8\n # But let's stick with the exactly equal\n cond = (wave_p == wave_grid[hi])\n\n # special case (no need for hi_i - 1)\n k_last[cond & ~ma] = hi[cond & ~ma]\n wave_p[cond & ~ma] = wave_grid[hi[cond & ~ma]]\n\n # else, need hi_i + 1\n k_last[~cond & ~ma] = hi[~cond & ~ma] + 1\n\n # Generate array of all k_i. Set to -1 if not valid\n k_n, bad = atoca_utils.arange_2d(k_first, k_last + 1, dtype=int)\n k_n[bad] = -1\n\n # Number of valid k per pixel\n n_k = np.sum(~bad, axis=-1)\n\n # Compute array of all w_i. Set to np.nan if not valid\n # Initialize\n w_n = np.zeros(k_n.shape, dtype=float)\n ####################\n ####################\n # 4 different cases\n ####################\n ####################\n\n # Valid for every cases\n w_n[:, 0] = wave_grid[k_n[:, 1]] - wave_m\n w_n[i, n_k - 1] = wave_p - wave_grid[k_n[i, n_k - 2]]\n\n ##################\n # Case 1, n_k == 2\n ##################\n case = (n_k == 2) & ~ma\n if case.any():\n\n log.debug('n_k = 2 in get_w().')\n\n # if k_i[0] != lo_i\n cond = case & (k_n[:, 0] != lo)\n w_n[cond, 1] += wave_m[cond] - wave_grid[k_n[cond, 0]]\n\n # if k_i[-1] != hi_i\n cond = case & (k_n[:, 1] != hi)\n w_n[cond, 0] += wave_grid[k_n[cond, 1]] - wave_p[cond]\n\n # Finally\n part1 = (wave_p[case] - wave_m[case])\n part2 = d_grid[k_n[case, 0]]\n w_n[case, :] *= (part1 / part2)[:, None]\n\n ##################\n # Case 2, n_k >= 3\n ##################\n case = (n_k >= 3) & ~ma\n if case.any():\n\n log.debug('n_k = 3 in get_w().')\n n_ki = n_k[case]\n w_n[case, 1] = wave_grid[k_n[case, 1]] - wave_m[case]\n w_n[case, n_ki - 2] += wave_p[case] - wave_grid[k_n[case, n_ki - 2]]\n\n # if k_i[0] != lo_i\n cond = case & (k_n[:, 0] != lo)\n nume1 = wave_grid[k_n[cond, 1]] - wave_m[cond]\n nume2 = wave_m[cond] - wave_grid[k_n[cond, 0]]\n deno = d_grid[k_n[cond, 0]]\n w_n[cond, 0] *= (nume1 / deno)\n w_n[cond, 1] += (nume1 * nume2 / deno)\n\n # if k_i[-1] != hi_i\n cond = case & (k_n[i, n_k - 1] != hi)\n n_ki = n_k[cond]\n nume1 = wave_p[cond] - wave_grid[k_n[cond, n_ki - 2]]\n nume2 = wave_grid[k_n[cond, n_ki - 1]] - wave_p[cond]\n deno = d_grid[k_n[cond, n_ki - 2]]\n w_n[cond, n_ki - 1] *= (nume1 / deno)\n w_n[cond, n_ki - 2] += (nume1 * nume2 / deno)\n\n ##################\n # Case 3, n_k >= 4\n ##################\n case = (n_k >= 4) & ~ma\n if case.any():\n log.debug('n_k = 4 in get_w().')\n n_ki = n_k[case]\n w_n[case, 1] += wave_grid[k_n[case, 2]] - wave_grid[k_n[case, 1]]\n w_n[case, n_ki - 2] += (wave_grid[k_n[case, n_ki - 2]]\n - wave_grid[k_n[case, n_ki - 3]])\n\n ##################\n # Case 4, n_k > 4\n ##################\n case = (n_k > 4) & ~ma\n if case.any():\n log.debug('n_k > 4 in get_w().')\n i_k = np.indices(k_n.shape)[-1]\n cond = case[:, None] & (2 <= i_k) & (i_k < n_k[:, None] - 2)\n ind1, ind2 = np.where(cond)\n w_n[ind1, ind2] = (d_grid[k_n[ind1, ind2] - 1]\n + d_grid[k_n[ind1, ind2]])\n\n # Finally, divide w_n by 2\n w_n /= 2.\n\n # Make sure invalid values are masked\n w_n[k_n < 0] = np.nan\n\n return w_n, k_n" ]
[ "0.6455532", "0.63486326", "0.6317665", "0.6211078", "0.6192685", "0.61866015", "0.6092461", "0.6031609", "0.6025631", "0.60012954", "0.59838605", "0.59765804", "0.59765804", "0.59717125", "0.59193355", "0.59113336", "0.58744186", "0.58514315", "0.5846845", "0.5845702", "0.5837908", "0.57965374", "0.5790616", "0.5781395", "0.57486963", "0.57486963", "0.57486963", "0.57486963", "0.57436115", "0.574054" ]
0.68984425
0
Calculates the variance of the spectrum.
def spectral_variance(data, fft_data): return np.var(np.abs(fft_data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)", "def variance(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: (x - _mean)**2, self.sample))/(len(self.sample) - 1)", "def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def _derive_variance_(self):\n # Pure Photon Noise\n self._properties[\"var\"] = np.sqrt(self.rawdata*self.exposuretime) / self.exposuretime", "def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) * (data - avg)\n v = v / len(dataset)\n return v", "def variance(self) -> float:\n return (self._squared_interval_sum / len(self.intervals)) - (self.mean() * self.mean())", "def variance(self):\n return (math.exp(self.sigma ** 2) - 1.0) \\\n * math.exp(2.0 * self.mu + self.sigma ** 2)", "def variance(sign, FS):\n time = compute_time(sign, FS)\n soma_den = 0\n soma_num = 0\n for z in range(0, len(sign)):\n soma_num = soma_num + (time[z]*((sign[z]*np.mean(sign))**2))\n soma_den = soma_den + time[z]\n\n return soma_num/soma_den", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\r\n n = len(x)\r\n deviations = dev_mean(x)\r\n return sum_of_squares(deviations) / (n-1)", "def variance(self):\n return self.sigma", "def variance(self, avg=False):\n if not self.fp_init:\n if not avg:\n return self._calc_var(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_var(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._var if not avg else self._var_avg", "def variance(self):\n return self.k * self.theta ** 2", "def variance(self, mean=None):\n raise NotImplementedError", "def variance(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n mean = self.mean()\n weighted_central_moment = sum(\n count * (value - mean) ** 2 for value, count in clean.items()\n )\n return weighted_central_moment / total", "def variance(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n if sample == False: \n variance = sum(distance_squared)/(self.size)\n return variance", "def variance(self):\n observations_raw = input(\"Observations: \").split()\n observations = [int(elem) for elem in observations_raw]\n observations_squared = sum([num**2 for num in observations])\n aggregate_squared = sum(observations)**2\n n = len(observations)\n mean = sum(observations)/n\n variance = (observations_squared - (aggregate_squared/n))/(n-1)\n print(f\"Variance is: {variance}\")\n return variance, mean", "def var(self):\n return 
self._reduce_for_stat_function(F.variance, only_numeric=True)", "def _calculate_excess_variance(self, lc):\n std = self._calculate_std(lc)\n return np.var(lc) - std**2", "def compute_variance(\n self,\n parameters: NDArray,\n resids: NDArray,\n sigma2: NDArray,\n backcast: Union[float, NDArray],\n var_bounds: NDArray,\n ) -> NDArray:", "def _compute_variance(params):\n batch_grad = self._fetch_batch_grad(params, aggregate=True)\n grad = self._fetch_grad(params, aggregate=True)\n batch_size = batch_grad.size(0)\n\n if self._use_double:\n batch_grad = batch_grad.double()\n grad = grad.double()\n\n return (1 / (batch_size - 1)) * ((batch_size * batch_grad - grad) ** 2).sum(\n 0\n )", "def variance(self):\n if self.dirty:\n self._finalize()\n return self.vvar", "def variance(y, w):\n # w = clip_normalize(w)\n # Compute the expectance (d_y, n_q)\n y_q_exp = np.dot(y.T, w)\n\n # Compute the expectance of squares (d_y, n_q)\n y_q_exp_sq = np.dot((y ** 2).T, w)\n\n # Compute the variance (d_y, n_q)\n return y_q_exp_sq - (y_q_exp ** 2)", "def calculate_variance(X):\n\tmean = np.ones(np.shape(X)) * X.mean(0)\n\tn_samples = np.shape(X)[0]\n\tvariance = (1 / n_samples) * np.diag((X - mean).T.dot(X - mean))\n\treturn variance", "def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))", "def _variance(mean_variance, samples):\n mean = mean_variance[0] / samples\n variance = mean_variance[1]\n variance /= samples\n variance -= mean * mean\n return variance" ]
[ "0.77939093", "0.7585608", "0.74999875", "0.74020314", "0.74020314", "0.73374844", "0.73331225", "0.7285661", "0.72689235", "0.7261629", "0.70953506", "0.70953506", "0.70953506", "0.709008", "0.7080148", "0.7017273", "0.7012545", "0.70008934", "0.699682", "0.6989067", "0.69625527", "0.69300467", "0.6908478", "0.6907567", "0.6853641", "0.68476635", "0.6832369", "0.6822154", "0.681728", "0.6793173" ]
0.78063816
0
Calculate the skewness of the spectrum.
def spectral_skewness(data, fft_data): return scipy.stats.skew(np.abs(fft_data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def skewness(self):\n self._finalize()\n return self.vskewness", "def spectral_skewness(sign, fs):\n f, ff = plotfft(sign, fs)\n spect_centr = spectral_centroid(sign, fs)\n if not spectral_spread(sign, fs):\n return 0\n else:\n skew = ((f - spect_centr) ** 3) * (ff / np.sum(ff))\n return np.sum(skew) / (spectral_spread(sign, fs) ** (3 / 2))", "def calc_skewness(sig):\n return skew(sig)", "def skew(self) -> float:\n return float(ss.skew(self.tsdf.pct_change().values, bias=True, nan_policy='omit'))", "def skewness(r):\n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = (demeaned_r**3).mean()\n return exp/sigma_r**3", "def skewness(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n apb = a + b\n return 2*(b - a)*mp.sqrt(apb + 1)/((apb + 2)*(mp.sqrt(a*b)))", "def mlc_skew(self) -> float:\n return float(np.mean([p.skew() for p in self.pickets]))", "def skew(self) -> float:\n return float(np.rad2deg(self.fit.coefficients[0]))", "def skewness(self,return_series:pd.Series): \n\n demeaned_returns = return_series - return_series.mean()\n\n # Use the population standard deviation, so set dof=0\n sigma_r = return_series.std(ddof=0)\n exp = (demeaned_returns ** 3).mean()\n return exp/sigma_r ** 3", "def skew(self):\n \n v = self.v; w = self.w;\n \n # the following matrix is at odds with H&Z pg. 72\n return np.array([\n [ 0, v[2], -v[1], w[0]],\n [-v[2], 0 , v[0], w[1]],\n [ v[1], -v[0], 0, w[2]],\n [-w[0], -w[1], -w[2], 0 ]\n ])", "def skew(x, t=1):\n dx = x[t:] - x[:-t]\n dx = dx - np.mean(dx)\n return np.mean(dx ** 3) / np.mean(dx ** 2) ** 1.5", "def get_skewness_and_kurtosis(self):\n self._set_statistics()\n return self.statistics_object.get_skewness(), self.statistics_object.get_kurtosis()", "def gen_skew_peak(freqs, cen, height, scale, skew):\n\n ys = stats.skewnorm.pdf(freqs, skew, cen, scale)\n\n # Scale to (0, 1), then apply power transform\n ys = (ys / np.abs(ys).max()) * height\n\n return ys", "def skew(x):\n return np.array([\n [0, -1, x[1]],\n [1, 0, -x[0]],\n [-x[1], x[0], 0]\n ])", "def skew(x):\n return np.array([[0, -x[2], x[1]],\n [x[2], 0, -x[0]],\n [-x[1], x[0], 0]])", "def skew(q):\n S = SX.zeros(3, 3)\n S[0, 1] = -q[2]; S[0, 2] = q[1]\n S[1, 0] = q[2]; S[1, 2] = -q[0]\n S[2, 0] = -q[1]; S[2, 1] = q[0]\n return S", "def is_skew_symmetric(self):\n return self._info['skew_symmetric']", "def skew(x):\n return np.array([[0, -x[2], x[1]],\n [x[2], 0, -x[0]],\n [-x[1], x[0], 0]])", "def lskew(inlist):\r\n return moment(inlist,3)/pow(moment(inlist,2),1.5)", "def skew(x):\n X = np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])\n return X", "def spectral_kurtosis(sign, fs):\n f, ff = plotfft(sign, fs)\n if not spectral_spread(sign, fs):\n return 0\n else:\n spect_kurt = ((f - spectral_centroid(sign, fs)) ** 4) * (ff / np.sum(ff))\n return np.sum(spect_kurt) / (spectral_spread(sign, fs)**2)", "def skew_func(self, months_from_last: int = None, from_date: dt.date = None, to_date: dt.date = None) -> float:\n earlier, later = self.calc_range(months_from_last, from_date, to_date)\n return float(ss.skew(self.tsdf.loc[earlier:later].pct_change(), bias=True, nan_policy='omit'))", "def skew_kurtosis_value(df, feature):\r\n skewness = df[feature].skew()\r\n kurtosis = df[feature].kurt()\r\n\r\n print(\"Skewness: {}\".format(round(skewness, 2)))\r\n if skewness > 0:\r\n print(\"Positive/right skewness: mean and median > mode.\")\r\n else:\r\n print(\"Negative/left skewness: mean and median < mode\")\r\n\r\n 
print(\"\\nKurtosis: {}\".format(round(kurtosis, 2)))\r\n if kurtosis > 3:\r\n print(\"Leptokurtic: more outliers\")\r\n else:\r\n print(\"Platykurtic: less outliers\")", "def spectralwhitening(st):\n \n for trace in arange(len(st)):\n data = st[trace].data\n \n n = len(data)\n nfft = nextpow2(n)\n \n spec = fft(data, nfft)\n spec_ampl = sqrt(abs(multiply(spec, conjugate(spec))))\n \n spec /= spec_ampl #Do we need to do some smoothing here?\n ret = real(ifft(spec, nfft)[:n])\n \n st[trace].data = ret\n \n return st", "def tfdv_skew_validator(feature_name, train_stats, serve_stats, schema, threshold):\n #this doesn't display skew anomalies as the book shows\n tfdv.get_feature(schema, feature_name).skew_comparator.infinity_norm.threshold = threshold\n skew_anomalies = tfdv.validate_statistics(statistics = train_stats,\n schema = schema,\n serving_statistics = serve_stats)\n tfdv.display_anomalies(skew_anomalies)\n return skew_anomalies", "def is_skew_symmetric(self):\n return self.all_equal(-self.transpose())", "def skew_list(self):\n skew_list = self.df.skew().map(lambda x: abs(x)).sort_values(ascending=False).index\n if len(skew_list) > 3:\n skew_list = skew_list[:3]\n return skew_list", "def skew(self) -> FrameLike:\n return super().skew()", "def skew(self) -> FrameLike:\n return super().skew()", "def skew(self) -> FrameLike:\n return super().skew()" ]
[ "0.79790175", "0.7622297", "0.75499445", "0.74275786", "0.7356237", "0.69568706", "0.66155034", "0.66151226", "0.6565566", "0.65512294", "0.6200791", "0.60718316", "0.5972047", "0.5872354", "0.5837397", "0.58353823", "0.5832275", "0.5814251", "0.5724178", "0.57171047", "0.56981385", "0.5679786", "0.56792825", "0.5586739", "0.5574275", "0.55261743", "0.5507676", "0.5478332", "0.5478332", "0.5478332" ]
0.79276204
1
Calculates a measure of the flatness of the spectrum.
def spectral_flatness(data, fft_data): spec = np.abs(fft_data) spec_mean = np.mean(spec) spec_gmean = scipy.stats.gmean(spec) if spec_mean == 0: return 1 return spec_gmean/spec_mean
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spectral_spread(sign, fs):\n f, ff = plotfft(sign, fs)\n spect_centr = spectral_centroid(sign, fs)\n if not np.sum(ff):\n return 0\n else:\n return np.dot(((f-spect_centr)**2), (ff / np.sum(ff)))", "def spectral_slope(sign, fs):\n f, ff = plotfft(sign, fs)\n if not(list(f)):\n return 0\n else:\n if not (len(f) * np.dot(f, f) - np.sum(f) ** 2):\n return 0\n else:\n return (len(f) * np.dot(f, ff) - np.sum(f) * np.sum(ff)) / (len(f) * np.dot(f, f) - np.sum(f) ** 2)", "def is_freq_flat(self):\n return (\n len(self.energy_absorption[\"coeffs\"]) == 1\n and len(self.scattering[\"coeffs\"]) == 1\n )", "def stSpectralFlux(X, Xprev):\n # compute the spectral flux as the sum of square distances:\n sumX = numpy.sum(X + eps)\n sumPrevX = numpy.sum(Xprev + eps)\n F = numpy.sum((X / sumX - Xprev/sumPrevX) ** 2)\n\n return F", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "def measure_flats(det, shutter, quantity, samStage, samPos):\n priorPosition = samStage.position\n yield from set_white_frame()\n yield from bps.mv(\n samStage, samPos,\n shutter, \"open\")\n yield from _acquire_n_frames(det, quantity)\n yield from bps.mv(samStage, priorPosition)", "def spectral_skewness(sign, fs):\n f, ff = plotfft(sign, fs)\n spect_centr = spectral_centroid(sign, fs)\n if not spectral_spread(sign, fs):\n return 0\n else:\n skew = ((f - spect_centr) ** 3) * (ff / np.sum(ff))\n return np.sum(skew) / (spectral_spread(sign, fs) ** (3 / 2))", "def _spectrum(self, energy):\n return self.pref*energy**2*self.occupation_number(energy/self.kT)", "def power_spectral_density(var):\n\n n = len(var)\n Y = np.fft.fft(var)/n # fft computing and normalization\n Y = Y[range(int(n/2))]\n k = np.arange(n)\n Ts = 30*60 # sampling interval (sec)\n Fs = 1./Ts\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(int(n/2))] # one side frequency range\n\n return frq, abs(Y)", "def filter_spectrum(spectrum):\n # avoid division by 0\n spectrum.hs[1:] /= spectrum.fs[1:]\n spectrum.hs[0] = 0", "def simple_flatfield_corr(flat, real, Mfaktor, Pfaktor):\n real=real.astype(np.float64)\n flat=flat.astype(np.float64)\n # init_max=np.max(real)/4095\n print(np.max(real))\n flat=NormalizeData(flat)\n flat=flat*Mfaktor \n m=np.average(flat)\n gain=m/(flat+Pfaktor) \n C=np.multiply(gain, real) \n print(np.max(C))\n return C", "def spectral_brightness(data, fft_data):\n spec = np.abs(fft_data)\n weight_vec = np.log(np.linspace(1, 100, len(fft_data)))\n weight_vec = np.pi*weight_vec/weight_vec[-1]\n weight = np.cos(weight_vec)/2 + 0.5\n low_spec_sum = sum(spec*weight)\n high_spec_sum = sum(spec*(1-weight))\n if low_spec_sum == 0:\n return 1 # attention: if signal is a sine at fs/2 this is also hit\n return high_spec_sum/low_spec_sum", "def test_flat_signal(self):\n ts = self.create_ts(magnitude=0, signal_to_noise_ratio=0)\n\n for anomaly_magnitude in (0, 100):\n ts.value[95] += anomaly_magnitude\n\n model = ProphetDetectorModel(score_func=\"z_score\")\n response = model.fit_predict(ts[90:], ts[:90])\n actual_z_score = self.calc_z_score(\n ts.value[95],\n response.predicted_ts.value[5],\n 
response.confidence_band.upper.value[5],\n response.confidence_band.lower.value[5],\n )\n self.assertAlmostEqual(response.scores.value[5], actual_z_score, places=15)", "def get_flux_density(self):\n if self.no_flux is False:\n return self.snu_at_1GHz\n else:\n return -1", "def fwhm(self):\n vals = self.transmit / self.transmit.max() - 0.5\n zero_crossings = np.where(np.diff(np.sign(vals)))[0]\n lambs = self.wavelength[zero_crossings]\n return np.diff(lambs)[0]", "def spectral_decrease(sign, fs):\n f, ff = plotfft(sign, fs)\n\n k = len(ff)\n soma_num = 0\n for a in range(2, k):\n soma_num = soma_num + ((ff[a]-ff[1])/(a-1))\n\n ff2 = ff[2:]\n if not np.sum(ff2):\n return 0\n else:\n soma_den = 1 / np.sum(ff2)\n return soma_den * soma_num", "def rms_flat(a):\n return np.sqrt(np.mean(np.absolute(a) ** 2))", "def PowerSpectralDensity(f):\n sky_averaging_constant = (20/3) # Sky Averaged <--- I got this from Jonathan's notes but I need\n # to check where he got it...\n L = 2.5*10**9 # Length of LISA arm\n f0 = 19.09*10**-3 \n\n Poms = ((1.5*10**-11)**2)*(1 + ((2*10**-3)/f)**4) # Optical Metrology Sensor\n Pacc = (3*10**-15)**2*(1 + (4*10**-3/(10*f))**2)*(1 + (f/(8*10**-3))**4) # Acceleration Noise\n Sc = 9*10**(-45)*f**(-7/3)*np.exp(-f**0.171 + 292*f*np.sin(1020*f)) * (1 \\\n + np.tanh(1680*(0.00215 - f))) \n\n PSD = (sky_averaging_constant)* ((10/(3*L**2))*(Poms + (4*Pacc)/((2*np.pi*f))**4)*(1 + 0.6*(f/f0)**2) + Sc) # PSD\n return PSD", "def analytic_ft(self, x, y):\n from .otf import diffraction_limited_mtf\n r, p = cart_to_polar(x, y)\n return diffraction_limited_mtf(self.fno, self.wavelength, r*1e3) # um to mm", "def spectral_density(wav, factor=None):\n from .core import UnitBase\n\n if isinstance(wav, UnitBase):\n if factor is None:\n raise ValueError(\"If `wav` is specified as a unit, `factor` should be set\")\n wav = factor * wav # Convert to Quantity\n c_Aps = _si.c.to_value(si.AA / si.s) # Angstrom/s\n h_cgs = _si.h.cgs.value # erg * s\n hc = c_Aps * h_cgs\n\n # flux density\n f_la = cgs.erg / si.angstrom / si.cm**2 / si.s\n f_nu = cgs.erg / si.Hz / si.cm**2 / si.s\n nu_f_nu = cgs.erg / si.cm**2 / si.s\n la_f_la = nu_f_nu\n phot_f_la = astrophys.photon / (si.cm**2 * si.s * si.AA)\n phot_f_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz)\n la_phot_f_la = astrophys.photon / (si.cm**2 * si.s)\n\n # luminosity density\n L_nu = cgs.erg / si.s / si.Hz\n L_la = cgs.erg / si.s / si.angstrom\n nu_L_nu = cgs.erg / si.s\n la_L_la = nu_L_nu\n phot_L_la = astrophys.photon / (si.s * si.AA)\n phot_L_nu = astrophys.photon / (si.s * si.Hz)\n\n # surface brightness (flux equiv)\n S_la = cgs.erg / si.angstrom / si.cm**2 / si.s / si.sr\n S_nu = cgs.erg / si.Hz / si.cm**2 / si.s / si.sr\n nu_S_nu = cgs.erg / si.cm**2 / si.s / si.sr\n la_S_la = nu_S_nu\n phot_S_la = astrophys.photon / (si.cm**2 * si.s * si.AA * si.sr)\n phot_S_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz * si.sr)\n\n # surface brightness (luminosity equiv)\n SL_nu = cgs.erg / si.s / si.Hz / si.sr\n SL_la = cgs.erg / si.s / si.angstrom / si.sr\n nu_SL_nu = cgs.erg / si.s / si.sr\n la_SL_la = nu_SL_nu\n phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr)\n phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr)\n\n def f_la_to_f_nu(x):\n return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)\n\n def f_la_from_f_nu(x):\n return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)\n\n def f_nu_to_nu_f_nu(x):\n return x * wav.to_value(si.Hz, spectral())\n\n def f_nu_from_nu_f_nu(x):\n return x / wav.to_value(si.Hz, spectral())\n\n def 
f_la_to_la_f_la(x):\n return x * wav.to_value(si.AA, spectral())\n\n def f_la_from_la_f_la(x):\n return x / wav.to_value(si.AA, spectral())\n\n def phot_f_la_to_f_la(x):\n return hc * x / wav.to_value(si.AA, spectral())\n\n def phot_f_la_from_f_la(x):\n return x * wav.to_value(si.AA, spectral()) / hc\n\n def phot_f_la_to_f_nu(x):\n return h_cgs * x * wav.to_value(si.AA, spectral())\n\n def phot_f_la_from_f_nu(x):\n return x / (wav.to_value(si.AA, spectral()) * h_cgs)\n\n def phot_f_la_to_phot_f_nu(x):\n return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps\n\n def phot_f_la_from_phot_f_nu(x):\n return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2\n\n phot_f_nu_to_f_nu = phot_f_la_to_f_la\n phot_f_nu_from_f_nu = phot_f_la_from_f_la\n\n def phot_f_nu_to_f_la(x):\n return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3\n\n def phot_f_nu_from_f_la(x):\n return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)\n\n # for luminosity density\n L_nu_to_nu_L_nu = f_nu_to_nu_f_nu\n L_nu_from_nu_L_nu = f_nu_from_nu_f_nu\n L_la_to_la_L_la = f_la_to_la_f_la\n L_la_from_la_L_la = f_la_from_la_f_la\n\n phot_L_la_to_L_la = phot_f_la_to_f_la\n phot_L_la_from_L_la = phot_f_la_from_f_la\n phot_L_la_to_L_nu = phot_f_la_to_f_nu\n phot_L_la_from_L_nu = phot_f_la_from_f_nu\n phot_L_la_to_phot_L_nu = phot_f_la_to_phot_f_nu\n phot_L_la_from_phot_L_nu = phot_f_la_from_phot_f_nu\n phot_L_nu_to_L_nu = phot_f_nu_to_f_nu\n phot_L_nu_from_L_nu = phot_f_nu_from_f_nu\n phot_L_nu_to_L_la = phot_f_nu_to_f_la\n phot_L_nu_from_L_la = phot_f_nu_from_f_la\n\n return Equivalency(\n [\n # flux\n (f_la, f_nu, f_la_to_f_nu, f_la_from_f_nu),\n (f_nu, nu_f_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),\n (f_la, la_f_la, f_la_to_la_f_la, f_la_from_la_f_la),\n (phot_f_la, f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),\n (phot_f_la, f_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),\n (phot_f_la, phot_f_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),\n (phot_f_nu, f_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),\n (phot_f_nu, f_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),\n # integrated flux\n (la_phot_f_la, la_f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),\n # luminosity\n (L_la, L_nu, f_la_to_f_nu, f_la_from_f_nu),\n (L_nu, nu_L_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),\n (L_la, la_L_la, L_la_to_la_L_la, L_la_from_la_L_la),\n (phot_L_la, L_la, phot_L_la_to_L_la, phot_L_la_from_L_la),\n (phot_L_la, L_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),\n (phot_L_la, phot_L_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),\n (phot_L_nu, L_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),\n (phot_L_nu, L_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),\n # surface brightness (flux equiv)\n (S_la, S_nu, f_la_to_f_nu, f_la_from_f_nu),\n (S_nu, nu_S_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),\n (S_la, la_S_la, f_la_to_la_f_la, f_la_from_la_f_la),\n (phot_S_la, S_la, phot_f_la_to_f_la, phot_f_la_from_f_la),\n (phot_S_la, S_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),\n (phot_S_la, phot_S_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),\n (phot_S_nu, S_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),\n (phot_S_nu, S_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),\n # surface brightness (luminosity equiv)\n (SL_la, SL_nu, f_la_to_f_nu, f_la_from_f_nu),\n (SL_nu, nu_SL_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),\n (SL_la, la_SL_la, L_la_to_la_L_la, L_la_from_la_L_la),\n (phot_SL_la, SL_la, phot_L_la_to_L_la, phot_L_la_from_L_la),\n (phot_SL_la, SL_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),\n (phot_SL_la, phot_SL_nu, phot_L_la_to_phot_L_nu, 
phot_L_la_from_phot_L_nu),\n (phot_SL_nu, SL_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),\n (phot_SL_nu, SL_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),\n ],\n \"spectral_density\",\n {\"wav\": wav, \"factor\": factor},\n )", "def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:\n wlm = wl * 1e-9 # Wavelength to meters\n return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.)", "def normaliseTracesMagnitude(traces, unfolded_level): \n return traces / np.abs(unfolded_level)", "def compute_volume(self) -> float:\n return (\n (1 if self.clockwise else -1)\n * np.sum(\n np.linalg.det(\n np.dstack(\n (\n self.vertices[self._faces[:, 0]],\n self.vertices[self._faces[:, 1]],\n self.vertices[self._faces[:, 2]],\n )\n )\n )\n )\n / 6\n )", "def one_transition_spectrum_gauss(self,tr):\n \n \n fa = tr[\"fa\"] # Frequency axis\n HWHH = tr[\"HWHH\"] # Half width at the half hight (maximum)\n dd = tr[\"dd\"] # transition dipole strength\n rr = tr[\"rr\"] # transition dipole strength\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"]+self.rwa # frequency\n \n # LineShape = lambda p, x: (x/(p[1]*np.sqrt(2*m.pi))*np.exp(-0.5*((x-p[0])/p[1])**2))\n # broad = broad/np.sqrt(2*np.log(2))\n sigma = HWHH/numpy.sqrt(2*numpy.log(2))\n \n # x = ta.data\n \n data = (fa.data/(sigma*numpy.sqrt(2*numpy.pi))*numpy.exp(-0.5*((fa.data-om)/sigma)**2))\n data_abs = dd*data\n data_CD = rr*data\n data_LD = ld*data\n \n return data_abs,data_CD, data_LD", "def spect_variation(sign, fs):\n f, ff = plotfft(sign, fs)\n energy, freq = signal_energy(ff, f)\n\n sum1 = 0\n sum2 = 0\n sum3 = 0\n for a in range(len(energy)-1):\n sum1 = sum1+(energy[a-1]*energy[a])\n sum2 = sum2+(energy[a-1]**2)\n sum3 = sum3+(energy[a]**2)\n\n if not sum2 or not sum3:\n variation = 1\n else:\n variation = 1-((sum1)/((sum2**0.5)*(sum3**0.5)))\n\n return variation", "def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux", "def signal_spectral(signal, FS):\n # check inputs\n if signal is None or signal == []:\n print(\"Signal is empty.\")\n\n # ensure numpy\n signal = np.array(signal)\n # f, spectrum = st.welch_spectrum(signal, sampling_rate=FS)\n spectrum = np.fft.fft(signal, FS)[:len(signal)//2]\n f = np.fft.fftfreq(len(signal))[:len(signal)//2]\n\n cum_ff = np.cumsum(spectrum)\n spect_diff = np.diff(spectrum)\n #energy, _ = st.signal_energy(spectrum, f)[:]\n\n args, names = [], []\n\n if dict['spectral_maxpeaks']['use'] == 'yes':\n # spectral_maxpeaks\n try:\n spectral_maxpeaks = np.sum([1 for nd in range(len(spect_diff[:-1])) if (spect_diff[nd+1]<0 and spect_diff[nd]>0)])\n except:\n spectral_maxpeaks = None\n args += [spectral_maxpeaks]\n names += ['spectral_maxpeaks']\n\n # if dict['spect_var']['use'] == 'yes':\n # # spect_variation\n # try:\n # spect_var = np.convolve(energy)\n # spect_var /= np.max(np.abs(spect_var))\n # except:\n # spect_var = None\n # args += [spect_var]\n # names += ['spect_var']\n\n if dict['curve_distance']['use'] == 'yes':\n # curve_distance\n try:\n curve_distance = np.sum(np.linspace(0, cum_ff[-1], len(cum_ff)) - cum_ff)\n except:\n curve_distance = None\n args += [curve_distance]\n names += ['curve_distance']\n\n if dict['spectral_roll_off']['use'] == 'yes':\n # spectral_roll_off\n try:\n spectral_roll_off = spectral_roll(f, spectrum, cum_ff, 0.95)[0]\n except:\n spectral_roll_off = None\n args += [spectral_roll_off]\n names += ['spectral_roll_off']\n\n if dict['spectral_roll_on']['use'] == 'yes':\n # spectral_roll_on\n try:\n spectral_roll_on = 
spectral_roll(f, spectrum, cum_ff, 0.05)[0]\n except:\n spectral_roll_on = None\n args += [spectral_roll_on]\n names += ['spectral_roll_on']\n\n if dict['spectral_dec']['use'] == 'yes':\n # spectral_decrease\n try:\n spectral_dec = (1/np.sum(spectrum)) * np.sum((spectrum[:] - spectrum[1])/np.linspace(1, len(spectrum), len(spectrum),1))\n except:\n spectral_dec = None\n args += [spectral_dec]\n names += ['spectral_dec']\n\n if dict['spectral_slope']['use'] == 'yes':\n # spectral_slope\n sum_f = np.sum(f)\n len_f = len(f)\n try:\n spectral_slope = (len_f * np.dot(f, spectrum) - sum_f * np.sum(spectrum)) / (len_f * np.dot(f, f) - sum_f ** 2)\n except:\n spectral_slope = None\n args += [spectral_slope]\n names += ['spectral_slope']\n\n sum_spectrum = np.sum(spectrum)\n norm_spectrum = spectrum / sum_spectrum\n # spectral_centroid\n try:\n spectral_centroid = np.dot(f, norm_spectrum)\n except:\n spectral_centroid = None\n\n # spectral_spread\n try:\n spectral_spread = np.dot(((f - spectral_centroid) ** 2), norm_spectrum)\n except:\n spectral_spread = None\n\n if dict['spectral_spread']['use'] == 'yes':\n args += [spectral_spread]\n names += ['spectral_spread']\n\n if dict['spectral_kurtosis']['use'] == 'yes':\n # spectral_kurtosis\n try:\n spectral_kurtosis = np.sum(((f - spectral_centroid) ** 4) * norm_spectrum) / (spectral_spread**2)\n except:\n spectral_kurtosis = None\n args += [spectral_kurtosis]\n names += ['spectral_kurtosis']\n\n if dict['spectral_skewness']['use'] == 'yes':\n # spectral_skewness\n try:\n spectral_skewness = np.sum(((f - spectral_centroid) ** 3) * norm_spectrum) / (spectral_spread ** (3 / 2))\n except:\n spectral_skewness = None\n args += [spectral_skewness]\n names += ['spectral_skewness']\n\n if dict['max_frequency']['use'] == 'yes':\n # max_frequency\n try:\n max_frequency = f[np.where(cum_ff > cum_ff[-1]*0.95)[0][0]]\n except:\n max_frequency = None\n args += [max_frequency]\n names += ['max_frequency']\n\n if dict['fundamental_frequency']['use'] == 'yes':\n # fundamental_frequency\n try:\n fundamental_frequency = f[np.where(cum_ff > cum_ff[-1]*0.5)[0][0]]\n except:\n fundamental_frequency = None\n args += [fundamental_frequency]\n names += ['fundamental_frequency']\n\n # if dict['max_power_spectrum']['use'] == 'yes':\n # # max_power_spectrum\n # try:\n # max_power_spectrum = np.max(spectrum)\n # except:\n # max_power_spectrum = None\n # args += max_power_spectrum\n # names += 'max_power_spectrum'\n\n # if dict['mean_power_spectrum']['use'] == 'yes':\n # # mean_power_spectrum\n # try:\n # mean_power_spectrum = np.mean(spectrum)\n # except:\n # mean_power_spectrum = None\n # args += mean_power_spectrum\n # names += 'mean_power_spectrum'\n #\n # if dict['spectral_skewness']['use'] == 'yes':\n # try:\n # spectral_skewness = np.mean(spectrum)\n # except:\n # spectral_skewness = None\n # args += spectral_skewness\n # names += 'spectral_skewness'\n #\n # if dict['spectral_kurtosis']['use'] == 'yes':\n # try:\n # spectral_kurtosis = np.mean(spectrum)\n # except:\n # spectral_kurtosis = None\n # args += spectral_kurtosis\n # names += 'spectral_kurtosis'\n\n # if dict['spectral_hist_']['use'] == 'yes':\n # # histogram\n # try:\n # _hist = list(np.histogram(spectrum, bins=int(np.sqrt(len(spectrum))), density=True)[0])\n # except:\n # if len(signal) > 1:\n # _hist = [None] * int(np.sqrt(len(signal)))\n # else:\n # _hist = [None]\n # args += [i for i in _hist]\n # names += ['spectral_hist_' + str(i) for i in range(len(_hist))]\n\n #return utils.ReturnTuple(tuple(args), 
tuple(names))\n return args, names", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def test_fft_spectrum_02():\n f, t, Sxx = _spectral_helper(x, x, fs=s_freq,\n window='hann',\n nperseg=x.shape[0],\n noverlap=0,\n nfft=None,\n return_onesided=True,\n mode='psd',\n scaling='spectrum')\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', scaling='energy', sides='one')\n\n assert_array_equal(f0, f)\n assert_array_almost_equal(Sxx0, Sxx[:, 0] * CORRECTION_FACTOR)", "def normalise(self, spectrum):\n\n return spectrum" ]
[ "0.62050503", "0.60166943", "0.56262547", "0.5620439", "0.5616198", "0.56150395", "0.560767", "0.55894583", "0.55654633", "0.5527765", "0.5519964", "0.54929906", "0.54644346", "0.5439519", "0.5435739", "0.54256", "0.54226637", "0.54216653", "0.5389448", "0.5377065", "0.5343928", "0.5332941", "0.53047884", "0.528194", "0.527634", "0.5274558", "0.52731633", "0.52693135", "0.5268529", "0.52647114" ]
0.7009917
0
Install a package. This runs the preinst script, extracts the data, creates a package content file, sets the access rights on the extracted files, runs the postinst script, and removes temporary files created during the installation. Setting `suppressPackageContentFileGeneration` to `True` will suppress the creation of the package content file.
def install(self, clientDataDir, suppressPackageContentFileGeneration=False): self.setClientDataDir(clientDataDir) self.getMetaData() self.runPreinst() self.extractData() if not suppressPackageContentFileGeneration: self.createPackageContentFile() self.setAccessRights() self.runPostinst() self.cleanup()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_package(package):\n\tn_ucr = extFile(package, 'univention-config-registry')\n\tif not os.path.exists(n_ucr):\n\t\treturn\n\n\tf_ucr = open(n_ucr, 'r')\n\n\tfor item in univention.config_registry.parseRfc822(f_ucr.read()):\n\t\ttyp = item['Type'][0]\n\t\tif typ == 'file':\n\t\t\tf = item['File'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'subfile':\n\t\t\tf = item['Subfile'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\telif typ == 'multifile':\n\t\t\tf = item['Multifile'][0]\n\t\t\tif os.path.exists(f):\n\t\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'script':\n\t\t\tf = item['Script'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'scripts'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'scripts'))\n\t\t\tdoIt('chmod', 'a+x', destPath(f, package, 'scripts'))\n\t\telif typ == 'module':\n\t\t\tf = item['Module'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'modules'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'modules'))\n\t\telse:\n\t\t\tprint >>sys.stderr, 'Unknown type: %s' % typ\n\t\t\treturn\n\n\tf_ucr.close()\n\n\tdoIt('install', '-d', destDir('', package, 'info'))\n\tdoIt('install', '-m644', n_ucr, destPath(package+'.info', package, 'info'))\n\tmapping_file = extFile( package, 'univention-config-registry-mapping')\n\tif os.path.exists(mapping_file):\n\t\tdoIt('install', '-d', destDir('', package, 'mapping'))\n\t\tdoIt('install', '-m644', mapping_file, destPath(package+'.univention-config-registry-mapping', package, 'mapping'))\n\n\tdata = {\n\t\t\t'pkg': quote(package),\n\t\t\t'info': quote(\"/etc/univention/templates/info/%s.info\" % package),\n\t\t\t'removed': quote(\"/etc/univention/templates/removed/%s.info\" % package),\n\t\t\t}\n\n\tf_preinst = open(extFile(package, 'preinst.debhelper'), 'a')\n\tf_preinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_preinst.write('if [ \"$1\" = \"install\" ] ; then\\n')\n\tf_preinst.write(' [ -e %(removed)s ] && [ ! 
-e %(info)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_preinst.write('fi\\n')\n\tf_preinst.write('# End automatically added section\\n')\n\tf_preinst.close()\n\n\tf_postinst = open(extFile(package, 'postinst.debhelper'), 'a')\n\tf_postinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_postinst.write('if [ \"$1\" = \"abort-remove\" ]; then\\n')\n\tf_postinst.write(' [ -e %(removed)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_postinst.write('fi\\n')\n\tf_postinst.write('[ -x /usr/sbin/univention-config-registry ] && univention-config-registry register %(pkg)s || true\\n' % data)\n\tf_postinst.write('# End automatically added section\\n')\n\tf_postinst.close()\n\n\tf_prerm = open(extFile(package, 'prerm.debhelper'), 'a')\n\tf_prerm.write('# Automatically added by univention-install-config-registry\\n')\n\tf_prerm.write('if [ \"$1\" = \"remove\" ] && [ -e %(info)s ] ; then\\n' % data)\n\tf_prerm.write(' [ -x /usr/sbin/univention-config-registry ] && univention-config-registry unregister %(pkg)s || true\\n' % data)\n\tf_prerm.write(' mv %(info)s %(removed)s || true\\n' % data)\n\tf_prerm.write('fi\\n')\n\tf_prerm.write('# End automatically added section\\n')\n\tf_prerm.close()\n\n\tdoIt('perl', '-e', 'use Debian::Debhelper::Dh_Lib;addsubstvar(\"%s\", \"misc:Depends\", \"univention-config (>= 7.0.25)\");' % package)", "def install_package(self, package):\n raise NotImplementedError(\"install_package not implemented!\")", "def install(self, no_dependencies: bool = True):\n return PackageHelper.install_package(name=self.name, no_dependencies=no_dependencies)", "def _install_package(project_root, package):\n\n package = _return_package_url(project_root, package)\n\n # Some packages can be install multiple packages.\n if isinstance(package, ruamel.yaml.comments.CommentedSeq):\n # Loop through packages\n for x in package:\n logging.info('Multiple packages, installing: '+x)\n x_url = _return_package_url(project_root, str(x))\n print(\"xurl\", x_url)\n Packager(x_url, project_root).install()\n else:\n logging.info(\"Downloading from: \"+str(package))\n Packager(package, project_root).install()\n return package", "def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()", "def do_install(self, args):\n if args:\n try:\n plugin_name, file_path = args.split()[0], args.split()[1]\n except Exception as e:\n return print(display_messages(\"the argument is invalid please type ?install for more information\", error=True))\n if not path.isfile(file_path):\n return print(\n display_messages(\n \"the file {} not found \".format(file_path), error=True\n )\n )\n head, tail = os.path.split(file_path)\n dest = copyfile(file_path, \"{}/{}\".format(self.temp_path, tail))\n print(display_messages(\"copy content file .zip to {}\".format(dest), info=True))\n \n path_to_zip_file = tempfile.gettempdir() + \"/{}\".format(tail)\n with ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(tempfile.gettempdir())\n temp_path_file_extracted = \"{}/{}.py\".format(self.temp_path, plugin_name)\n print(\n display_messages(\n \"extracted files on : {}\".format(temp_path_file_extracted), info=True\n )\n )\n if not path.isfile(temp_path_file_extracted):\n return print(\n display_messages(\n \"the file {} not found \".format(temp_path_file_extracted), error=True\n )\n )\n temp_templates_path = \"{}/{}\".format(self.temp_path, plugin_name)\n if not path.isdir(temp_templates_path):\n return print(\n display_messages(\n \"the directory template {} not 
found \".format(temp_templates_path), error=True\n )\n )\n source = temp_path_file_extracted\n destination = \"{}/{}.py\".format(self.captiveflask_setup_path, plugin_name)\n dest = copyfile(source, destination)\n print(display_messages(\"copy content file to {}\".format(dest), info=True))\n\n copy_tree(\n temp_templates_path, C.user_config_dir + \"/config/templates/{}\".format(plugin_name)\n )\n print(\n display_messages(\n \"plugin {} install {}\".format( plugin_name,setcolor(\"sucessful\", color=\"green\")),\n info=True,\n )\n )\n return \n print(\n display_messages(\"unknown command: {} \".format(args), error=True)\n )", "def install_package_data(data_dir: str = None):\n\n zen = InstallPackageData(data_dir=data_dir)\n\n zen.fetch_zenodo()", "def command_package_install(*args):\n\n if len(args) == 0:\n print 'No package name specified'\n return 1\n packname = args[0]\n package_load_config(packname)\n\n if len(args) < 2:\n print \"Must specify a URL to install from\"\n return 1\n binary_url = args[1]\n\n depdir = package_path(packname)\n installdir = package_path(packname, packageconfig.install_dir_name)\n\n tempdir = os.path.join(tempfile.gettempdir(), 'sirikata-deploy-' + str(datetime.datetime.now().time()))\n os.mkdir(tempdir)\n\n try:\n subprocess.check_call(['curl', '-O', binary_url], cwd=tempdir)\n fname = binary_url.rsplit('/', 1)[1]\n if fname.endswith('tar.gz') or fname.endswith('tgz'):\n subprocess.check_call(['tar', '-xzvf', fname], cwd=tempdir)\n elif fname.endswith('tar.bz2'):\n subprocess.check_call(['tar', '-xjvf', fname], cwd=tempdir)\n elif fname.endswith('zip'):\n subprocess.check_call(['unzip', fname], cwd=tempdir)\n else:\n print \"Don't know how to extract file\", fname\n return 1\n\n # Figure out where the actual install is since archives\n # frequently have a layer of extra directories\n curdir = tempdir\n while True:\n subdirs = [x for x in os.listdir(curdir) if os.path.isdir(os.path.join(curdir, x))]\n if 'bin' in subdirs:\n break\n assert(len(subdirs) == 1)\n curdir = os.path.join(curdir, subdirs[0])\n # Now swap the directory we found into place\n if os.path.exists(installdir): shutil.rmtree(installdir)\n shutil.move(curdir, installdir)\n # Cleanup\n shutil.rmtree(tempdir)\n except subprocess.CalledProcessError:\n return 1\n\n return 0", "def pre_install_pkg(self, installable_pkg):\n pass", "def _install(package_name, package_version, options_path, app_id, cli, app,\n yes):\n\n if cli is False and app is False:\n # Install both if neither flag is specified\n cli = app = True\n\n # Expand ~ in the options file path\n if options_path:\n options_path = os.path.expanduser(options_path)\n user_options = _user_options(options_path)\n\n package_manager = _get_package_manager()\n pkg = package_manager.get_package_version(package_name, package_version)\n\n pkg_json = pkg.package_json()\n pre_install_notes = pkg_json.get('preInstallNotes')\n if app and pre_install_notes:\n emitter.publish(pre_install_notes)\n if not _confirm('Continue installing?', yes):\n emitter.publish('Exiting installation.')\n return 0\n\n if app and pkg.has_mustache_definition():\n\n # render options before start installation\n options = pkg.options(user_options)\n\n # Install in Marathon\n msg = 'Installing Marathon app for package [{}] version [{}]'.format(\n pkg.name(), pkg.version())\n if app_id is not None:\n msg += ' with app id [{}]'.format(app_id)\n\n emitter.publish(msg)\n\n package_manager.install_app(\n pkg,\n options,\n app_id)\n\n if cli and pkg.has_cli_definition():\n # Install 
subcommand\n msg = 'Installing CLI subcommand for package [{}] version [{}]'.format(\n pkg.name(), pkg.version())\n emitter.publish(msg)\n\n subcommand.install(pkg)\n\n subcommand_paths = subcommand.get_package_commands(package_name)\n new_commands = [os.path.basename(p).replace('-', ' ', 1)\n for p in subcommand_paths]\n\n if new_commands:\n commands = ', '.join(new_commands)\n plural = \"s\" if len(new_commands) > 1 else \"\"\n emitter.publish(\"New command{} available: {}\".format(plural,\n commands))\n\n post_install_notes = pkg_json.get('postInstallNotes')\n if app and post_install_notes:\n emitter.publish(post_install_notes)\n\n return 0", "def install_pkg(pkg):\n# Checkout packages if we have them in ROOT\n src_path = check_module_path(pkg)\n# Analyzing packages: generation of package/module CMakeFile.txt,\n analizer_package(pkg, src_path)\n# Building DB\n db_manifest = generation_db_modules()\n# Print DB\n print_db_manifest(db_manifest)\n# Generating DB instance for dag\n dag_manifest = dag_pre_generation(db_manifest)\n# WIP: Still not working\n generation_lock_file_from_dag(dag_manifest)\n# Parcing packages in db_manifest\n parced_db_manifest = parser_db_manifest(db_manifest)\n# Resolving dependecies without DAG\n resolver_dependencies(pkg, parced_db_manifest)\n# Adopting name of package according generated DB\n# We are rewriting name of package!\n pkg = naming_checker(pkg)\n# Before buiding we need to check if pkg is really in the Db\n check_pkg_db(pkg, parced_db_manifest)\n# Check if package is installed\n if check_install_pkg_db(pkg, parced_db_manifest):\n return True\n else:\n# Trigger trigger_dependencies_pkg_db\n #trigger_dependency_pkg_db(pkg, parced_db_manifest)\n# Clean build directory\n clean_build(pkg, parced_db_manifest)\n# Reruning CMake\n rerun_configuration(pkg)\n# Buiding packages\n build_package(pkg, parced_db_manifest)\n# Preparing packages\n prepare_package(pkg)\n# Installing packages\n deploy_val = deploy_pkg(pkg)\n####################\n try:\n db_manifest[pkg][\"installed\"] = True\n except:\n pass\n return deploy_val, True", "def install_package(self, package):\n package = package.lower()\n command = shlex.split('sudo DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" --force-yes -y install ' + package)\n try:\n print subprocess.check_call(command, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n if \"unable to locate package\" in e.output.lower():\n print \"Can't identify package name. 
Check spelling of package name\"", "def packaging(src):\n\twork_copy = osp.dirname(src)\n\t\n\taddon_info = \"\".join(open(work_copy + osp.sep + \"install.rdf\"))\n\taddon_name = re.search(\"(?<=em\\:name\\=\\\").*(?=\\\")\",addon_info).group(0)\n\taddon_version = re.search(\"(?<=em\\:version\\=\\\").*(?=\\\")\",addon_info).group(0)\n\n\ttemp_copy_base = tempfile.mkdtemp()\n\ttemp_copy = osp.join(temp_copy_base,addon_name)\n\t\n\txpi_name = \"%s-%s.xpi\" % (addon_name,addon_version)\n\txpi_fullpath = osp.join(work_copy,xpi_name);\n\t\n\tprint \"\"\"\n\tAdd-on : %s\n\tVersion : %s\n\tWork Copy : %s\n\tTemp Copy : %s\n\tXPI File : %s\n\t\"\"\" % (addon_name,addon_version,work_copy,temp_copy, xpi_name)\n\n\tprint \"copying work to temp dir...\"\n\tcopytree(work_copy,temp_copy,ignore=ignore_patterns('scriptdemo','*.xpi','.*','*.bat','*.py','*LOG','*~','*.swp'))\n\n\tprint \"packaging xpi...\"\n\tcompress(temp_copy,xpi_fullpath);\n\n\tprint \"cleaning...\"\n\trmtree(temp_copy_base)", "def install(package_path):\n logging.info(\"Installing package: \" + package_path)\n project_root = get_project_root()\n # Update the package short name to url using index.\n package_path = _install_package(project_root, package_path)", "def run_package(m):\n\n if m.args.upload:\n doc = find_fs_package_from_dir(m.args.source)\n else:\n doc = find_csv_package(m)\n\n url, user, password = get_site_config(m.args.site_name)\n wp = Client(url, user, password)\n\n post = get_or_new_post(m, wp, doc)\n\n assert post is not None\n\n if m.args.upload:\n upload_to_wordpress(wp, post, doc)\n\n content = html(doc, m.args.template)\n\n post.excerpt = doc['Root'].get_value('Root.Description') or content[:200]\n\n post_tags = list(set(\n [t.value for t in doc['Root'].find('Root.Tag')] +\n [t.value for t in doc['Root'].find('Root.Group')] +\n [doc['Root'].get_value('Root.Origin')] +\n list(split_groups_tags(m.args.group)) +\n list(split_groups_tags(m.args.tag))\n ))\n\n post.terms_names = {\n 'post_tag': post_tags,\n 'category': ['Dataset'] + list(split_groups_tags(m.args.group))\n }\n\n post.title = doc.get_value('Root.Title')\n post.slug = slugify(doc.nonver_name)\n post.content = content\n\n if m.args.publish:\n post.post_status = 'publish'\n\n try:\n if m.args.no_op:\n r = {}\n else:\n r = wp.call(EditPost(post.id, post))\n except Fault as e:\n\n if 'taxonomies' in e.faultString:\n err((\"User {} does not have permissions to add terms to taxonomies. \"\n \"Terms are: {}\").format(user, post.terms_names))\n\n raise\n\n return r", "def install():\n execute(generate)\n execute(upload)", "def install(package:str, version:str=None, path:str=None):\r\n v = version\r\n if v is None:\r\n v = \"Latest\"\r\n logging.info(\"Installing Package {} - Version: {}...\".format(package, v))\r\n\r\n if path is None:\r\n path = get_site_packages_path()\r\n data = get_package_info(package, version)\r\n dst = os.path.join(path, data[\"name\"])\r\n filename = data[\"name\"] + \".zip\"\r\n\r\n if os.path.isdir(dst):\r\n raise IOError(\"The Package You Have Attempted To Install Is Already Installed. 
Use 'update' To Check For Package Updates\")\r\n\r\n logging.info(\"Downloading Package Data From PyGE Server...\")\r\n request.urlretrieve(data[\"url\"], os.path.join(path, filename))\r\n logging.info(\"Finished Downloading Package Data From PyGE Server\")\r\n\r\n logging.info(\"Extracting Downloaded Package Data...\")\r\n with zipfile.ZipFile(os.path.join(path, filename), 'r') as zip_ref:\r\n zip_ref.extractall(path)\r\n d = zip_ref.filelist[0].filename.replace(\"\\\\\", \"\").replace(\"/\", \"\") # type: str\r\n logging.info(\"Finished Extracting Downloaded Package Data\")\r\n\r\n logging.info(\"Cleaning Up Extra Package Files...\")\r\n os.rename(os.path.join(path, d), dst)\r\n json.dump(data, open(os.path.join(dst, package + \".info\"), 'w'))\r\n os.remove(os.path.join(path, filename))\r\n logging.info(\"Finished Cleaning Up Extra Package Files\")\r\n logging.info(\"Finished Installing Package {} - Version: {}\".format(package, v))", "def cipd_install(args, dest_directory, package, version):\n manifest_file = tempfile.mktemp()\n try:\n with open(manifest_file, 'w') as f:\n f.write('%s %s\\n' % (package, version))\n\n cipd_args = [\n args.cipd_client,\n 'ensure',\n '-list', manifest_file,\n '-root', dest_directory,\n ]\n if args.cipd_cache_directory:\n cipd_args.extend(['-cache-dir', args.cipd_cache_directory])\n if args.verbose:\n cipd_args.append('-verbose')\n _check_call(cipd_args)\n finally:\n os.remove(manifest_file)", "def post_install_pkg(self, installable_pkg):\n pass", "def install_package(package, remote):\n log.info('Installing package %s on %s', package, remote)\n flavor = remote.os.package_type\n if flavor == 'deb':\n pkgcmd = ['DEBIAN_FRONTEND=noninteractive',\n 'sudo',\n '-E',\n 'apt-get',\n '-y',\n '--force-yes',\n 'install',\n '{package}'.format(package=package)]\n elif flavor == 'rpm':\n # FIXME: zypper\n pkgcmd = ['sudo',\n 'yum',\n '-y',\n 'install',\n '{package}'.format(package=package)]\n else:\n log.error('install_package: bad flavor ' + flavor + '\\n')\n return False\n return remote.run(args=pkgcmd)", "def install_cached_package(self, package_name):\n self._log.info(\"Installing package {!r} from talus pypi\".format(package_name))\n pinfo = self.cache[\"pypi\"][package_name]\n pypi_hostname = re.match(r'^.*://([^/]+)/.*$', self.pypi_loc).group(1)\n\n try:\n self._run_pip_main([\n \"install\",\n \"--user\",\n \"--trusted-host\", pypi_hostname,\n \"-i\", self.pypi_loc,\n package_name\n ])\n except SystemExit as e:\n raise Exception(\"Is SystemExit expected?\")", "def install(self):\n # Check if the dir exists, pass installing silently if it doesn't exist\n if not os.path.exists(self.resource_dir):\n return\n\n # Get all the script files under self.resource_dir\n processing_files = []\n for item in os.listdir(self.resource_dir):\n file_path = os.path.join(self.resource_dir, item)\n if fnmatch.fnmatch(file_path, '*.py'):\n processing_files.append(file_path)\n\n for processing_file in processing_files:\n # Install silently the processing file\n try:\n script = ScriptAlgorithm(processing_file)\n except WrongScriptException:\n continue\n\n script_file_name = os.path.basename(processing_file)\n script_name = '%s (%s).%s' % (\n os.path.splitext(script_file_name)[0],\n self.collection_id,\n os.path.splitext(script_file_name)[1],)\n dest_path = os.path.join(self.scripts_folder(), script_name)\n with open(dest_path, 'w') as f:\n f.write(script.script)\n\n self.refresh_script_provider()", "def pkgbuildContentPackage( self, pars, directory ):\n\n return \"\"\"\\\n # Assemble 
your package in ${pkgdir}\n\"\"\"", "def post_package():\n package_file = BytesIO()\n with tarfile.open(mode='w', fileobj=package_file) as tar:\n # metadata\n meta_content = b'encoding: utf-8\\npost: post.md'\n file_info = tarfile.TarInfo('package.yml')\n file_info.size = len(meta_content)\n tar.addfile(file_info, BytesIO(meta_content))\n\n # post\n post_content = b'''---\ntitle: A title\ntopic: A topic\n---\n\n[summary]\nA summary\n\nA paragraph\n'''\n file_info = tarfile.TarInfo('post.md')\n file_info.size = len(post_content)\n tar.addfile(file_info, BytesIO(post_content))\n package_file.seek(0)\n\n return package_file", "def package():\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_egg\"], cwd=\"src\")\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_wheel\"], cwd=\"src\")", "def install(repo, package, python, editable):\n if repo.install(package, python, editable):\n click.echo('Done.')", "def install_from_repository(self) -> None:\n packages = self.list_packages(self.repository_packages, title=\"package\")\n\n # Extra ignore/check for yum to workaround BZ#1920176\n check = ShellScript(f'rpm -q --whatprovides {packages.to_script()}')\n script = check | self.operation_script(Command('install'), packages)\n\n if self.skip_missing:\n script |= ShellScript('true')\n else:\n script &= check\n\n # Check and install\n self.guest.execute(script)", "def install(force, packages):\n setup_audit_log()\n for pspec in CFG.package_specs(packages):\n perform_install(pspec, is_upgrade=False, force=force, quiet=False)", "def install_packages(self):\n\n logging.info(\"installing packages...\")\n\n # Create the target directory. DFT files will be installed under this\n # directory.\n try:\n logging.debug(\"copying DFT toolkit...\")\n\n # Create the target directory in the rootfs\n dft_target_path = self.project.rootfs_mountpoint + \"/dft_bootstrap/\"\n if not os.path.exists(dft_target_path):\n os.makedirs(dft_target_path)\n\n # Copy the DFT toolkit content to the target rootfs\n for copy_target in os.listdir(self.project.project_definition[\"configuration\"][\"dft-base\"]):\n logging.debug(\"Copy the DFT toolkit : preparing to copy \" + copy_target)\n copy_target_path = os.path.join(self.project.project_definition[\"configuration\"][\"dft-base\"], copy_target)\n if os.path.isfile(copy_target_path):\n logging.debug(\"copying file \" + copy_target_path + \" => \" + dft_target_path)\n file_util.copy_file(copy_target_path, dft_target_path)\n else:\n logging.debug(\"copying tree \" + copy_target_path + \" => \" + dft_target_path)\n dir_util.copy_tree(copy_target_path, os.path.join(dft_target_path, copy_target))\n\n # Copy the additional toolkit content to the target rootfs\n if \"additional-roles\" in self.project.project_definition[\"configuration\"]:\n for additional_path in self.project.project_definition[\"configuration\"][\"additional-roles\"]:\n logging.debug(\"Copy the additional toolkit : preparing to copy from additional path \" + additional_path)\n for copy_target in os.listdir(additional_path):\n logging.debug(\"Copy the additional toolkit : preparing to copy \" + copy_target)\n copy_target_path = os.path.join(additional_path, copy_target)\n if os.path.isfile(copy_target_path):\n logging.debug(\"copying file \" + copy_target_path + \" => \" + dft_target_path)\n file_util.copy_file(copy_target_path, dft_target_path)\n else:\n logging.debug(\"copying tree \" + copy_target_path + \" => \" + dft_target_path)\n dir_util.copy_tree(copy_target_path, 
os.path.join(dft_target_path, copy_target))\n\n except OSError as exception:\n # Call clean up to umount /proc and /dev\n self.cleanup_installation_files()\n logging.critical(\"Error: %s - %s.\", exception.filename, exception.strerror)\n exit(1)\n\n except shutil.Error as exception:\n self.cleanup_installation_files()\n logging.critical(\"Error: %s - %s.\", exception.filename, exception.strerror)\n exit(1)\n\n # Flag if someroles has been foundand added to site.yml\n role_has_been_found = False\n\n # Generate the site file including all the roles from baseos\n # configuration, then move roles to the target rootfs\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as working_file:\n # Generate file header\n working_file.write(\"# Defines the role associated to the rootfs being generated\\n\")\n working_file.write(\"---\\n\")\n working_file.write(\"- hosts: local\\n\")\n working_file.write(\"\\n\")\n\n # Test if some variable files have to be included\n if \"variables\" in self.project.project_definition[\"project-definition\"]:\n # Yes, then output the vars_files marker\n working_file.write(\" vars_files:\\n\")\n\n # And iterate the list of files containing variables\n for vars_file in self.project.project_definition[\"project-definition\"][\"variables\"]:\n # Append the file to the site.yml file\n working_file.write(\" - \" + vars_file + \"\\n\")\n logging.debug(\"Adding variables file \" + vars_file)\n\n # Completethe path to have a full path on disk (in case of path local\n # to where is located the project file)\n vars_file = self.project.genereate_definition_file_path(vars_file)\n\n # Copy the variabes fies to the bootstrap directory\n logging.debug(\"Copy the variables file : preparing to copy \" + vars_file)\n if os.path.isfile(vars_file):\n logging.debug(\"copying file \" + vars_file + \" => \" + dft_target_path)\n file_util.copy_file(vars_file, dft_target_path)\n else:\n logging.error(\"Variable files \" + vars_file + \" is not a file\")\n logging.error(\"Skipping this file\")\n\n # Just some spacing for pretty printing\n working_file.write(\"\\n\")\n\n working_file.write(\" roles:\\n\")\n\n # Iterate the list of distributions loaded from the file\n for role in self.project.baseos_definition[\"roles\"]:\n # At least one role has beenfound, flag it\n role_has_been_found = True\n logging.debug(\"Adding role \" + role)\n working_file.write(\" - \" + role + \"\\n\")\n\n # We are done with file generation, close it now\n working_file.close()\n\n # Generate the file path\n filepath = self.project.rootfs_mountpoint + \"/dft_bootstrap/site.yml\"\n\n # Finally move the temporary file under the rootfs tree\n sudo_command = \"sudo mv -f \" + working_file.name + \" \" + filepath\n self.execute_command(sudo_command)\n\n # Warn the user if no role is found. In such case baseos will be same\n # debotstrap, which is certainly not what is expected\n if not role_has_been_found:\n logging.warning(\"No role has been found in baseos definiion. Rootfs is same as debootstrap output\")\n logging.error(\"You may wish to have a look to : \" + self.project.genereate_definition_file_path(self.project.project_definition[\"project-definition\"][\"baseos\"][0]))\n\n # Execute Ansible\n # TODO : multiple target ? 
not sure...\n logging.info(\"running ansible...\")\n sudo_command = \"LANG=C sudo chroot \" + self.project.rootfs_mountpoint + \" /bin/bash -c \\\"cd /dft_bootstrap && /usr/bin/ansible-playbook -i inventory.yml -c local site.yml\\\"\"\n self.execute_command(sudo_command)\n logging.info(\"ansible stage successfull\")", "def update_package_files(self) -> None:\n # create the package folder\n self.package_path.mkdir(parents=True, exist_ok=True)\n\n self.clean() # Delete any previous *.py? files\n self.copy_stubs()\n self.create_readme()\n self.create_license()" ]
[ "0.6358", "0.62577534", "0.59546536", "0.5833951", "0.5801984", "0.577501", "0.57598114", "0.57473296", "0.5639463", "0.55367744", "0.551623", "0.549924", "0.5447735", "0.54421085", "0.5431225", "0.5414615", "0.5406775", "0.53997153", "0.5391413", "0.5389228", "0.5366957", "0.5352649", "0.53445804", "0.53292406", "0.5328736", "0.5320084", "0.53131914", "0.53078675", "0.5289645", "0.52872056" ]
0.7179274
0
Displays a language selector dropdown in the admin, based on the Django "LANGUAGES" setting.
def language_selector(context):
    output = ""
    from django.conf import settings
    i18 = getattr(settings, 'USE_I18N', False)
    if i18:
        template = "admin/language_selector.html"
        context['i18n_is_set'] = True
        try:
            output = render_to_string(template, context)
        except:
            pass
    return output
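A hedged sketch of how a tag like this is typically registered and used, assuming it is exposed as a simple tag that receives the template context; the templatetags module and tag-library names are illustrative, not taken from this row:

# Assumed registration in a templatetags module; names are illustrative.
from django import template
register = template.Library()
language_selector = register.simple_tag(takes_context=True)(language_selector)
# In an overridden admin template:
#   {% load admin_extras %}
#   {% language_selector %}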
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n context['lang_versions'] = self.get_translations()\n context['default_lang'] = (settings.LANGUAGES[0][0])\n return context", "def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })", "def __translationLanguage(self):\n return self.transLanguageComboBox.itemData(\n self.transLanguageComboBox.currentIndex())", "def get_language_list_gui():\n _ = get_gettext()\n language = {}\n language['connect'] = _(\"Connect\")\n language['ip'] = _(\"IP\")\n language['netmask'] = _(\"Netmask\")\n language['gateway'] = _('Gateway')\n language['dns'] = _('DNS')\n language['use_static_ip'] = _('Use Static IPs')\n language['use_static_dns'] = _('Use Static DNS')\n language['use_encryption'] = _('Use Encryption')\n language['advanced_settings'] = _('Advanced Settings')\n language['wired_network'] = _('Wired Network')\n language['wired_network_instructions'] = _('To connect to a wired network,'\n ' you must create a network profile. To create a network profile, type a'\n ' name that describes this network, and press Add.')\n language['automatic_connect'] = _('Automatically connect to this network')\n language['secured'] = _('Secured')\n language['unsecured'] = _('Unsecured')\n language['channel'] = _('Channel')\n language['preferences'] = _('Preferences')\n language['wpa_supplicant_driver'] = _('WPA Supplicant Driver')\n language['wireless_interface'] = _('Wireless Interface')\n language['wired_interface'] = _('Wired Interface')\n language['hidden_network'] = _('Hidden Network')\n language['hidden_network_essid'] = _('Hidden Network ESSID')\n language['connected_to_wireless'] = _('Connected to $A at $B (IP: $C)')\n language['connected_to_wired'] = _('Connected to wired network (IP: $A)')\n language['not_connected'] = _('Not connected')\n language['no_wireless_networks_found'] = _('No wireless networks found.')\n language['killswitch_enabled'] = _('Wireless Kill Switch Enabled')\n language['key'] = _('Key')\n language['username'] = _('Username')\n language['password'] = _('Password')\n language['anonymous_identity'] = _('Anonymous Identity')\n language['identity'] = _('Identity')\n language['authentication'] = _('Authentication')\n language['path_to_pac_file'] = _('Path to PAC File')\n language['select_a_network'] = _('Choose from the networks below:')\n language['connecting'] = _('Connecting...')\n language['wired_always_on'] = _('Always show wired interface')\n language['auto_reconnect'] = _('Automatically reconnect on connection loss')\n language['create_adhoc_network'] = _('Create an Ad-Hoc Network')\n language['essid'] = _('ESSID')\n language['use_wep_encryption'] = _('Use Encryption (WEP only)')\n language['before_script'] = _('Run script before connect')\n language['after_script'] = _('Run script after connect')\n language['disconnect_script'] = _('Run disconnect script')\n language['script_settings'] = _('Scripts')\n language['use_ics'] = _('Activate Internet Connection Sharing')\n language['madwifi_for_adhoc'] = _('Check if using madwifi/atheros drivers')\n language['default_wired'] = _('Use as default profile (overwrites any previous default)')\n language['use_debug_mode'] = _('Enable debug mode')\n language['use_global_dns'] = _('Use global DNS servers')\n language['use_default_profile'] = _('Use default profile on wired autoconnect')\n language['show_wired_list'] = 
_('Prompt for profile on wired autoconnect')\n language['use_last_used_profile'] = _('Use last used profile on wired autoconnect')\n language['choose_wired_profile'] = _('Select or create a wired profile to connect with')\n language['wired_network_found'] = _('Wired connection detected')\n language['stop_showing_chooser'] = _('Stop Showing Autoconnect pop-up temporarily')\n language['display_type_dialog'] = _('Use dBm to measure signal strength')\n language['scripts'] = _('Scripts')\n language['invalid_address'] = _('Invalid address in $A entry.')\n language['global_settings'] = _('Use these settings for all networks sharing this essid')\n language['encrypt_info_missing'] = _('Required encryption information is missing.')\n language['enable_encryption'] = _('This network requires encryption to be enabled.')\n language['wicd_auto_config'] = _('Automatic (recommended)')\n language[\"gen_settings\"] = _(\"General Settings\")\n language[\"ext_programs\"] = _(\"External Programs\")\n language[\"dhcp_client\"] = _(\"DHCP Client\")\n language[\"wired_detect\"] = _(\"Wired Link Detection\")\n language[\"route_flush\"] = _(\"Route Table Flushing\")\n language[\"backend\"] = _(\"Backend\")\n language[\"backend_alert\"] = _(\"Changes to your backend won't occur until the daemon is restarted.\")\n language['0'] = _('0')\n language['1'] = _('1')\n language['2'] = _('2')\n language['3'] = _('3')\n language['4'] = _('4')\n language['5'] = _('5')\n language['6'] = _('6')\n language['7'] = _('7')\n language['8'] = _('8')\n language['9'] = _('9')\n language['interface_down'] = _('Putting interface down...')\n language['resetting_ip_address'] = _('Resetting IP address...')\n language['interface_up'] = _('Putting interface up...')\n language['setting_encryption_info'] = _('Setting encryption info')\n language['removing_old_connection'] = _('Removing old connection...')\n language['generating_psk'] = _('Generating PSK...')\n language['generating_wpa_config'] = _('Generating WPA configuration file...')\n language['flushing_routing_table'] = _('Flushing the routing table...')\n language['configuring_interface'] = _('Configuring wireless interface...')\n language['validating_authentication'] = _('Validating authentication...')\n language['setting_broadcast_address'] = _('Setting broadcast address...')\n language['setting_static_dns'] = _('Setting static DNS servers...')\n language['setting_static_ip'] = _('Setting static IP addresses...')\n language['running_dhcp'] = _('Obtaining IP address...')\n language['dhcp_failed'] = _('Connection Failed: Unable to Get IP Address')\n language['aborted'] = _('Connection Cancelled')\n language['bad_pass'] = _('Connection Failed: Bad password')\n language['done'] = _('Done connecting...')\n return language", "def get_language(self):\n return self.lang", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def get_language(self):\r\n return self.language", "def language(self):\r\n return self._get('language', {})", "def add_game_language_subscriber(event):\n request = event.request\n # TODO: look up game language from a cookie or something\n en = db.get_by_identifier_query(db.t.Language, u'en').first()\n request.tmpl_context.game_language = en", "def _override_context(): \r\n if _CDjangoPluginActivator._instance._IsDjangoProject():\r\n return [wingapi.kContextNewMenu(_(\"Djang_o\"), group=2)]\r\n else:\r\n return []", "def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return 
get_site().preferences.default_language", "def overview():\r\n # Update the list of languages allowed on the site, \r\n # except for the language used by your users at that time.\r\n if request.method == 'POST':\r\n lan_object = Languages()\r\n data = lan_object.update()\r\n message = lan_object.message\r\n status = lan_object.status\r\n \r\n # Gets documents from the collections of all languages \r\n languages_list = g.languages_object.get_languages(1)\r\n language_chosen = g.languages_object.get_languages(2)\r\n return render_template( '{}/index.html'.format(MODULE_DIR), **locals())", "def get_related_language(self) -> str:\n pass", "def kb_settings_select(bot, update, groups):\n chat_id = update.message.chat_id\n language = groups[0]\n\n # Available languages\n languages = {\"pt_BR\": \"Português (Brasil)\",\n \"en_US\": \"English (US)\"}\n\n # If the language choice matches the expression AND is a valid choice\n if language in languages.keys():\n # Sets the user's language\n db.set(str(chat_id), language)\n bot.send_message(chat_id=chat_id,\n text=_(\"Language updated to {0}\")\n .format(languages[language]))\n else:\n # If it is not a valid choice, sends an warning\n bot.send_message(chat_id=chat_id,\n text=_(\"Unknown language! :(\"))", "def grepo(request):\n return {\n \"GREPO_LANGUAGES\": Language.objects.all().values_list(\"name\", flat=True)\n }", "def getLanguages(self):\n return self.__getColumnData(Q_LANGUAGES, 'language')", "def setup_site_languages(context):\n portal = context.getSite()\n ltool = portal.portal_languages\n \n defaultLanguage = bc.default_language\n supportedLanguages = list(bc.zope_i18n_allowed_languages.split())\n ltool.manage_setLanguageSettings(defaultLanguage, supportedLanguages,\n setUseCombinedLanguageCodes=True,\n setCookieN=True, setRequestN=True)\n logger.info(\"Site languages enabled.\")", "def get_lang(self):\n return self.langs.lang", "def released_langs(self):\r\n language_options = DarkLangConfig.current().released_languages_list\r\n if settings.LANGUAGE_CODE not in language_options:\r\n language_options.append(settings.LANGUAGE_CODE)\r\n return language_options", "def language(self):\n if self.consent:\n self.consent.language\n translation.activate(self.consent.language)\n self._language = translation.get_language()\n else:\n self._language = settings.LANGUAGE_CODE\n return self._language", "def get_language(self) -> str:\n return self.language", "def field_choices_used_to_translated_value():\r\n LANGUAGES = (\r\n ('en', 'English'),\r\n ('ru', 'Russian'),\r\n )\r\n\r\n from django.db import models\r\n\r\n class Article(models.Model):\r\n name = models.CharField(max_length=200)\r\n language = models.CharField(max_length=200, choices=LANGUAGES)\r\n\r\n def __unicode__(self):\r\n return self.name\r\n\r\n class ArticleTable(tables.Table):\r\n class Meta:\r\n model = Article\r\n\r\n table = ArticleTable([Article(name='English article', language='en'),\r\n Article(name='Russian article', language='ru')])\r\n\r\n assert 'English' == table.rows[0]['language']\r\n assert 'Russian' == table.rows[1]['language']", "def get_language(lang_list: list = None) -> str:\n\tis_logged_in = frappe.session.user != \"Guest\"\n\n\t# fetch language from form_dict\n\tif frappe.form_dict._lang:\n\t\tlanguage = get_lang_code(frappe.form_dict._lang or get_parent_language(frappe.form_dict._lang))\n\t\tif language:\n\t\t\treturn language\n\n\t# use language set in User or System Settings if user is logged in\n\tif is_logged_in:\n\t\treturn frappe.local.lang\n\n\tlang_set = 
set(lang_list or get_all_languages() or [])\n\n\t# fetch language from cookie\n\tpreferred_language_cookie = get_preferred_language_cookie()\n\n\tif preferred_language_cookie:\n\t\tif preferred_language_cookie in lang_set:\n\t\t\treturn preferred_language_cookie\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fetch language from request headers\n\taccept_language = list(frappe.request.accept_languages.values())\n\n\tfor language in accept_language:\n\t\tif language in lang_set:\n\t\t\treturn language\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fallback to language set in System Settings or \"en\"\n\treturn frappe.db.get_default(\"lang\") or \"en\"", "def get_language_name(self):\n return self.language_name", "def on_transLanguageComboBox_currentIndexChanged(self, index):\n self.__plugin.setPreferences(\n \"TranslationLanguage\", self.transLanguageComboBox.itemData(index))", "def __showLanguagesMenu(self, pos):\n aw = self.activeWindow()\n if aw is not None:\n menu = aw.getMenu(\"Languages\")\n if menu is not None:\n menu.exec_(pos)", "async def langs(self, context):\n languages = get_langs(context.message.guild)\n await context.channel.send(LANG_LIST.format(nb_lang=len(languages), langs=enum(languages)))", "def wikiLanguages():\n return languages", "def _save_lang(self):\n for combobox, (option, _default) in list(self.comboboxes.items()):\n if option == 'interface_language':\n data = combobox.itemData(combobox.currentIndex())\n value = from_qvariant(data, to_text_string)\n break\n save_lang_conf(value)\n self.set_option('interface_language', value)", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'" ]
[ "0.59492594", "0.59370124", "0.5830997", "0.57654065", "0.5667339", "0.5663755", "0.563679", "0.5610488", "0.5570556", "0.5564695", "0.5541525", "0.5510436", "0.5505482", "0.54878914", "0.5480784", "0.54672724", "0.5453488", "0.5449253", "0.54212254", "0.54074854", "0.5400048", "0.53990394", "0.53830695", "0.5372098", "0.53720784", "0.5356534", "0.5345835", "0.533718", "0.5334506", "0.5329395" ]
0.7890218
0
Object constructor for the funnels. Inside __init__ you can pass all needed variables, which are passed on to the Funnel init as well.
def __init__(
    self,
    start_end_points: Tuple[tuple, tuple],
    funnel: Funnel,
    count: int,
    bins: Union[int, float],
    annot: bool = False,
    *args,
    **kwargs,
):
    self.left_top_point = ShapePoint(start_end_points[0])
    self.right_top_point = ShapePoint(start_end_points[1])
    self.bins = bins
    self.count = count
    step = abs((self.right_top_point[0] - self.left_top_point[0]) / count)
    x_start_point = self.left_top_point[0]
    x_end_point = x_start_point + step
    y_point = self.left_top_point[1]
    # Creating list for the annotation texts
    annot_step = int(self.bins / self.count)
    annot_bins = [i for i in range(0, int(self.bins) + annot_step, annot_step)]
    annots = [f"{x+1}–{y}" for x, y in zip(annot_bins[:-1], annot_bins[1:])]
    # Creating funnels in cycle
    for i in range(self.count):
        self.funnels.append(
            funnel(
                *args,
                start_end_points=(
                    (x_start_point, y_point),
                    (x_end_point, y_point),
                ),
                annot=annot,
                annot_text=annots[i],
                **kwargs,
            )
        )
        x_start_point, x_end_point = x_end_point, x_end_point + step
    super().__init__(*self.funnels)
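A hedged construction sketch; the enclosing class name (Funnels here) and the MovableFunnel subclass are assumptions made purely for illustration:

# Illustrative values only; Funnels and MovableFunnel are assumed names.
funnels = Funnels(
    start_end_points=((-6, 2), (6, 2)),  # left-top and right-top anchor points
    funnel=MovableFunnel,                # assumed Funnel subclass
    count=5,                             # five funnels side by side
    bins=100,                            # 100 bins -> annotation ranges 1-20, 21-40, ...
    annot=True,
)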
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args):\n _snap.TCrossNetAFltI_swiginit(self, _snap.new_TCrossNetAFltI(*args))", "def __init__(self, opts):\n super(FunctionComponent, self).__init__(opts)\n self.options = opts.get(\"fn_cisco_umbrella_inv\", {})\n validate_opts(self)\n self.proxies = get_proxies(opts, self.options)", "def __init__(self, *args):\n _snap.TCrossNet_swiginit(self, _snap.new_TCrossNet(*args))", "def __init__(self, orbit_category, event_ref, user_ticket_pairs, chatroom_ref, cost_estimate, status):\n\n super().__init__()\n self.orbit_category = orbit_category\n self.event_ref = event_ref\n self.user_ticket_pairs = user_ticket_pairs\n self.chatroom_ref = chatroom_ref\n self.cost_estimate = cost_estimate\n self.status = status", "def __init__(self, objectiveFcn, gradientFcn, stepFcn):\n self.objective = objectiveFcn\n self.gradient = gradientFcn\n self.createStep = stepFcn", "def __init__(self, chain_instance, *args, **kwargs):\n protocol_logger('Intializing protocol processor')\n self.chain_instance = chain_instance", "def __init__(self, constructor_fn=None):", "def __init__(self, constructor_fn=None):", "def init(self, *args, **kwds):\n pass", "def __init__(self, func, connection, *args):\n self.func = func\n self.connection = connection\n self.args = args", "def __init__(self, **kwargs):\n super(ForwardLayersBase, self).__init__()\n pass", "def __init__(self, fritz_box, call_forwarding_dict):\n self.fritz_box = fritz_box\n self._name = \"callforwarding_\" + call_forwarding_dict['uid']\n self.uid = call_forwarding_dict['uid']\n self.from_number = call_forwarding_dict['from_number']\n self.to_number = call_forwarding_dict['to_number']\n self.connection_type = call_forwarding_dict['connection_type']\n self.enabled = call_forwarding_dict['enabled']", "def __init__(self, traffic_obj):\n self.request, self.response = traffic_obj[0], traffic_obj[1]\n self.param_dict = {}\n self.reflect = []", "def __init__(self, obj):\n # type: (object) -> object\n\n self.obj = obj\n cluster_id = self.obj.fuel_web.get_last_created_cluster()\n logger.info('#' * 10 + \"Cluster ID: \" + str(cluster_id))\n ip = self.obj.fuel_web.get_public_vip(cluster_id)\n logger.info('#' * 10 + \"IP : \" + str(ip))\n\n self.os_conn = os_actions.OpenStackActions(\n ip, SERVTEST_USERNAME, SERVTEST_PASSWORD, SERVTEST_TENANT)\n\n self.manila_conn = os_manila_actions.ManilaActions(\n ip, SERVTEST_USERNAME, SERVTEST_PASSWORD, SERVTEST_TENANT)", "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "def __init__(self, function, **kwargs):\n self.function = function\n self.kwargs = kwargs", "def __init__(self, function, *args):\n self.function = function\n self.args = args", "def initialize(self, *args, **kwargs):", "def __init__(self, *args, **kwargs):\n try:\n geometry = kwargs['geometry']\n except:\n pass\n # ...\n dim = geometry.dim\n if dim == 1:\n func_one = lambda x : [ 1. ]\n func_zero = lambda x : [ 0. ]\n func_stiff = lambda x : [ 1. ]\n if dim == 2:\n func_one = lambda x,y : [ 1. ]\n func_zero = lambda x,y : [ 0. ]\n func_stiff = lambda x,y : [ 1., 0. \\\n , 0., 1. ]\n if dim == 3:\n func_one = lambda x,y,z : [ 1. ]\n func_zero = lambda x,y,z : [ 0. ]\n func_stiff = lambda x,y,z : [ 1., 0., 0. \\\n , 0., 1., 0. \\\n , 0., 0., 1. 
]\n # ...\n\n # ...\n tc_d = {}\n tc_d['A'] = func_stiff\n tc_d['b'] = func_zero\n try:\n tc_d['AllDirichlet'] = kwargs['AllDirichlet']\n except:\n pass\n try:\n tc_d['bc_dirichlet'] = kwargs['bc_dirichlet']\n except:\n pass\n try:\n tc_d['bc_neumann'] = kwargs['bc_neumann']\n except:\n pass\n try:\n tc_d['Metric'] = kwargs['Metric']\n except:\n pass\n # ...\n\n # ...\n poisson.__init__(self, *args, **kwargs)\n self.Dn = basicPDE(geometry=geometry, testcase=tc_d)\n # ...\n\n # ...", "def initialize( self, logger, loop, netconf_ip, netconf_port, statistics,\n xml_to_json_translator):\n self.init_stream_handler(logger, loop, \n netconf_ip, netconf_port, statistics, xml_to_json_translator)", "def __init__(self, *args):\n _snap.TNEANetAFltI_swiginit(self, _snap.new_TNEANetAFltI(*args))", "def __init__(self, *args, **kwargs):\n\n # ...\n\n poisson.__init__(self, *args, **kwargs)\n\n # ...", "def __init__(self, env, name, num_ports, forwarding_table=None):\n NetworkDevice.__init__(self, env, name, num_ports)\n env.process(self.listen_for_messages(self.forward_messages))\n if forwarding_table is None:\n self.forwarding_table = {}\n else:\n self.forwarding_table = forwarding_table", "def __init__(self, callable_fn, output_layer):\n\n super(DefenseWrapper, self).__init__()\n self.output_layer = output_layer\n self.callable_fn = callable_fn\n self.rec_model = None", "def __init__(self, opts):\n super(FunctionComponent, self).__init__(opts)\n self.options = opts.get(\"fn_hibp\", {})\n\n self.PROXIES = {}\n # Get proxies\n PROXY_HTTP = self.get_config_option(\"hibp_proxy_http\", True)\n PROXY_HTTPS = self.get_config_option(\"hibp_proxy_https\", True)\n\n if PROXY_HTTP is not None:\n self.PROXIES[\"http\"] = PROXY_HTTP\n\n if PROXY_HTTPS is not None:\n self.PROXIES[\"https\"] = PROXY_HTTP", "def initialise(self, **kwargs):\n pass", "def __init__(self, *args):\n this = _ida_hexrays.new_cif_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, convObjs, createBasicObjFunct, modCalcObjFunct, stdInpFromCalcObjs, attrDict=None):\n\t\tself._convObjs = convObjs\n\t\tself._createBasicObjFunct = createBasicObjFunct\n\t\tself._modCalcObjFunct = modCalcObjFunct\n\t\tself._stdInpFromCalcObjs = stdInpFromCalcObjs\n\n\t\tif attrDict is not None:\n\t\t\tfor key,val in attrDict.items():\n\t\t\t\tsetattr(self, key, val)", "def __init__(self, *observation_functions):\n self.observation_functions = observation_functions", "def __init__(self, *args):\n _snap.TFltFltH_swiginit(self, _snap.new_TFltFltH(*args))" ]
[ "0.6120769", "0.6116087", "0.60753834", "0.5962141", "0.5957412", "0.59420455", "0.5935756", "0.5935756", "0.59045035", "0.5900847", "0.5899501", "0.5888573", "0.5870248", "0.5869776", "0.58573395", "0.5797897", "0.5775388", "0.57736355", "0.57629496", "0.574592", "0.57338184", "0.57322186", "0.57266104", "0.57217973", "0.57029516", "0.5700544", "0.56981975", "0.56858087", "0.56849736", "0.56790805" ]
0.67489994
0
Method for moving dots into the funnels. Calls the same drag_in_dots method on every funnel in self.funnels.
def drag_in_dots(self, scene: Scene, dots: VGroup, animate_slow: int, animate_rest: bool):
    if any(not isinstance(x, MovableFunnel) for x in self.funnels):
        raise FunnelsExeption('method "drag_in_dots" allowed only for "MovableFunnel"')
    # We are sorting dots ascending to be able to play animation from smallest dot to biggest.
    _dots = VGroup(*sorted(dots, key=lambda x: x.value))
    for funnel in self.funnels:
        funnel.drag_in_dots(
            scene=scene,
            dots=_dots,
            animate_slow=animate_slow,
            animate_rest=animate_rest,
        )
        animate_slow = animate_slow - funnel.animated_slowly
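A hedged usage sketch inside a manim Scene, assuming the funnels group was built from MovableFunnel instances and that each dot exposes the value attribute used for sorting; FunnelScene, ValuedDot and the Funnels constructor are assumed names, not part of this row:

# Illustrative only; FunnelScene, ValuedDot and the Funnels constructor are assumed.
class FunnelScene(Scene):
    def construct(self):
        funnels = Funnels(start_end_points=((-6, 2), (6, 2)),
                          funnel=MovableFunnel, count=5, bins=100)
        dots = VGroup(*[ValuedDot(value=v) for v in range(1, 101)])
        funnels.drag_in_dots(self, dots, animate_slow=10, animate_rest=True)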
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connectTheDots():\n Nodes.connectTheDots()\n TimedNodes.connectTheDots()\n\n return", "def update_interpolated_and_dots(function_selector, discretization_Slider,\n interpolated_values):\n # Each base point (left or right foot and middle node) is shared by three\n # Finite Elements aside from the two the most right and the most left\n number_points = discretization_slider.value + 2\n\n x = np.linspace(LEFT_X, RIGHT_X, number_points)\n\n y = functions[function_selector.active](x)\n\n interpolated_values.data = {\n \"x\": x,\n \"y\": y\n }", "def moveCirc(self):\n\t\tfor circle in self.circles:\n\t\t\tcircle.moveStep()", "def get_dots(self):\n logging.debug('Generate dots to draw')\n gc = self.coordinates\n coords = []\n zmin = ymin = xmin = self.fmin = 999999\n self.fmax = 0\n for line in gc:\n temp = [None, None, None, None] # X, Y, Z, Feedrate\n for c in line:\n if c.startswith('X'):\n temp[0] = float(c[1:])\n xmin = min(xmin, temp[0])\n elif c.startswith('Y'):\n temp[1] = float(c[1:])\n ymin = min(ymin, temp[1])\n elif c.startswith('Z'):\n temp[2] = float(c[1:])\n zmin = min(zmin, temp[2])\n elif c.startswith('F'):\n temp[3] = int(float(c[1:]))\n self.fmin = min(self.fmin, temp[3])\n self.fmax = max(self.fmax, temp[3])\n if ((temp[0] is not None) or (temp[1] is not None) or\n (temp[2] is not None) or (temp[3] is not None)):\n if coords:\n if temp[0] is None:\n temp[0] = coords[-1][0]\n if temp[1] is None:\n temp[1] = coords[-1][1]\n if temp[2] is None:\n temp[2] = coords[-1][2]\n if temp[3] is None:\n temp[3] = coords[-1][3]\n coords.append(temp)\n\n if (self.fmin == 999999) or (self.fmax == 0):\n raise GcodeError('Please check feedrate')\n if (xmin == ymin == zmin == 999999):\n raise GcodeError('Please check coordinates')\n if xmin == 999999:\n xmin = 0\n if ymin == 999999:\n ymin = 0\n if zmin == 999999:\n zmin = 0\n\n for i in coords: # if something is still 0\n if i[0] is None:\n i[0] = xmin\n if i[1] is None:\n i[1] = ymin\n if i[2] is None:\n i[2] = zmin\n if i[3] is None:\n i[3] = self.fmin\n i[0] -= xmin\n i[1] -= ymin\n i[2] -= zmin\n i[3] -= self.fmin\n\n self.fmax -= self.fmin\n self.colors_list = grad(MIN_COLOR, MAX_COLOR, self.fmax+1)\n\n dots = []\n for i in range(len(coords)):\n temp = []\n if i != len(coords)-1:\n temp = self.getColorLine(coords[i], coords[i+1])\n if temp:\n dots.extend(temp)\n\n return dots", "def move_all_animals(self):\n\n y_lim, x_lim = np.shape(self.map)\n for y in range(y_lim):\n for x in range(x_lim):\n loc = y, x\n self.map[loc].migration(self.get_neighbour((y, x)))", "def move(self, step):\n for point in self.points:\n l = min(len(step), len(point.position))\n for i in range(l):\n point.position[i] = step[i]", "def Move(self):\n if not self.is_dead:\n direction = self.brain.GetNextDirection()\n if direction == False:\n #* if the brain has no directions left kill the dot\n self.Kill()\n return\n #*move\n #// self.acceleration = [self.acceleration[i] + direction[i] for i in range(len(self.acceleration))]\n self.acceleration = direction\n self.velocity = [max(-velocity_limit,min(velocity_limit,self.velocity[i] + self.acceleration[i])) for i in range(len(self.velocity))]\n self.position = [self.position[i]+self.velocity[i] for i in range(len(self.velocity))]\n #*Kill the dot if it hit the screen walls or the target\n if self.position[0] >= self.screen.get_width() or self.position[0] <= 0 or self.position[1] >= self.screen.get_height() or self.position[1] <= 0:\n self.Kill()\n if self.GetDistanceFromTarget() <=5:\n self.reached_target = True\n 
self.Kill()\n #* die when hit obstacle (test)\n if self.position[0] >= (self.screen.get_width()/2 - 150) and self.position[0] <= (self.screen.get_width()/2 + 150) and self.position[1] >= (self.screen.get_height()/2 - 5) and self.position[1] <= (self.screen.get_height()/2 + 5):\n self.Kill()", "def DoInsertDockLayer(panes, dock_direction, dock_layer):\r\n \r\n for ii in xrange(len(panes)):\r\n pane = panes[ii]\r\n if not pane.IsFloating() and pane.dock_direction == dock_direction and pane.dock_layer >= dock_layer:\r\n pane.dock_layer = pane.dock_layer + 1\r\n\r\n panes[ii] = pane\r\n\r\n return panes", "def update_Bubble(self):\n\t\t\n\t\tself.Bubble_initial_pos[0] += self.Bubble_vel[0]\n\t\tself.Bubble_initial_pos[1] += self.Bubble_vel[1]\n\n\t\tself.x0 = self.Bubble_initial_pos[0] - self.Bubble_radius\n\t\tself.y0 = self.Bubble_initial_pos[1] - self.Bubble_radius\n\t\tself.x1 = self.Bubble_initial_pos[0] + self.Bubble_radius\n\t\tself.y1 = self.Bubble_initial_pos[1] + self.Bubble_radius \n\n\t\tself.tk_pic.coords(self.Bubble_index, self.x0, self.y0, self.x1, self.y1)", "def _move_cursors_to_pos(self):\n for axis in range(3):\n x, y = self._vox[list(self._xy_idx[axis])]\n self._images['cursor_v'][axis].set_xdata([x, x])\n self._images['cursor_h'][axis].set_ydata([y, y])\n self._zoom(0) # doesn't actually zoom just resets view to center\n self._update_images(draw=True)\n self._update_moved()", "def filter_dots(dots, DOG):\n return nd.filters.convolve(dots, DOG)", "def make_swivelknife_move(self):\n offset = self.shape.parentLayer.getToolRadius()\n drag_angle = self.shape.drag_angle\n\n startnorm = offset*Point(1, 0) # TODO make knife direction a config setting\n prvend, prvnorm = Point(), Point()\n first = True\n\n for geo in self.shape.geos.abs_iter():\n if isinstance(geo, LineGeo):\n geo_b = deepcopy(geo)\n if first:\n first = False\n prvend = geo_b.Ps + startnorm\n prvnorm = startnorm\n norm = offset * (geo_b.Pe - geo_b.Ps).unit_vector()\n geo_b.Ps += norm\n geo_b.Pe += norm\n if not prvnorm == norm:\n direction = prvnorm.to3D().cross_product(norm.to3D()).z\n swivel = ArcGeo(Ps=prvend, Pe=geo_b.Ps, r=offset, direction=direction)\n swivel.drag = drag_angle < abs(swivel.ext)\n self.append(swivel)\n self.append(geo_b)\n\n prvend = geo_b.Pe\n prvnorm = norm\n elif isinstance(geo, ArcGeo):\n geo_b = deepcopy(geo)\n if first:\n first = False\n prvend = geo_b.Ps + startnorm\n prvnorm = startnorm\n if geo_b.ext > 0.0:\n norma = offset*Point(cos(geo_b.s_ang+pi/2), sin(geo_b.s_ang+pi/2))\n norme = Point(cos(geo_b.e_ang+pi/2), sin(geo_b.e_ang+pi/2))\n else:\n norma = offset*Point(cos(geo_b.s_ang-pi/2), sin(geo_b.s_ang-pi/2))\n norme = Point(cos(geo_b.e_ang-pi/2), sin(geo_b.e_ang-pi/2))\n geo_b.Ps += norma\n if norme.x > 0:\n geo_b.Pe = Point(geo_b.Pe.x+offset/(sqrt(1+(norme.y/norme.x)**2)),\n geo_b.Pe.y+(offset*norme.y/norme.x)/(sqrt(1+(norme.y/norme.x)**2)))\n elif norme.x == 0:\n geo_b.Pe = Point(geo_b.Pe.x,\n geo_b.Pe.y)\n else:\n geo_b.Pe = Point(geo_b.Pe.x-offset/(sqrt(1+(norme.y/norme.x)**2)),\n geo_b.Pe.y-(offset*norme.y/norme.x)/(sqrt(1+(norme.y/norme.x)**2)))\n if prvnorm != norma:\n direction = prvnorm.to3D().cross_product(norma.to3D()).z\n swivel = ArcGeo(Ps=prvend, Pe=geo_b.Ps, r=offset, direction=direction)\n swivel.drag = drag_angle < abs(swivel.ext)\n self.append(swivel)\n prvend = geo_b.Pe\n prvnorm = offset*norme\n if -pi < geo_b.ext < pi:\n self.append(ArcGeo(Ps=geo_b.Ps, Pe=geo_b.Pe, r=sqrt(geo_b.r**2+offset**2), direction=geo_b.ext))\n else:\n geo_b = ArcGeo(Ps=geo_b.Ps, 
Pe=geo_b.Pe, r=sqrt(geo_b.r**2+offset**2), direction=-geo_b.ext)\n geo_b.ext = -geo_b.ext\n self.append(geo_b)\n # TODO support different geos, or disable them in the GUI\n # else:\n # self.append(copy(geo))\n if not prvnorm == startnorm:\n direction = prvnorm.to3D().cross_product(startnorm.to3D()).z\n self.append(ArcGeo(Ps=prvend, Pe=prvend-prvnorm+startnorm, r=offset, direction=direction))\n\n self.geos.insert(0, RapidPos(self.geos.abs_el(0).Ps))\n self.geos[0].make_abs_geo()", "def step(self):\n prey_neighbors = [x for x in self.model.space.get_neighbors(self.pos, self.vision+ 20, False) if isinstance(x,boid.Boid)]\n nearby_obstacles = [x for x in self.model.space.get_neighbors(self.pos, self.vision + 15, False) if isinstance(x, Obstacle)]\n self.velocity += (self.avoid_collision(nearby_obstacles) * self.collision_separation +\n self.attack(prey_neighbors)) / 2\n self.velocity /= np.linalg.norm(self.velocity)\n new_pos = self.pos + self.velocity * self.speed\n self.model.space.move_agent(self, new_pos)\n self.eat(prey_neighbors)\n\n\n # update for drawing\n self.update()", "def draw_draughts():\n global red_draughts\n global white_draughts\n global board_array\n if(red_draughts == []):\n red_draughts = [board.create_oval(0,0,board_divisions,board_divisions,fill=\"red\") for i in xrange(0,15)]\n white_draughts = [board.create_oval(0,0,board_divisions,board_divisions,fill=\"white\")for i in xrange(0,15)]\n #And create event handlers for dragging these\n for draught in red_draughts:\n board.tag_bind(draught, \"<Button-1>\", move_draught_begin)\n board.tag_bind(draught, \"<B1-Motion>\", move_draught)\n board.tag_bind(draught, \"<ButtonRelease-1>\", move_draught_end)\n for draught in white_draughts:\n board.tag_bind(draught, \"<Button-1>\", move_draught_begin)\n board.tag_bind(draught, \"<B1-Motion>\", move_draught)\n board.tag_bind(draught, \"<ButtonRelease-1>\", move_draught_end)\n\n unmoved_red = list(red_draughts)\n unmoved_white = list(white_draughts)\n red_draughts = []\n white_draughts = []\n print board_array\n for i in xrange(1,len(board_array)-1): #Handle Points, ends and bar handled as special cases\n #Calculate where left side of draughts should be, and whether on top or bottom\n if i <= 6:\n left_side = board_divisions*(8+(6-i))\n bottom = True\n elif i <= 12:\n left_side = board_divisions*(1+(12-i))\n bottom = True\n elif i <= 18:\n bottom = False\n left_side = board_divisions*(1+(i-13))\n else: \n bottom = False\n left_side = board_divisions*(8+(i-19))\n #Move red draughts to right places\n for j in xrange(board_array[i][0]):\n temp = unmoved_red.pop()\n if(bottom == True):\n board.coords(temp,left_side+board_divisions//10*(j//5),board_divisions*(9-(j%5)),left_side+board_divisions+board_divisions//10*(j//5),board_divisions*(10-(j%5)))\n else:\n board.coords(temp,left_side+board_divisions//10*(j//5),board_divisions*(j%5),left_side+board_divisions+board_divisions//10*(j//5),board_divisions*((j%5)+1))\n red_draughts.append(temp)\n #Now white\n for j in xrange(board_array[i][1]):\n temp = unmoved_white.pop()\n if(bottom == True):\n board.coords(temp,left_side+board_divisions//10*(j//5),board_divisions*(9-(j%5)),left_side+board_divisions+board_divisions//10*(j//5),board_divisions*(10-(j%5)))\n else:\n board.coords(temp,left_side+board_divisions//10*(j//5),board_divisions*(j%5),left_side+board_divisions+board_divisions//10*(j//5),board_divisions*((j%5)+1))\n white_draughts.append(temp)\n #Handle white end, red bar\n #Move red draughts to right places on bar\n for j in 
xrange(board_array[0][0]):\n temp = unmoved_red.pop()\n board.coords(temp,7*board_divisions+board_divisions//10*(j//4),board_divisions*(9-(j%4)),7*board_divisions+board_divisions+board_divisions//10*(j//4),board_divisions*(10-(j%4)))\n red_draughts.append(temp)\n\n #Now white to places in goal\n for j in xrange(board_array[0][1]):\n temp = unmoved_white.pop()\n board.coords(temp,14*board_divisions+board_divisions//10*(j//4),board_divisions*(9-(j%4)),14*board_divisions+board_divisions+board_divisions//10*(j//4),board_divisions*(10-(j%4)))\n white_draughts.append(temp)\n #Handle red end, white\n #Move white draughts to right places on bar\n\n for j in xrange(board_array[25][1]):\n temp = unmoved_white.pop()\n board.coords(temp,7*board_divisions+board_divisions//10*(j//4),board_divisions*(j%4),7*board_divisions+board_divisions+board_divisions//10*(j//4),board_divisions*((j%4)+1))\n white_draughts.append(temp)\n\n #Now red to places in goal\n for j in xrange(board_array[25][0]):\n temp = unmoved_red.pop()\n board.coords(temp,14*board_divisions,board_divisions*j,15*board_divisions,board_divisions*(j+1))\n board.coords(temp,14*board_divisions+board_divisions//10*(j//4),board_divisions*(j%4),14*board_divisions+board_divisions+board_divisions//10*(j//4),board_divisions*((j%4)+1))\n red_draughts.append(temp)\n if(board_array[25][0] == 15):\n print \"You win!\"", "def update_points(self, *args):\n points = [Window.width / 2, Window.height / 2, .5, .5]\n i = 0\n while i < 2 * pi:\n i += 0.01 * pi\n points.extend([\n Window.width / 2 + cos(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n Window.height / 2 + sin(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n self.offset_x + sin(i),\n self.offset_y + cos(i)])\n\n self.mesh_points = points", "def event_drag_multipoint_line(self, event):\n\n if self.variables.current_shape_id:\n self.show_shape(self.variables.current_shape_id)\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n coords = self.coords(self.variables.current_shape_id)\n new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if vector_object.type == SHAPE_TYPES.ARROW or vector_object.type == SHAPE_TYPES.LINE:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n pass", "def motion(self, event):\n dx = event.x - self.dragx\n dy = event.y - self.dragy\n\n self.dragx = event.x\n self.dragy = event.y\n\n self.canvas.move(self.tags, dx, dy)\n self.diag.update_arrows()", "def deploy(self):\n step = 10\n for i in range(0, self.x, step): \n for j in range(0, self.y, step):\n self._place_nodes(i,j, step, max_nodes = 3)", "def mouseDragged(self, point, delta):\n pass", "def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1", "def move_ghosts(self):\n temp_ghosts = []\n for ghost in self.ghosts:\n if (self.player.x - ghost.x > 0 and\n board.board[ghost.y][ghost.x+1 if ghost.x < 34 else ghost.x] == '.' 
or\n board.board[ghost.y][ghost.x+1 if ghost.x < 34 else ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y][ghost.x+1] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y][ghost.x+1] = 'G'\n ghost.x += 1\n elif (self.player.x - ghost.x < 0 and\n board.board[ghost.y][ghost.x-1 if ghost.x > 0 else ghost.x] == '.'\n or board.board[ghost.y][ghost.x-1 if ghost.x > 0 else ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y][ghost.x-1] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y][ghost.x-1] = 'G'\n ghost.x -= 1\n elif (self.player.y - ghost.y > 0\n and board.board[ghost.y+1 if ghost.y < 14 else ghost.y][ghost.x] == '.'\n or board.board[ghost.y+1 if ghost.y < 14 else ghost.y][ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y+1][ghost.x] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y+1][ghost.x] = 'G'\n ghost.y += 1\n elif (board.board[ghost.y-1 if ghost.y > 0 else ghost.y][ghost.x] == '.'\n or board.board[ghost.y-1 if ghost.y > 0 else ghost.y][ghost.x] == 'P'\n ):\n board.board[ghost.y][ghost.x] = '.'\n if board.board[ghost.y-1][ghost.x] == 'P':\n print 'Lost the game'\n self.player.die = 1\n exit()\n board.board[ghost.y-1][ghost.x] = 'G'\n ghost.y -= 1\n temp_ghosts.append(ghost)\n self.ghosts = temp_ghosts", "def _doAutostep(self):\n log.info(\"autostepping through all spokes on hub %s\", self.__class__.__name__)\n\n # create a list of all spokes in reverse alphabetic order, we will pop() from it when\n # processing all the spokes so the screenshots will actually be in alphabetic order\n self._spokesToStepIn = list(reversed(sorted(self._spokes.values(), key=lambda x: x.__class__.__name__)))\n\n # we can't just loop over all the spokes due to the asynchronous nature of GtkStack, so we start by\n # autostepping to the first spoke, this will trigger a callback that steps to the next spoke,\n # until we run out of unvisited spokes\n self._autostepSpoke()", "def setPointsToTurn(self):\r\n for point in self.points:\r\n point.setActiveTurn()", "def find_dots(img):\n # will hold all points\n coordinates = []\n # will hold only relevant points\n points = []\n # losing the side\n img[:, 475:] = 0\n # using for finding the best corners in edged image 65\n corners = cv2.goodFeaturesToTrack(img, 75, 0.085, 61)\n corners = np.int0(corners)\n for corner in corners:\n x, y = corner.ravel()\n if y > 350 or y < 10: # avoid from top and bottom\n continue\n coordinates.append((x, y))\n # sort in order to start from right to left\n sort_coordinates = sorted(coordinates)\n num_of_dot = 1\n for i in reversed(sort_coordinates):\n # when its 9, break\n if num_of_dot > 9:\n break\n points.append((i[0], i[1]))\n num_of_dot += 1\n return points", "def plot_draggable_points(self, X, K):\n \n for (x, y), k in zip(X, K):\n point = Point(self, x, y, self.colors[k])\n self.points.append(point)\n\n self.update_median()\n self.update_figure()", "def slider_dragged(self):\n pass", "def step(self):\n for layer in self.layers:\n layer.step()", "def explode(self):\n\n words = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n\n for i, image in enumerate(self.images):\n for j, chain in enumerate(list(self.traces[i])):\n for k, link in enumerate(chain):\n\n if k is 0:\n explode_image = image[:]\n\n explode_image = np.append(explode_image, [\"zero\", 0, 0])\n \n if k is not len(chain) - 
1:\n next_image = explode_trace = explode_image[:-3] # remove word, x_coor, and y_coor\n\n word = words[k]\n x_coor = link[0]\n y_coor = link[1]\n\n np.append(explode_trace, [word, x_coor, y_coor])\n\n # Add the current and the next frames to the data\n self.explode_images.append(explode_image)\n self.explode_traces.append(explode_trace)\n explode_image = next_image\n else:\n # No action is made by the teacher after counting is done\n explode_trace = explode_image\n self.explode_images.append(explode_image)\n self.explode_traces.append(explode_trace)\n\n self.explode_length = len(self.explode_images)", "def _propagate_step(self):\n\n # optical depth to next interaction\n self.tau = -np.log(self.RNG.rand(self.N_active))\n # optical depth to sphere edge\n self.tau_edge = np.sqrt(self.tau_sphere**2 - self.tau_i**2 *\n (1. - self.mu_i**2)) - self.tau_i * self.mu_i\n\n # identify packets that escape\n self.esc_mask = self.tau_edge < self.tau\n # update number of escaping packets\n self.N_esc += self.esc_mask.sum()\n\n # identify interacting packets\n self.nesc_mask = np.logical_not(self.esc_mask)\n\n # decide which interacting packets scatter and which get absorbed\n self.abs_mask = self.RNG.rand(self.nesc_mask.sum()) >= self.albedo\n self.scat_mask = np.logical_not(self.abs_mask)\n\n # select properties of scattering packets\n self.tau = self.tau[self.nesc_mask][self.scat_mask]\n self.tau_i = self.tau_i[self.nesc_mask][self.scat_mask]\n self.mu_i = self.mu_i[self.nesc_mask][self.scat_mask]\n\n # update number of active packets\n self.N_active = self.scat_mask.sum()\n\n # update properties (position in optical depth space, propagation\n # direction) of scattering packets\n self.tau_i = np.sqrt(self.tau_i**2 + self.tau**2 +\n 2. * self.tau * self.tau_i * self.mu_i)\n self.mu_i = 2 * self.RNG.rand(self.N_active) - 1.", "def update_arrows(self, n=None):\n i = 0\n for arrow in self.arrows:\n arrow.update()\n i += 1\n if n and i>n: break" ]
[ "0.54222375", "0.527418", "0.5002062", "0.49782464", "0.48366687", "0.47597006", "0.47035027", "0.4701566", "0.46510434", "0.4605041", "0.4603975", "0.45744327", "0.4565291", "0.45511046", "0.4542252", "0.45417517", "0.45346773", "0.45223463", "0.44967106", "0.44768924", "0.4472782", "0.4461968", "0.44474304", "0.440983", "0.43946317", "0.43770307", "0.4372522", "0.43714565", "0.43590444", "0.43507338" ]
0.7737243
0
Test creating and cancelling an invoice
def test_invoice(self): invoice = self._create_invoice() self.assertEquals(invoice.total_amount, Decimal('2.38')) self.assertEquals(invoice.is_paid, False) # then cancel the created invoice cancelled_invoice = cancel_invoice(invoice) self.assertEquals(cancelled_invoice.total_amount, Decimal('-2.38'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invoice_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performe delete\n self._delete_model(\"invoice\", id_inv)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_item_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [ \"name\", \"description\", \"image_link\", \"price\" ])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n id_itm = self._create_model(\"invoiceitem\", data, [ \"quantity\", \"quote_price\" ])\n if id_itm:\n # then performe delete\n self._delete_model(\"invoiceitem\", id_itm)\n self.assertIsNotNone(id_itm)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n self._create_model(\"invoice\", data, [])\n self.assertIsNotNone(id)", "def test_cancel_pending_payment(self):\n pass", "def test_invoice_item_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n self._create_model(\"invoiceitem\", data, [\"quantity\", \"quote_price\"])\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performing detail\n self._detail_model(\"invoice\", self.invoice_data, id, [])\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # create another customer\n id_other = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id_other:\n # then performe update\n data = self.invoice_data\n 
data[\"customer_id\"] = id_other\n self._update_model(\"invoice\", id, data, [])\n self.assertIsNotNone(id_other)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def abc_confirm_invoice(self, lines, packages, data, params, res):\n invoice = params.get('invoice')\n if invoice and invoice.state == 'draft':\n self.env.cr.commit()\n env = None\n try:\n # Ne cursor doesn't time out when requesting lock.\n # Could be bad I guess? Works for now.\n # TODO: Look into setting a more reasonable lock wait time.\n new_cr = Registry(self.env.cr.dbname).cursor()\n new_cr.autocommit(True)\n env = api.Environment(new_cr, self.env.uid, self.env.context)\n # Validate invoice\n invoice.signal_workflow('invoice_open')\n res['invoice']['name'] = invoice.number\n res['messages'].append(u\"Created and confirmed invoice %s.\" % invoice.number)\n res['results']['invoice'] = 'confirmed'\n # Commit to unlock the invoice sequence\n env.cr.commit()\n except Exception as e:\n res['warnings'].append((\n _(u\"Failed to confirm invoice %s!\") % (invoice and (invoice.number or invoice.name) or 'Unknown'),\n '%s\\n\\nTraceback:\\n%s' % (e.message or 'Unknown Error', traceback.format_exc())))\n finally:\n if env:\n env.cr.close()", "def test_get_invoice(self):\n invoice = Invoice(self.client, 123456)\n self.assertEqual(invoice._populated, False)\n\n self.assertEqual(invoice.label, \"Invoice #123456\")\n self.assertEqual(invoice._populated, True)\n\n self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2))\n self.assertEqual(invoice.total, 9.51)", "def test_get_invoice(self):\n invoice = Invoice(self.client, 123, {})\n\n self.assertEqual(invoice.date, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(invoice.id, 123)\n self.assertEqual(invoice.label, \"Invoice\")\n self.assertEqual(invoice.subtotal, 120.25)\n self.assertEqual(invoice.tax, 12.25)\n self.assertEqual(invoice.total, 132.5)\n self.assertIsNotNone(invoice.tax_summary)", "def test_02_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_01_lunch_order()\r\n #We have a confirmed order with its associate cashmove\r\n #We execute the cancel function\r\n self.order_one.cancel()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #We check that the state is cancelled and that the cashmove has been deleted\r\n self.assertEqual(self.order_one.state,'cancelled')\r\n self.assertFalse(self.order_one.cashmove)", "def test_cancel_order(self):\n # create a order to cancel\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # cancel the order\n new_order = resp.get_json()\n new_order['status'] = 'Cancelled'\n resp = self.app.put('/orders/{}/cancel'.format(new_order['id']),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n cancelled_order = resp.get_json()\n self.assertEqual(cancelled_order['status'], 'Cancelled')", "def test_cancel_shipment(self):\n pass", "def test_terminate_agreement(self):\n pass", "def test_create_confirm_order_details(self):\n pass", "def test_invoice_item_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n 
id_prod = self._create_model(\"product\", self.product_data, [ \"name\", \"description\", \"image_link\", \"price\" ])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n id_itm = self._create_model(\"invoiceitem\", data, [ \"quantity\", \"quote_price\" ])\n if id_itm:\n # then performing detail\n self._detail_model(\"invoiceitem\", self.invoice_item_data, id, [ \"quantity\", \"quote_price\" ])\n self.assertIsNotNone(id_itm)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_item_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [ \"name\", \"description\", \"image_link\", \"price\" ])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n id_itm = self._create_model(\"invoiceitem\", data, [ \"quantity\", \"quote_price\" ])\n if id_itm:\n # then performe update\n data = self.invoice_item_data\n data[\"price_paid\"] = 88.77\n self._update_model(\"invoiceitem\", id, data, [\"quote_price\"])\n self.assertIsNotNone(id_itm)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_dont_cancel_for_already_cancelled(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, tzinfo=dt_timezone.utc\n )\n self.unpaid.status = 'CANCELLED'\n self.unpaid.save()\n self.assertEqual(\n self.unpaid.status, 'CANCELLED', self.unpaid.status\n )\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking and studio once\n # for all cancelled bookings\n unpaid_booking = Booking.objects.get(id=self.unpaid.id)\n self.assertEqual(len(mail.outbox), 0)\n self.assertEqual(\n unpaid_booking.status, 'CANCELLED', unpaid_booking.status\n )\n\n # auto_cancelled set to True only on cancelled bookings\n self.assertFalse(unpaid_booking.auto_cancelled)\n self.assertFalse(self.paid.auto_cancelled)", "def test_cancel_pending_match(self):\n user1 = get_user_model().objects.get(username='[email protected]')\n self.client.login(username='[email protected]', password='1')\n\n office = OfficeLocation.objects.all()[0]\n org = OrgGroup.objects.filter(parent__isnull=True)[0]\n\n submission = Interest()\n submission.owner = user1\n submission.for_coffee = True\n submission.save()\n submission.locations.add(office)\n submission.departments.add(org)\n\n self.assertEqual(submission.is_active, True)\n resp = self.client.get(reverse('mystery:close_cancel', args=(submission.id,)))\n self.assertEqual(resp.status_code, 302)\n self.assertIn(reverse('mystery:mystery'), resp['Location'])\n self.assertEqual(Interest.objects.get(id=submission.id).is_active, False)", "def test_cancel_shipment_old(self):\n pass", "def create_invoice(invoice: Invoice, callback_url: Optional[HttpUrl] = None):\n # Send the invoice, collect the money, send the notification (the callback)\n return {\"msg\": \"Invoice received\"}", "def test_cancel_unpaid_bookings(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, tzinfo=dt_timezone.utc\n )\n 
self.assertEqual(\n self.unpaid.status, 'OPEN', self.unpaid.status\n )\n self.assertEqual(\n self.paid.status, 'OPEN', self.paid.status\n )\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking and studio once for all\n # cancelled bookings\n unpaid_booking = Booking.objects.get(id=self.unpaid.id)\n paid_booking = Booking.objects.get(id=self.paid.id)\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(\n unpaid_booking.status, 'CANCELLED', unpaid_booking.status\n )\n self.assertEqual(\n paid_booking.status, 'OPEN', paid_booking.status\n )\n\n # auto_cancelled set to True on cancelled bookings\n self.assertTrue(unpaid_booking.auto_cancelled)\n self.assertFalse(paid_booking.auto_cancelled)", "def test_dont_cancel_for_already_cancelled(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, tzinfo=dt_timezone.utc\n )\n self.unpaid.cancelled = True\n self.unpaid.save()\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking and studio once\n # for all cancelled bookings\n self.unpaid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 0)\n self.assertTrue(self.unpaid.cancelled)", "def test_create_confirm_delivery_details(self):\n pass", "def test_request_cancel_active_subscription(self):\n self.braintree_customer.subscription_id = \"1234\"\n self.braintree_customer.pending_cancel = False\n self.braintree_customer.save()\n self.assertTrue(SubscriptionManager.request_cancel(self.braintree_customer))\n self.assertTrue(self.braintree_customer.pending_cancel)", "def delete_invoice(cls, invoice_id: int): # pylint: disable=too-many-locals,too-many-statements\n # update transaction function will update the status from PayBC\n _update_active_transactions(invoice_id)\n\n invoice: Invoice = Invoice.find_by_id(invoice_id, skip_auth_check=True)\n current_app.logger.debug(f'<Delete Invoice {invoice_id}, {invoice.invoice_status_code}')\n\n # Create the payment system implementation\n pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(invoice.payment_method_code)\n\n # set payment status as deleted\n payment = Payment.find_payment_for_invoice(invoice_id)\n _check_if_invoice_can_be_deleted(invoice, payment)\n\n if payment:\n payment.payment_status_code = PaymentStatus.DELETED.value\n payment.flush()\n\n # Cancel invoice\n invoice_reference = InvoiceReference.find_active_reference_by_invoice_id(invoice.id)\n payment_account = PaymentAccount.find_by_id(invoice.payment_account_id)\n\n if invoice_reference:\n pay_service.cancel_invoice(payment_account=payment_account, inv_number=invoice_reference.invoice_number)\n invoice.invoice_status_code = InvoiceStatus.DELETED.value\n\n for line in invoice.payment_line_items:\n line.line_item_status_code = LineItemStatus.CANCELLED.value\n\n if invoice_reference:\n invoice_reference.status_code = InvoiceReferenceStatus.CANCELLED.value\n invoice_reference.flush()\n\n invoice.save()\n\n current_app.logger.debug('>delete_invoice')", "def test_acknowledge_orders(self):\n pass", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n 
pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def test_cancel_order_failure(self):\n # create a order to cancel\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # cancel the order\n new_order = resp.get_json()\n new_order['status'] = 'Cancelled'\n resp = self.app.put('/orders/{}/cancel'.format(23),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def test_request_cancel_no_subscription(self):\n # Customer has not set subscription ID\n with self.assertRaises(BraintreeError):\n SubscriptionManager.request_cancel(self.braintree_customer)" ]
[ "0.75387025", "0.7113662", "0.7041869", "0.6957861", "0.68343097", "0.6746061", "0.67317563", "0.6628058", "0.6626321", "0.65469885", "0.643424", "0.6363992", "0.63521475", "0.63066596", "0.62726057", "0.62344754", "0.6169239", "0.61677945", "0.61546206", "0.61513394", "0.6144442", "0.61408", "0.61389154", "0.6137291", "0.6123603", "0.61177295", "0.6088667", "0.6081016", "0.6077775", "0.60759115" ]
0.83051145
0
Test that the invoice_confirmed signal gets emitted correctly
def test_confirmed_signal(self): # it is a dict so that it can be modified inside the function counter = {'n_emits': 0} def on_invoice_confirmed(*args, **kwargs): counter['n_emits'] += 1 invoice_confirmed.connect(on_invoice_confirmed) invoice = self._create_invoice(confirmed=False) self.assertEquals(counter['n_emits'], 0) invoice.confirmed = True invoice.save() self.assertEquals(counter['n_emits'], 1) # only the first confirmation should count invoice.confirmed = False invoice.save() invoice.confirmed = True invoice.save() self.assertEquals(counter['n_emits'], 1) # invoice confirmed on creation should emit the signal too self._create_invoice(confirmed=True) self.assertEquals(counter['n_emits'], 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_simple_confirmed(self):\n appt_date = datetime.date.today()\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient, appt_date)\n qs = reminders.Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n self.assertFalse(self.other_patient in qs)\n self.assertFalse(self.unrelated_patient in qs)", "def test_simple_confirmed(self):\n appt_date = datetime.date.today()\n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_unconfirmed_notification(self.other_patient, appt_date)\n qs = Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n self.assertFalse(self.other_patient in qs)\n self.assertFalse(self.unrelated_patient in qs)", "def test_customer_notified(self, mocked_notify_client):\n order = OrderPaidFactory()\n\n notify.quote_accepted(order)\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == order.get_current_contact_email()\n assert call_args['template_id'] == Template.quote_accepted_for_customer.value\n assert call_args['personalisation']['recipient name'] == order.contact.name\n assert call_args['personalisation']['embedded link'] == order.get_public_facing_url()", "def confirmed(self):", "def abc_confirm_invoice(self, lines, packages, data, params, res):\n invoice = params.get('invoice')\n if invoice and invoice.state == 'draft':\n self.env.cr.commit()\n env = None\n try:\n # Ne cursor doesn't time out when requesting lock.\n # Could be bad I guess? 
Works for now.\n # TODO: Look into setting a more reasonable lock wait time.\n new_cr = Registry(self.env.cr.dbname).cursor()\n new_cr.autocommit(True)\n env = api.Environment(new_cr, self.env.uid, self.env.context)\n # Validate invoice\n invoice.signal_workflow('invoice_open')\n res['invoice']['name'] = invoice.number\n res['messages'].append(u\"Created and confirmed invoice %s.\" % invoice.number)\n res['results']['invoice'] = 'confirmed'\n # Commit to unlock the invoice sequence\n env.cr.commit()\n except Exception as e:\n res['warnings'].append((\n _(u\"Failed to confirm invoice %s!\") % (invoice and (invoice.number or invoice.name) or 'Unknown'),\n '%s\\n\\nTraceback:\\n%s' % (e.message or 'Unknown Error', traceback.format_exc())))\n finally:\n if env:\n env.cr.close()", "def test_manually_confirm(self):\n data = {}\n response = self.client.post(self.url, data)\n self.assertRedirects(response, reverse('reminders_dashboard'))\n\n reminder = reminders.SentNotification.objects.get(pk=self.unconfirmed.pk)\n self.assertEqual(reminder.status, 'manual')\n self.assertEqual(reminder.date_confirmed.date(), datetime.date.today())", "def test_manually_confirm(self):\n data = {}\n response = self.client.post(self.url, data)\n self.assertRedirects(response, reverse('reminders_dashboard'))\n\n reminder = reminders.SentNotification.objects.get(pk=self.unconfirmed.pk)\n self.assertEqual(reminder.status, 'manual')\n self.assertEqual(reminder.date_confirmed.date(), datetime.date.today())", "def test_mixed_messages_confirmed(self):\n appt_date = datetime.date.today()\n self.test_patient.next_visit = appt_date\n self.test_patient.save()\n notified = self.create_unconfirmed_notification(self.test_patient, appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n notified_again = self.create_unconfirmed_notification(self.test_patient, appt_date)\n qs = reminders.Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n self.assertTrue(qs.count(), 1)", "def test_simple_unconfirmed(self):\n appt_date = datetime.date.today()\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient, appt_date)\n qs = reminders.Patient.objects.unconfirmed_for_date(appt_date)\n self.assertFalse(self.test_patient in qs)\n self.assertTrue(self.other_patient in qs)\n self.assertFalse(self.unrelated_patient in qs)", "def test_invoice(self):\n invoice = self._create_invoice()\n self.assertEquals(invoice.total_amount, Decimal('2.38'))\n self.assertEquals(invoice.is_paid, False)\n\n # then cancel the created invoice\n cancelled_invoice = cancel_invoice(invoice)\n self.assertEquals(cancelled_invoice.total_amount, Decimal('-2.38'))", "def test_simple_unconfirmed(self):\n appt_date = datetime.date.today()\n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_unconfirmed_notification(self.other_patient, appt_date)\n qs = Patient.objects.unconfirmed_for_date(appt_date)\n self.assertFalse(self.test_patient in qs)\n self.assertTrue(self.other_patient in qs)\n self.assertFalse(self.unrelated_patient in qs)", "def test_customer_notified(self, mocked_notify_client):\n order = OrderPaidFactory()\n\n notify.order_paid(order)\n\n assert mocked_notify_client.send_email_notification.called\n call_args = 
mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == order.get_current_contact_email()\n assert call_args['template_id'] == Template.order_paid_for_customer.value\n assert call_args['personalisation']['recipient name'] == order.contact.name\n assert call_args['personalisation']['embedded link'] == order.get_public_facing_url()", "def test_customer_notified(self, mocked_notify_client):\n order = OrderFactory()\n\n notify.quote_cancelled(order, by=AdviserFactory())\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == order.get_current_contact_email()\n assert call_args['template_id'] == Template.quote_cancelled_for_customer.value\n assert call_args['personalisation']['recipient name'] == order.contact.name\n assert call_args['personalisation']['embedded link'] == order.get_public_facing_url()", "def test_customer_notified(self, mocked_notify_client):\n order = OrderWithOpenQuoteFactory()\n\n notify.order_cancelled(order)\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == order.get_current_contact_email()\n assert call_args['template_id'] == Template.order_cancelled_for_customer.value\n assert call_args['personalisation']['recipient name'] == order.contact.name\n assert call_args['personalisation']['embedded link'] == order.get_public_facing_url()", "def test_multiple_notifications_confirmed(self):\n appt_date = datetime.date.today()\n self.test_patient.next_visit = appt_date\n self.test_patient.save()\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n confirmed_again = self.create_confirmed_notification(self.test_patient, appt_date)\n qs = reminders.Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n self.assertTrue(qs.count(), 1)", "def test_customer_notified(self, mocked_notify_client):\n order = OrderWithOpenQuoteFactory()\n\n notify.quote_generated(order)\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == order.get_current_contact_email()\n assert call_args['template_id'] == Template.quote_sent_for_customer.value\n assert call_args['personalisation']['recipient name'] == order.contact.name\n assert call_args['personalisation']['embedded link'] == order.get_public_facing_url()", "def test_mixed_messages_confirmed(self):\n appt_date = datetime.date.today()\n self.create_unconfirmed_notification(self.test_patient, appt_date)\n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_unconfirmed_notification(self.test_patient, appt_date)\n qs = Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n self.assertTrue(qs.count(), 1)", "def test_single_quant_confirm(self):\n pick = self.quant_1.create_picking(self.picking_type_pick, confirm=True)\n # Check it is confirmed\n self.assertEqual(pick.state, \"confirmed\")", "def test_multiple_notifications_confirmed(self):\n appt_date = datetime.date.today()\n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_confirmed_notification(self.test_patient, appt_date)\n qs = Patient.objects.confirmed_for_date(appt_date)\n self.assertTrue(self.test_patient in qs)\n 
self.assertTrue(qs.count(), 1)", "def test_get_invoice(self):\n invoice = Invoice(self.client, 123456)\n self.assertEqual(invoice._populated, False)\n\n self.assertEqual(invoice.label, \"Invoice #123456\")\n self.assertEqual(invoice._populated, True)\n\n self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2))\n self.assertEqual(invoice.total, 9.51)", "def test_invoice_payment_notification(node_factory):\n opts = [{}, {\"plugin\": os.path.join(os.getcwd(), \"contrib/plugins/helloworld.py\")}]\n l1, l2 = node_factory.line_graph(2, opts=opts)\n\n msats = 12345\n preimage = '1' * 64\n label = \"a_descriptive_label\"\n inv1 = l2.rpc.invoice(msats, label, 'description', preimage=preimage)\n l1.rpc.pay(inv1['bolt11'])\n\n l2.daemon.wait_for_log(r\"Received invoice_payment event for label {},\"\n \" preimage {}, and amount of {}msat\"\n .format(label, preimage, msats))", "def test_create_confirm_delivery_details(self):\n pass", "def test_api_user_resend_confirmation_post(self):\n pass", "def test_invoice_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performe delete\n self._delete_model(\"invoice\", id_inv)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # create another customer\n id_other = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id_other:\n # then performe update\n data = self.invoice_data\n data[\"customer_id\"] = id_other\n self._update_model(\"invoice\", id, data, [])\n self.assertIsNotNone(id_other)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_advisers_notified(self, mocked_notify_client):\n order = OrderPaidFactory(assignees=[])\n assignees = OrderAssigneeFactory.create_batch(2, order=order)\n subscribers = OrderSubscriberFactory.create_batch(2, order=order)\n\n notify.quote_accepted(order)\n\n assert mocked_notify_client.send_email_notification.called\n # 1 = customer, 4 = assignees/subscribers\n assert len(mocked_notify_client.send_email_notification.call_args_list) == (4 + 1)\n\n calls_by_email = {\n data['email_address']: {\n 'template_id': data['template_id'],\n 'personalisation': data['personalisation'],\n }\n for _, data in mocked_notify_client.send_email_notification.call_args_list\n }\n for item in itertools.chain(assignees, subscribers):\n call = calls_by_email[item.adviser.get_current_email()]\n assert call['template_id'] == Template.quote_accepted_for_adviser.value\n assert call['personalisation']['recipient name'] == item.adviser.name\n assert call['personalisation']['embedded link'] == order.get_datahub_frontend_url()", "def test_confirm_add_flow_request_confirmed_consent(self):\n self.client.login(username='duck', password='duck')\n # Gets the confirmation code installed with the test data\n c = ConsentConfirmation.objects.get(confirmation_id=CORRECT_CONFIRM_ID)\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id={}'.format(CORRECT_CONFIRM_ID))\n\n 
redirect_url = '{}?process_id={}&success=true'.format(c.destination_endpoint_callback_url,\n c.flow_request.process_id)\n self.assertRedirects(res, redirect_url, fetch_redirect_response=False)\n flow_request = c.flow_request\n self.assertEqual(flow_request.status, FlowRequest.ACTIVE)\n channel = ConsentConfirmation.objects.get(confirmation_id=CORRECT_CONFIRM_ID).channel\n # It remain CR until the consent notification consumer gets the change\n self.assertEqual(channel.status, Channel.CONSENT_REQUESTED)", "def test_registered_with_notification_and_pin(self):\n now = datetime.datetime.now()\n self.contact.pin = '1234'\n self.contact.save()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now)\n msg = self._send(self.reg_conn, '1234')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')", "def test_total_invoices_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_invoices, 0)", "def test_registered_with_notification_and_pin(self):\n now = datetime.datetime.now()\n self.contact.pin = '1234'\n self.contact.save()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now,\n date_queued=now)\n msg = self._send(self.reg_conn, '1234')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')" ]
[ "0.71098185", "0.7048236", "0.67829335", "0.67025834", "0.6654247", "0.6595958", "0.6595958", "0.654072", "0.6531124", "0.6526328", "0.64574295", "0.6427712", "0.6408454", "0.6397459", "0.6392092", "0.6362075", "0.63616884", "0.6288885", "0.6262651", "0.60832816", "0.6037275", "0.6036434", "0.5992642", "0.5980315", "0.59268695", "0.5914844", "0.58999425", "0.5859572", "0.58531696", "0.58522063" ]
0.88064384
0
Gets the active_member_count of this Context17.
def active_member_count(self): return self._active_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumActive(self):\n return self._get_active( )", "def bounced_member_count(self):\n return self._bounced_member_count", "def member_count(self):\n return len(self.members)", "def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])", "def active_member_count(self, active_member_count):\n\n self._active_member_count = active_member_count", "def get_count(self):\n\n\t\treturn self.__count", "def get_count(self):\n return self._count", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def get_count(self):\r\n return self.count", "def active_count(self):\n cnt = 0\n for item in self[:]:\n if item.is_alive():\n cnt += 1\n else:\n self.remove(item)\n return cnt", "def count(self):\n \n return self._count", "def GetCount(self):\n return(self.count)", "def count(self):\n return self.get_count()", "def activeusercount(self):\n sql = '''select to_char(count(*)-1, 'FM99999999999999990') retvalue \n from v$session where username is not null \n and status='ACTIVE' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def getCount(self):\n return self.count", "def associated_object_count(self):\n return self._associated_object_count", "def Count(self):\n return self._get_attribute('count')", "def member_count(ctx, verbosity):\n\n if verbosity is not None:\n logging.basicConfig(level=getattr(logging, verbosity))\n else:\n logging.getLogger(__name__).addHandler(logging.NullHandler())\n\n ma = MailmanAdmin(os.environ['GEOUSAGE_MAILMAN_ADMIN_URL'],\n os.environ['GEOUSAGE_MAILMAN_ADMIN_PASSWORD'])\n\n click.echo(ma.member_count)" ]
[ "0.6896081", "0.640346", "0.61951387", "0.61759603", "0.61616695", "0.60395855", "0.6013379", "0.6006747", "0.6006747", "0.59408104", "0.5931322", "0.58700466", "0.5865242", "0.5846405", "0.58382374", "0.5832671", "0.5832671", "0.5832671", "0.5832671", "0.5832671", "0.5832671", "0.5832671", "0.5832671", "0.5832671", "0.5832671", "0.5802591", "0.57261103", "0.56945163", "0.56929404", "0.5692258" ]
0.8488191
0
Sets the active_member_count of this Context17.
def active_member_count(self, active_member_count): self._active_member_count = active_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active_member_count(self):\n return self._active_member_count", "def bounced_member_count(self, bounced_member_count):\n\n self._bounced_member_count = bounced_member_count", "def set_activeLedCount(self, newval):\n rest_val = str(newval)\n return self._setAttr(\"activeLedCount\", rest_val)", "def setCount(self, num):\n self.count=num", "def setActive(self, active):\n\n self._active = active", "def active_users(self, active_users):\n\n self._active_users = active_users", "def set_active(self, active):\n self._active = active", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def set_active(self, active):\n self.active = active", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def active(self, active):\n\n self._active = active", "def active(self, active):\n\n self._active = active", "def active(self, active):\n\n self._active = active", "def active(self, active):\n\n self._active = active", "def set_count(self, count):\n self._count = count", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def setActive(self, i, a=1):\n self.active[i] = a\n return", "def SetActive(self, b):\r\n\r\n self.active = b", "def active(self, active):\n if active is None:\n raise ValueError(\"Invalid value for `active`, must not be `None`\") # noqa: E501\n\n self._active = active", "def active(self, value):\n self._active = value\n # Check if this is already linked with an object in the database.\n # If it is, change the username in the user account too.\n try:\n self.userprofile.user.is_active = value\n except UserProfile.DoesNotExist:\n pass", "def set_active(self):\n self.active = True", "def set_count(c):\n global count\n count = c", "def direct_count(self, direct_count):\n\n self._direct_count = direct_count", "def bounced_member_count(self):\n return self._bounced_member_count", "def count(self, count):\n\n self._count = count" ]
[ "0.64063823", "0.5860484", "0.58239967", "0.5772565", "0.57543284", "0.57226", "0.5673672", "0.56626874", "0.56626874", "0.56626874", "0.56626874", "0.56626874", "0.5575489", "0.55588037", "0.55588037", "0.5521708", "0.5521708", "0.5521708", "0.5521708", "0.54948616", "0.5442933", "0.5394556", "0.52941954", "0.5285485", "0.5261767", "0.5258856", "0.5189663", "0.5141014", "0.5088145", "0.5080141" ]
0.83144844
0
Gets the bounced_member_count of this Context17.
def bounced_member_count(self): return self._bounced_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active_member_count(self):\n return self._active_member_count", "def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])", "def get_count(self):\n return self._count", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def bounced_member_count(self, bounced_member_count):\n\n self._bounced_member_count = bounced_member_count", "def get_count(self):\n\n\t\treturn self.__count", "def get_count(self):\r\n return self.count", "def member_count(self):\n return len(self.members)", "def GetCount(self):\n return(self.count)", "def count(self):\n \n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self.get_count()", "def getCount(self):\n return self.count", "def Count(self):\n return self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def removed_member_count(self):\n return self._removed_member_count", "def member_count(ctx, verbosity):\n\n if verbosity is not None:\n logging.basicConfig(level=getattr(logging, verbosity))\n else:\n logging.getLogger(__name__).addHandler(logging.NullHandler())\n\n ma = MailmanAdmin(os.environ['GEOUSAGE_MAILMAN_ADMIN_URL'],\n os.environ['GEOUSAGE_MAILMAN_ADMIN_PASSWORD'])\n\n click.echo(ma.member_count)", "def getCount(self):\n return self.base.get(\"count\", [])" ]
[ "0.7381187", "0.63977176", "0.63508046", "0.63348234", "0.63348234", "0.630128", "0.62972474", "0.6285729", "0.6238138", "0.6193707", "0.61564", "0.6126884", "0.6126884", "0.6126884", "0.6126884", "0.6126884", "0.6126884", "0.6126884", "0.6126884", "0.6126884", "0.6126884", "0.609391", "0.6071007", "0.6048109", "0.60124373", "0.60124373", "0.59176105", "0.5908896", "0.59063435", "0.58857536" ]
0.8398334
0
Sets the bounced_member_count of this Context17.
def bounced_member_count(self, bounced_member_count): self._bounced_member_count = bounced_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active_member_count(self, active_member_count):\n\n self._active_member_count = active_member_count", "def bounced_member_count(self):\n return self._bounced_member_count", "def bid_count(self, bid_count):\n\n self._bid_count = bid_count", "def set_count(self, count):\n self._count = count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def setCount(self, num):\n self.count=num", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def byte_count(self, byte_count):\n\n self._byte_count = byte_count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def associated_object_count(self, associated_object_count):\n self._associated_object_count = associated_object_count", "def unsubscribed_member_count(self, unsubscribed_member_count):\n\n self._unsubscribed_member_count = unsubscribed_member_count", "def count(self, count: int) -> None:\n self._count = count", "def count(self, count: int):\n\n self._count = count", "def vm_count(self, vm_count):\n\n self._vm_count = vm_count", "def vm_count(self, vm_count):\n\n self._vm_count = vm_count", "def block_count(self, block_count):\n\n self._block_count = block_count", "def bm_catalog_count(self, bm_catalog_count):\n\n self._bm_catalog_count = bm_catalog_count", "def processor_count(self, processor_count):\n\n self._processor_count = processor_count", "def removed_member_count(self, removed_member_count):\n\n self._removed_member_count = removed_member_count", "def set_city_count(self, city_count):\n self.city_count = city_count", "def node_count(self, node_count):\n\n self._node_count = node_count", "def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit", "def direct_count(self, direct_count):\n\n self._direct_count = direct_count", "def setClassCount(self, count):\n\n return self._set(classCount=count)", "def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count" ]
[ "0.71493083", "0.7031893", "0.5979804", "0.59014523", "0.58768964", "0.58768964", "0.58521104", "0.58517843", "0.5821436", "0.5615978", "0.5615978", "0.5615978", "0.5615978", "0.55973405", "0.55521816", "0.554865", "0.554214", "0.55111253", "0.5494533", "0.5494533", "0.54597133", "0.5457011", "0.5391334", "0.53583497", "0.52742964", "0.5259948", "0.52487093", "0.52283096", "0.5215908", "0.52076006" ]
0.86100817
0
Gets the created_by of this Context17.
def created_by(self): return self._created_by
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def created_by(self) -> str:\n return pulumi.get(self, \"created_by\")", "def created_by(self):\n url = self._data.get('related', {}).get('created_by')\n return self._tower._get_object_by_url('User', url) # pylint: disable=protected-access", "def created_by(self) -> Optional[str]:\n return pulumi.get(self, \"created_by\")", "def created_by(self) -> Optional[str]:\n return pulumi.get(self, \"created_by\")", "def created_by(self) -> Optional[str]:\n return pulumi.get(self, \"created_by\")", "def created_by(self) -> Optional[str]:\n return pulumi.get(self, \"created_by\")", "def created_by(self) -> Optional[str]:\n return pulumi.get(self, \"created_by\")", "def created_by(self) -> Optional[str]:\n return pulumi.get(self, \"created_by\")", "def created_user(self):\n return self._created_user", "def created_by(self) -> Optional[pulumi.Input['UserInfoArgs']]:\n return pulumi.get(self, \"created_by\")", "def created_by_id(self):\n return self._created_by_id", "def created_by(self) -> \"str\":\n return self._attrs.get(\"createdBy\")", "def created_by(self) -> \"str\":\n return self._attrs.get(\"createdBy\")", "def created_by(self) -> \"str\":\n return self._attrs.get(\"createdBy\")", "def created_by(self) -> \"str\":\n return self._attrs.get(\"createdBy\")", "def created_by_id(self) -> str:\n return self.__created_by_id", "def created_by_user(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_by_user\")", "def created_by(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"created_by\")", "def created_by(self):\n if \"createdBy\" in self._prop_dict:\n if isinstance(self._prop_dict[\"createdBy\"], OneDriveObjectBase):\n return self._prop_dict[\"createdBy\"]\n else :\n self._prop_dict[\"createdBy\"] = IdentitySet(self._prop_dict[\"createdBy\"])\n return self._prop_dict[\"createdBy\"]\n\n return None", "def created_by(self) -> Optional['outputs.UserInfoResponse']:\n return pulumi.get(self, \"created_by\")", "def created_by(self):\n membership = UnitMembershipFactory(\n unit=self.unit, role=models.UnitMembershipRole.OWNER\n )\n return membership.user", "def created_by_user_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"created_by_user_id\")", "def created_at(self):\n return self.getattr('created_at')", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by_user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"created_by_user_id\")", "def created_at(self):\n return self._created_at", "def created_at(self):\n return self._created_at" ]
[ "0.7789317", "0.7770012", "0.757339", "0.757339", "0.757339", "0.757339", "0.757339", "0.757339", "0.756759", "0.7545281", "0.7522512", "0.7440229", "0.7440229", "0.7440229", "0.7440229", "0.7343571", "0.73141575", "0.72036463", "0.7189899", "0.71786857", "0.6838154", "0.66218024", "0.6558772", "0.6556036", "0.6556036", "0.6556036", "0.6556036", "0.65327114", "0.65032154", "0.65032154" ]
0.81725967
0
Gets the custom_fields_definition of this Context17.
def custom_fields_definition(self): return self._custom_fields_definition
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def custom_fields(self):\n return custom_fields.CustomFields(self)", "def custom_fields(self):\r\n return custom_fields.CustomFields(self)", "def custom_fields_definition(self, custom_fields_definition):\n\n self._custom_fields_definition = custom_fields_definition", "def field(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField':\n return pulumi.get(self, \"field\")", "def custom_fields(self) -> dict:\n url = f'{self.api_url}Fields?apiKey={self.api_key}'\n r_dict = self._es_get_request(url)\n self._check_response(r_dict)\n\n return {l['Field']['Name']: l['Field']['Id'] for l in\n r_dict['ApiResponse']['Data']['Fields']} # list of dicts", "def get_definition(self):\n return self.definition", "def get_recipe_raw_definition(self):\n return self.recipe_settings", "def getCustomDict(self):\n return self.custom", "def hook_def(self) -> HookDefinition:\n return self._hook_def", "def get_fields(self):\n\n\t\treturn self.__fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def _get_fields(self):\n return self._fields", "def get_dict(self):\n\n return self._definition", "def get_fields(self):\r\n return self.fields", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def definition(self):\n\n return self._definition", "def definition(self):\n\n return self._definition", "def custom_data_17(self):\n # type: () -> string_types\n return self._custom_data_17", "def custom_attributes(self):\n return self._custom_attributes", "def definition(self):\n return self._definition", "def get_fields_point(self):\n self.set_definition(sps21point)\n return self.get_fields()", "def deal_fields(self):\r\n return deals.DealFields(self)", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container" ]
[ "0.6732822", "0.6684957", "0.5955038", "0.5517716", "0.5398259", "0.5376668", "0.52525747", "0.5214191", "0.51776576", "0.5158833", "0.512581", "0.512581", "0.51108223", "0.5084462", "0.5083847", "0.5002417", "0.49952582", "0.49952582", "0.49414942", "0.49301967", "0.4929908", "0.4916454", "0.4915151", "0.48923993", "0.487504", "0.487504", "0.487504", "0.487504", "0.487504", "0.487504" ]
0.83008784
0
Sets the custom_fields_definition of this Context17.
def custom_fields_definition(self, custom_fields_definition): self._custom_fields_definition = custom_fields_definition
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def custom_fields_definition(self):\n return self._custom_fields_definition", "def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields", "def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields", "def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields", "def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields", "def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields", "def __update_custom_field_settings(self,\n eachfield, #field etree\n resourcetablename,\n fieldname\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n unikey = \"%s__%s\" % (resourcetablename, fieldname)\n field_property = self.custom_field_properties.get(unikey, {})\n\n cust_fieldtype = field_property.get(\"fieldtype\", None)\n cust_readable = field_property.get(\"readable\", None)\n cust_writable = field_property.get(\"writable\", None)\n cust_label = field_property.get(\"label\", None)\n cust_hint = field_property.get(\"hint\", None)\n cust_default = field_property.get(\"default\", None)\n cust_lines = field_property.get(\"lines\", None)\n cust_boxes = field_property.get(\"boxes\", None)\n cust_has_options = field_property.get(\"has_options\", None)\n cust_options = field_property.get(\"options\", None)\n\n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def crfform_definition(self, crfform_definition):\n\n self._crfform_definition = crfform_definition", "def __update_custom_fieldtype_settings(self,\n eachfield, #field etree\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n fieldtype = eachfield.attrib.get(TYPE)\n field_property = self.custom_fieldtype_properties.get(fieldtype, {})\n\n cust_fieldtype = fieldtype_property.get(\"fieldtype\", None)\n cust_readable = fieldtype_property.get(\"readable\", None)\n cust_writable = fieldtype_property.get(\"writable\", None)\n cust_label = fieldtype_property.get(\"label\", None)\n cust_hint = fieldtype_property.get(\"hint\", None)\n cust_default = fieldtype_property.get(\"default\", None)\n cust_lines = fieldtype_property.get(\"lines\", None)\n cust_boxes = fieldtype_property.get(\"boxes\", None)\n cust_has_options = fieldtype_property.get(\"has_options\", None)\n cust_options = fieldtype_property.get(\"options\", None)\n \n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if 
cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def custom_profile_fields(self, custom_profile_fields):\n\n self._custom_profile_fields = custom_profile_fields", "def custom_fields(self):\r\n return custom_fields.CustomFields(self)", "def custom_fields(self):\n return custom_fields.CustomFields(self)", "def apply_extra_fields(self, om_context, f):\n if om_context.is_device_component:\n f.data[ZFact.MetadataKeys.ZEN_SCHEMA_TAGS_KEY] = \"DeviceComponent\"\n elif om_context.is_device:\n f.data[ZFact.MetadataKeys.ZEN_SCHEMA_TAGS_KEY] = \"Device\"\n if om_context.mem_capacity is not None:\n f.data[\n ZFact.MetadataKeys.MEM_CAPACITY_KEY\n ] = om_context.mem_capacity\n\n if om_context.dimensions:\n f.metadata.update(om_context.dimensions)\n\n if om_context.metadata:\n f.data.update(om_context.metadata)", "def _append_customfield_fields(self):\n for customfield in self._get_custom_fields(self._get_content_type()):\n if customfield.ui_visibility == CustomFieldVisibilityChoices.VISIBILITY_HIDDEN:\n continue\n\n field_name = f'cf_{customfield.name}'\n self.fields[field_name] = self._get_form_field(customfield)\n\n # Annotate the field in the list of CustomField form fields\n self.custom_fields[field_name] = customfield\n if customfield.group_name not in self.custom_field_groups:\n self.custom_field_groups[customfield.group_name] = []\n self.custom_field_groups[customfield.group_name].append(field_name)", "def add_field(self, field_data):\n def_field = {'id':None,\n 'ref':None,\n 'posx':'0',\n 'posy':'0',\n 'size':'50',\n 'text_orientation':'H',\n 'visible':'V',\n 'text_align':'L',\n 'props':'CNN'\n }\n\n field = dict(list(def_field.items()) + list(field_data.items()))\n #field['id'] = str(len(self.fields))\n\n self.fields.append(field)\n return field", "def custom_attributes(self, custom_attributes):\n\n self._custom_attributes = custom_attributes", "def additional_fields(self, additional_fields):\n\n self._additional_fields = additional_fields", "def mask_custom_field(self, custom_field, doc_type):\n\t\tcustom_field.fields.update({\n\t\t\t'doctype': 'DocField',\n\t\t\t'parent': doc_type,\n\t\t\t'parentfield': 'fields',\n\t\t\t'parenttype': 'DocType',\n\t\t})", "def definition(self, definition):\n\n self._definition = definition", "def custom_data_17(self, custom_data_17):\n # type: (string_types) -> None\n\n if custom_data_17 is not None:\n if not isinstance(custom_data_17, string_types):\n raise TypeError(\"Invalid type for `custom_data_17`, type has to be `string_types`\")\n\n self._custom_data_17 = custom_data_17", "def custom_data(self, custom_data):\n\n self._custom_data = custom_data", "def write_control_definition(self, write_control_definition):\n\n self._write_control_definition = write_control_definition", 
"def field_values(self, field_values):\n\n self._field_values = field_values", "def custom(self, custom):\n self._context[\"custom\"] = custom", "def _set_definition(self, definition: Dict[str, Any]):\n # Loop the keys and values of the provided definition\n for key, value in definition.items():\n\n if key not in self.store and key not in self.parents:\n self.store[key] = Configuration(pyfiguration=self.pyfiguration, parents=[*self.parents, key])\n\n self.definition[key] = value", "def timestep_definition(self, timestep_definition):\n\n self._timestep_definition = timestep_definition", "def modify_request_definition(self, request_definition_builder):\n request_definition_builder.argument_handler_builder.set_annotations(\n self._annotations, **self._more_annotations\n )", "def definition(self, definition: List[PipelineDefinition]):\r\n self._definition = definition", "def set_fields(self, fields: FieldDict):\n super().set_fields(fields)\n nested_field: NestedField = self.fields[self.nested]\n if not isinstance(nested_field, NestedField):\n raise TypeError(\n f'The field \"{self.nested}\" must be a NestedField instance, not \"{nested_field}\".')\n if nested_field.many:\n raise ValueError(f'The field \"{self.nested}\" can not be set as \"many=True\".')\n self.nested_field = nested_field\n # create partial methods\n self._do_dump = partial(\n getattr(self, self.dump_method),\n target=nested_field.dump_target,\n method=nested_field.dump,\n )\n self._do_load = partial(\n getattr(self, self.load_method),\n target=nested_field.load_target,\n method=nested_field.load,\n )" ]
[ "0.6642042", "0.64079607", "0.64079607", "0.64079607", "0.64079607", "0.64079607", "0.5507524", "0.5432757", "0.5380791", "0.5177665", "0.5114241", "0.50425124", "0.49577433", "0.4888433", "0.4846477", "0.4803468", "0.47680637", "0.47590303", "0.47508407", "0.47330523", "0.4682408", "0.46785122", "0.46635288", "0.4609449", "0.45719668", "0.45688832", "0.45641035", "0.45486966", "0.45190203", "0.4498802" ]
0.83766735
0
Gets the import_operation of this Context17.
def import_operation(self): return self._import_operation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n return getattr(self, 'import_{type}'.format(type=self.type))()", "def get_import_mode(self):\n\t\treturn self.buttonImport.get_active()", "def import_operation(self, import_operation):\n\n self._import_operation = import_operation", "def get_import_resource_class(self):\n return self.process_import_resource(self.get_resource_class(usage='import'))", "def _get_import_step(self, cr, uid, external_session, context=None):\n return 100", "def process_import_resource(self, resource):\n return resource", "def current_operation(self):\n return self._current_operation", "def current_operation(self):\n return self._current_operation", "def get_import_status(self):\n return AsyncResult(self.import_task_id).state", "def dereference(self):\n offset = headers.calculateRelativeAddress(self, self['Name'])\n return self.p.p.new(IMAGE_IMPORT_HINT, __name__='ImportName', offset=offset)", "def import_info(self):\n return self.setup.import_info", "def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)", "def import_file(self) -> pulumi.Input['FileMetadataArgs']:\n return pulumi.get(self, \"import_file\")", "def getOp(self):\n return self._OPERATION", "def get_fit_import_setup(self):\n return self.setup.import_info", "def get_import_source(self, val):\n\t\treturn self.ia_class.get_source(val) + self.ia_submodule.get_source(val) + self.ia_function.get_source(val)", "def getOperation(self):\n return _libsbml.FluxBound_getOperation(self)", "def import_job(self) -> str:\n return pulumi.get(self, \"import_job\")", "def get_operation_old(operation_name):\n op = operations_api.get_operation(operation_name)\n return op", "def is_import():\n return sync_mode in (SyncMode.IMPORT_LOCAL, SyncMode.IMPORT_REMOTE)", "def import_dir(self):\n return self._directory(1) # DIRECTORY_ENTRY_IMPORT", "def get_file_import_output(file_import_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n workspace_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFileImportResult]:\n ...", "def get_operagion(self):\n if self.OP_GID not in self._data_dict:\n return None\n return dao.get_operation_by_gid(self._data_dict.get(self.OP_GID, None))", "def T(layer):\n return graph.get_tensor_by_name(\"import/%s:0\" % layer)", "def get_auto_start_import(self):\n\t\treturn self.checkAutoStartImport.get_active()", "def _GetTpuOperationRef(self, operation):\n return resources.REGISTRY.ParseRelativeName(\n operation.name, collection='tpu.projects.locations.operations')", "def task(self):\n return import_path_to_callable(self.func)", "def operation(self) -> str:\n return self._operation", "def import_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"import_only\")", "def _get_op_str(self):\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n\n if type(self.operation) is str:\n op_name_str = self.operation\n else:\n op_name_str = self.operation.__name__\n\n try:\n getattr(cca_trans, op_name_str)\n op_str = f\"cca_trans.{op_name_str}\"\n except 
AttributeError:\n try:\n getattr(cca_out, op_name_str)\n op_str = f\"cca_out.{op_name_str}\"\n except AttributeError:\n op_str = op_name_str\n\n return op_str" ]
[ "0.65936714", "0.6466993", "0.6325107", "0.6175758", "0.5936432", "0.59285593", "0.586923", "0.586923", "0.5814522", "0.58043265", "0.5719262", "0.5651123", "0.56482184", "0.5646165", "0.5480833", "0.54793406", "0.5474109", "0.5467355", "0.5420354", "0.5364674", "0.53598595", "0.53011006", "0.52914566", "0.52641034", "0.52567786", "0.5233259", "0.5222142", "0.5205114", "0.5204466", "0.51796263" ]
0.8589078
0
Sets the import_operation of this Context17.
def import_operation(self, import_operation): self._import_operation = import_operation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_operation(self):\n return self._import_operation", "def _import(self, _import):\n\n self.__import = _import", "def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)", "def set_import_mode(self, flag):\n\t\tif self.buttonImport.get_active() and not flag:\n\t\t\tself.buttonImport.set_active(False)\n\t\t\treturn True\n\t\telif not self.buttonImport.get_active() and flag:\n\t\t\tself.buttonImport.set_active(True)\n\t\t\treturn True\n\t\treturn False", "def set_operation(self, operation):\n self._data_dict[self.OP_GID] = operation.gid", "def imported(self, imported):\n\n self._imported = imported", "def on_import(self, event=None):\n if event is not None:\n event.Skip()\n data_id, theory_id, state_id = self.set_data_helper()\n temp = data_id + state_id\n self.parent.set_data(data_id=temp, theory_id=theory_id)", "def operation(self, operation: str):\n\n self._operation = operation", "def start_import_task(clientRequestToken=None, name=None, importUrl=None):\n pass", "def on_button_import_clicked(self, button):\n\t\tif button.get_active():\n\t\t\t#NOTE: mnemonics are not recognized for this button. no idea why\n\t\t\t##button.set_label(_('Stop _import'))\n\t\t\tself.buttonImport.set_label('')\n\t\t\tself.buttonImport.child.set_text_with_mnemonic(_('Stop _import'))\n\t\t\tself._set_edit_mode(False)\n\t\t\tself.emit('start-import')\n\t\telse:\n\t\t\t#NOTE: mnemonics are not recognized for this button. no idea why\n\t\t\t##button.set_label(_('Start import'))\n\t\t\tself.buttonImport.child.set_text_with_mnemonic(_('Start _import'))\n\t\t\tself._set_edit_mode(True)\n\t\t\tself.emit('stop-import')", "async def async_step_import(self, import_info: dict[str, Any]) -> FlowResult:\n import_info.pop(CONF_MONITORED_CONDITIONS, None)\n import_info.pop(CONF_NICS, None)\n import_info.pop(CONF_DRIVES, None)\n import_info.pop(CONF_VOLUMES, None)\n return await self.async_step_user(import_info)", "def import_resource(self, overwrite: bool = True, **kwargs) -> None:\n pass", "def on_import(self, function_graph, node, reason):", "def operation(self, operation):\n if self._configuration.client_side_validation and operation is None:\n raise ValueError(\"Invalid value for `operation`, must not be `None`\") # noqa: E501\n\n self._operation = operation", "def process_import_resource(self, resource):\n return resource", "def _update_import_and_file(self, import_, request, reupload):\n with transaction.atomic():\n # update all parameters from the request. 
Since this is a re-upload,\n # and essentially the same as creating a new import, we'll allow\n # redefinition of any user-editable parameter\n import_context = {\n \"status\": Import.Status.CREATED,\n \"category_id\": request.data.get(\"category\", import_.category_id),\n \"file_format_id\": request.data.get(\n \"file_format\", import_.file_format_id\n ),\n \"protocol_id\": request.data.get(\"protocol\", import_.protocol.pk),\n \"compartment\": request.data.get(\"compartment\", import_.compartment),\n \"x_units_id\": request.data.get(\"x_units\", import_.x_units_id),\n \"y_units_id\": request.data.get(\"y_units\", import_.y_units_id),\n }\n\n # get the file to parse. it could be one uploaded in an earlier request\n old_file = None\n if reupload:\n file = ImportFile.objects.create(file=request.data[\"file\"])\n import_context[\"file_id\"] = file.pk\n old_file = import_.file\n import_, created = Import.objects.update_or_create(\n uuid=import_.uuid, defaults=import_context\n )\n\n # remove the old file after the reference to it is replaced\n if old_file:\n logger.debug(f\"Deleting file {old_file}\")\n old_file.delete()\n return import_", "def __call__(\n self,\n request: flow.ImportFlowRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operations_pb2.Operation:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"post\",\n \"uri\": \"/v3beta1/{parent=projects/*/locations/*/agents/*}/flows:import\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_import_flow(request, metadata)\n pb_request = flow.ImportFlowRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = operations_pb2.Operation()\n json_format.Parse(response.content, resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_import_flow(resp)\n return resp", "def setOp(self, op):\n self.__op = op", "def setOp(self, op):\n self.__op = op", "def __init__(__self__, *,\n content_type: pulumi.Input[Union[str, 'FileImportContentType']],\n import_file: pulumi.Input['FileMetadataArgs'],\n ingestion_mode: pulumi.Input[Union[str, 'IngestionMode']],\n resource_group_name: pulumi.Input[str],\n source: pulumi.Input[str],\n workspace_name: pulumi.Input[str],\n file_import_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"content_type\", 
content_type)\n pulumi.set(__self__, \"import_file\", import_file)\n pulumi.set(__self__, \"ingestion_mode\", ingestion_mode)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"workspace_name\", workspace_name)\n if file_import_id is not None:\n pulumi.set(__self__, \"file_import_id\", file_import_id)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def _get_import_step(self, cr, uid, external_session, context=None):\n return 100", "def data_import_crf_status(self, data_import_crf_status):\n\n self._data_import_crf_status = data_import_crf_status", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilenames()\n self._import_path_var.set(filename)", "def import_from(self, importer=None):\n if not importer:\n raise aspecd.exceptions.MissingImporterError(\"No importer provided\")\n importer.import_into(self)\n self._origdata = copy.deepcopy(self.data)", "def file_import(self):\r\n\r\n try:\r\n self.process_file_import()\r\n except InputError as ex:\r\n print(ex)\r\n self.file_import()", "async def async_step_import(self, import_data: dict[str, str]) -> FlowResult:\n import_source = import_data.pop(\"import_source\")\n if import_source == \"geography_by_coords\":\n return await self.async_step_geography_by_coords(import_data)\n return await self.async_step_geography_by_name(import_data)", "def start_import(data_import):\n\tdata_import = frappe.get_doc(\"Data Import Beta\", data_import)\n\ti = Importer(data_import.reference_doctype, data_import=data_import)\n\treturn i.import_data()" ]
[ "0.7231188", "0.6295214", "0.5836815", "0.5603795", "0.5519087", "0.54600257", "0.54505676", "0.5446742", "0.5352566", "0.52968585", "0.5293666", "0.5288608", "0.5218256", "0.52099025", "0.5184431", "0.5161995", "0.5137286", "0.51354766", "0.51354766", "0.51264375", "0.5123549", "0.5123549", "0.5123549", "0.5116673", "0.51129436", "0.51042765", "0.5101957", "0.50866055", "0.50625277", "0.50244623" ]
0.84010893
0
Gets the removed_member_count of this Context17.
def removed_member_count(self): return self._removed_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def get_total_words_removed(self):\n return self.total_words_removed", "def removed_member_count(self, removed_member_count):\n\n self._removed_member_count = removed_member_count", "def question_count_removed(self, obj):\n return obj.questions.removed().count()", "def member_count(self):\n return len(self.members)", "def active_member_count(self):\n return self._active_member_count", "def __len__(self):\n return self._used - self._deleted", "def get_remaining_count(self):\n return self.total_count - self.count", "def getNumMembers(self):\n return _libsbml.ListOfMembers_getNumMembers(self)", "def bounced_member_count(self):\n return self._bounced_member_count", "def get_count(self):\n\n\t\treturn self.__count", "def get_count(self):\n return self._count", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def get_count(self):\r\n return self.count", "def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])", "def getNumMembers(self):\n return _libsbml.Group_getNumMembers(self)", "def removed(self):\n return self.past_keys - self.intersect", "def filtered_count(self) -> int:\n return self.__filtered_count", "def count(self):\n \n return self._count", "def cleanupNonces(self):\n try:\n mist_nonces = MistNonce.objects()\n except me.DoesNotExist:\n mist_nonces = []\n\n counter = 0\n for n in mist_nonces:\n if n.is_old():\n n.delete()\n counter += 1\n\n return counter", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count" ]
[ "0.7013696", "0.6821845", "0.6383696", "0.63803935", "0.5906242", "0.5767794", "0.57416683", "0.5624927", "0.55618656", "0.5561286", "0.5558821", "0.55301356", "0.55023795", "0.55023795", "0.5490844", "0.5486998", "0.5480672", "0.54591745", "0.53917253", "0.53878504", "0.53764546", "0.53757256", "0.53757256", "0.53757256", "0.53757256", "0.53757256", "0.53757256", "0.53757256", "0.53757256", "0.53757256" ]
0.8549048
0
Sets the removed_member_count of this Context17.
def removed_member_count(self, removed_member_count): self._removed_member_count = removed_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removed_member_count(self):\n return self._removed_member_count", "def unsubscribed_member_count(self, unsubscribed_member_count):\n\n self._unsubscribed_member_count = unsubscribed_member_count", "def remove_member(self, member_to_remove):\r\n if (isinstance(member_to_remove, int) or\r\n isinstance(member_to_remove, float)):\r\n self._length -= 4\r\n else:\r\n self._length -= len(member_to_remove)\r\n self._members.remove(member_to_remove)", "def remove_token(self, amount):\n self.M -= amount", "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def remove_member(self, member_to_remove):\r\n self._members.remove(member_to_remove)", "async def on_member_remove(self, member):\n await self.refresh_user_count_channel(member.guild)\n logger.info(f'{member.guild}: user {member} remove from guild')", "def removed(self, removed):\n\n self._removed = removed", "def active_member_count(self, active_member_count):\n\n self._active_member_count = active_member_count", "def reset_count(self):\n self.count = 0", "def remove_count(self, denom: CashDenomination, count: int) -> None:\n if not self.__open:\n raise RuntimeError(\"Cash drawer must be open to modify.\")\n if count < 0:\n raise ValueError(\"Count must not be negative.\")\n if count > self.__contents[denom]:\n raise ValueError(\"Cannot remove more than are present.\")\n self.__contents[denom] -= count", "def bounced_member_count(self, bounced_member_count):\n\n self._bounced_member_count = bounced_member_count", "def clear_members(self):\r\n self._members = []\r\n self._length = 4", "def removeMember(self, *args):\n return _libsbml.Group_removeMember(self, *args)", "def setCount(self, num):\n self.count=num", "def clean(self, now):\n def work():\n member = db.get(self.key())\n index = 0\n while index < len(member.tags):\n if member.stop_times[index] <= now:\n # We don't bother to update member_count here;\n # update_tagstats will eventually take care of it.\n member.remove_tag(member.tags[index])\n else:\n index += 1\n member.put()\n return member\n # Before starting a transaction, test if cleaning is needed.\n if self.stop_times and min(self.stop_times) <= now:\n return db.run_in_transaction(work)\n return self", "def _removeMeetingItemGroupedItemsNumAttribute(self):\n logger.info('Removing attribute \"groupedItemsNum\" from every items...')\n brains = self.catalog(meta_type=\"MeetingItem\")\n pghandler = ZLogHandler(steps=1000)\n pghandler.init('Working', len(brains))\n i = 0\n for brain in brains:\n i += 1\n pghandler.report(i)\n item = brain.getObject()\n safe_delattr(item, \"groupedItemsNum\")\n pghandler.finish()\n logger.info('Done.')", "def clear_members(self):\r\n self._members = []\r\n self._length = 0", "async def on_member_remove(member):\r\n pass", "def remove_life(self):\r\n if self.__lives < 0:\r\n self.__lives -= 1", "def set_count(self, count):\n self._count = count", "def remove_members(self, members):\n # TODO docstring\n self.__add_remove_members(members, remove=True)", "def remove_mutants_below_readcount(self, min_readcount, perfect_reads=False):\n if perfect_reads: get_readcount = lambda m: m.perfect_read_count\n else: get_readcount = lambda m: m.total_read_count\n # go over all mutants in self; need to convert dataset to a list to make a separate copy, \n # otherwise we'd be modifying the dataset while iterating through it, which isn't allowed.\n for mutant in list(self):\n if get_readcount(mutant) < min_readcount:\n self.remove_mutant(mutant.IB)\n # TODO really I shouldn't be removing 
mutants outright, just noting them as removed or something... In that case should they or should they not show up in \"for m in self\"? Probably not - they should have a separate dictionary?\n # TODO should I keep track of removed reads, and print in summary? MAYBE.", "def length(self, value):\n raise TypeError(\"Cannot delete {class-name} length property.\")", "def deduct_skill_points(self, amount):\r\n self.__skill_points -= amount\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skill points: \" + str(self.__skill_points))", "def resetCount(self):\n self.currentIndex = 0\n self.updateCurrentCommand()", "def test_remove_with_ref_count_gt_1(self):\n settings.TEST_SETTING_LIST = ['item1']\n wrapper = SettingListWrapper('TEST_SETTING_LIST', 'test setting list')\n wrapper.add('item1')\n\n self.assertEqual(wrapper.ref_counts.get('item1'), 2)\n wrapper.remove('item1')\n\n self.assertEqual(settings.TEST_SETTING_LIST, ['item1'])\n self.assertEqual(wrapper.ref_counts.get('item1'), 1)", "def remove(self, key, count=-1):\n removed = 0\n data = []\n for _key, _value in self.data:\n if _key.lower() != key.lower():\n if count > -1:\n if removed >= count:\n break\n else:\n removed += 1\n data.append((_key, _value))\n self.data = data", "def pop_counters(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def resetCounters(self):\n self.chain.zero_counters()\n counters = self.session.query(Counter).all()\n self.session.query(Counter).delete()" ]
[ "0.697273", "0.645651", "0.6037037", "0.5606054", "0.55690414", "0.5482646", "0.5481552", "0.5262689", "0.5258966", "0.52158", "0.5182624", "0.5136192", "0.503112", "0.5009139", "0.49902728", "0.49869564", "0.49549592", "0.491669", "0.4914983", "0.48692864", "0.4866584", "0.4856553", "0.48331124", "0.4803786", "0.4769212", "0.476513", "0.47585705", "0.47540352", "0.47368315", "0.47200528" ]
0.81735766
0
Gets the unsubscribed_member_count of this Context17.
def unsubscribed_member_count(self): return self._unsubscribed_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unsubscribed_member_count(self, unsubscribed_member_count):\n\n self._unsubscribed_member_count = unsubscribed_member_count", "def removed_member_count(self):\n return self._removed_member_count", "def get_subscribed_users(self, obj):\n return obj.subscribed_users.count()", "def active_member_count(self):\n return self._active_member_count", "def bounced_member_count(self):\n return self._bounced_member_count", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def unseen_messages(self, mailbox):\n data = self._cmd(\n \"STATUS\", self._encode_mbox_name(mailbox), \"(UNSEEN)\")\n m = self.unseen_pattern.match(data[-1].decode())\n if m is None:\n return 0\n return int(m.group(1))", "def get_unsubscription_channel(self):\n unsubscription_channels = dict(settings.UNSUBSCRIPTION_CHANNEL_CHOICES)\n return unsubscription_channels.get(self.unsubscription_channel, \"N/A\")", "def get_number_un_watched(self):\n movies_un_watched = 0\n for movie in self.movies:\n if not movie.is_watched:\n movies_un_watched += 1\n return movies_un_watched", "def unmerged_total(self):\n return int(self.git.rev_list('--count', '{}..{}'.format(self.base_branch, self.topic_branch)))", "def member_count(self):\n return len(self.members)", "def get_subscriber_count(self, response):\n return response.css('.yt-subscriber-count')\\\n .extract_first(default='')", "def subscriptions_limit(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"subscriptions_limit\")", "def get_unsubscription_reason(self):\n unsubscription_reasons = dict(settings.UNSUBSCRIPTION_REASON_CHOICES)\n return unsubscription_reasons.get(self.unsubscription_reason, \"N/A\")", "def get_count(self):\n return self._count", "def get_count(self):\n\n\t\treturn self.__count", "def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])", "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def notify_count(self):\n return self._notify_count", "def get_remaining_count(self):\n return self.total_count - self.count", "def get_not_contacted_count(self, seller_id):\n return self.get_not_contacted(seller_id).count()", "def getNumMembers(self):\n return _libsbml.ListOfMembers_getNumMembers(self)", "def message_count(self):\n return self._message_count", "def get_count(self):\r\n return self.count", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def get_total_words_removed(self):\n return self.total_words_removed", "def getCount(self):\n return self.base.get(\"count\", [])", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def tubclean_count(self):\n if self.is_info_v2:\n result = DeviceStatus.int_or_none(self._data.get(\"tclCount\"))\n else:\n result = self._data.get(\"TclCount\")\n if result is None:\n result = \"N/A\"\n return self._update_feature(WashDeviceFeatures.TUBCLEAN_COUNT, result, False)" ]
[ "0.71419156", "0.70558476", "0.5919587", "0.5731225", "0.5730982", "0.5687569", "0.5586158", "0.55647856", "0.55323863", "0.5452427", "0.54399496", "0.5360487", "0.53492814", "0.534088", "0.5291912", "0.5260404", "0.52590173", "0.52475226", "0.5246356", "0.5244646", "0.5243914", "0.5243549", "0.5229756", "0.52238774", "0.52214295", "0.52214295", "0.5196856", "0.5175953", "0.51653796", "0.5165079" ]
0.8775167
0
Sets the unsubscribed_member_count of this Context17.
def unsubscribed_member_count(self, unsubscribed_member_count): self._unsubscribed_member_count = unsubscribed_member_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def removed_member_count(self, removed_member_count):\n\n self._removed_member_count = removed_member_count", "def _unsubscribe(self):\n self.unsubscribe_date = now()\n self.unsubscribed = True\n self.subscribed = False", "def bounced_member_count(self, bounced_member_count):\n\n self._bounced_member_count = bounced_member_count", "def removed_member_count(self):\n return self._removed_member_count", "def active_member_count(self, active_member_count):\n\n self._active_member_count = active_member_count", "def unapproved(self, unapproved):\n\n self._unapproved = unapproved", "def onUnsubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount -= 1\n\t\treturn True", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def unsubscribe(self, update, context):\n # remove or update to the sqlite table.\n chat = update.message.chat\n self.db_manager.remove_user(chat.id)\n self.logger.info(\n 'Username: %s and chat_id: %s unsubscribed to the list.' % (chat.username, chat.id)\n )\n update.message.reply_text('You have successfully unsubscribed the notifications forever.')", "def reset_count(self):\n self.count = 0", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def unsubscribe(self, item_name):\n self.subscribed = None", "def non_registered_guests_counter(window, counter_id_for_non_registered_attendees):\r\n window.write_event_value('-COUNT2-', counter_id_for_non_registered_attendees)", "def setCount(self, num):\n self.count=num", "def unsubscribe(self, inst):\r\n if inst in self._subscribers:\r\n self._subscribers.remove(inst)\r\n vprint(\"{} is unsubscribed from {}\".format(inst.name, self.name))", "def unassign_members(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"unassign_members\"), kwargs)", "def set_count(self, count):\n self._count = count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def unban_member(self, *args, **kwargs):\n return self.bot.unban_chat_member(self.id, *args, **kwargs)", "async def on_member_remove(self, member):\n await self.refresh_user_count_channel(member.guild)\n logger.info(f'{member.guild}: user {member} remove from guild')", "def view_mailinglist_unsubscribe(request, slug, uidb36, token):\n newsletter = get_object_or_404(Newsletter, slug=slug)\n contact = untokenize(uidb36, token)\n\n already_unsubscribed = contact in newsletter.mailing_list.unsubscribers.all()\n\n if request.POST.get('email') and not already_unsubscribed:\n newsletter.mailing_list.unsubscribers.add(contact)\n newsletter.mailing_list.save()\n already_unsubscribed = True\n ContactMailingStatus.objects.create(newsletter=newsletter, contact=contact,\n status=ContactMailingStatus.UNSUBSCRIPTION)\n\n return render_to_response('newsletter/mailing_list_unsubscribe.html',\n {'email': contact.email,\n 'already_unsubscribed': already_unsubscribed},\n context_instance=RequestContext(request))", "def test_set_color_unsubscribed_stream_id(self) -> None:\n test_user = self.example_user(\"hamlet\")\n self.login_user(test_user)\n\n sub_info = gather_subscriptions_helper(test_user)\n\n not_subbed = sub_info.never_subscribed\n\n result = self.api_post(\n test_user,\n \"/api/v1/users/me/subscriptions/properties\",\n {\n 
\"subscription_data\": orjson.dumps(\n [\n {\n \"property\": \"color\",\n \"stream_id\": not_subbed[0][\"stream_id\"],\n \"value\": \"#ffffff\",\n }\n ]\n ).decode()\n },\n )\n self.assert_json_error(\n result, \"Not subscribed to stream id {}\".format(not_subbed[0][\"stream_id\"])\n )", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def unset_response(update: Update, context: CallbackContext) -> None:\n context.bot_data.update({str(update.message.chat_id) : 'False'})\n user_data = OrderedDict(user=str(update.message.chat_id),subscribed='False') \n users_table.upsert(user_data, ['user'])\n logger.info(\"Unsubscribing user \" + str(update.message.chat_id))\n text = \"No worries, you've been unsubscribed.\\n\\n\" \\\n \"To subscribe to daily updates again, just press /daily\"\n update.message.reply_text(text)\n\n update_string = \"User \" + str(update.message.chat_id) + \" unsubscribed\"\n # Alert admin that user unsubscribed. \n context.bot.send_message(ADMIN_CONVERSATION_ID, parse_mode='HTML', text=update_string)", "def unsubscribe(self):\n pass # pragma: no cover", "def remove_count(self, denom: CashDenomination, count: int) -> None:\n if not self.__open:\n raise RuntimeError(\"Cash drawer must be open to modify.\")\n if count < 0:\n raise ValueError(\"Count must not be negative.\")\n if count > self.__contents[denom]:\n raise ValueError(\"Cannot remove more than are present.\")\n self.__contents[denom] -= count", "def bounced_member_count(self):\n return self._bounced_member_count" ]
[ "0.7436961", "0.6079733", "0.57903993", "0.54903007", "0.53204024", "0.52699184", "0.5116732", "0.5002936", "0.49549854", "0.49549854", "0.49265596", "0.4916866", "0.49056363", "0.48885486", "0.4874461", "0.47977608", "0.47616208", "0.4734782", "0.4723715", "0.47060105", "0.47060105", "0.46378195", "0.4627415", "0.4615889", "0.4614595", "0.4610981", "0.46055135", "0.45950773", "0.45923987", "0.4560824" ]
0.8702396
0
Gets the updated_by of this Context17.
def updated_by(self): return self._updated_by
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_by(self) -> Optional[pulumi.Input['UserInfoArgs']]:\n return pulumi.get(self, \"updated_by\")", "def updated_by(self) -> \"str\":\n return self._attrs.get(\"updatedBy\")", "def updated_by(self) -> \"str\":\n return self._attrs.get(\"updatedBy\")", "def updated_by(self) -> \"str\":\n return self._attrs.get(\"updatedBy\")", "def updated_by(self) -> Optional['outputs.UserInfoResponse']:\n return pulumi.get(self, \"updated_by\")", "def updated_by_id(self):\n return self._updated_by_id", "def changed_by(self):\n return self._changed_by", "def changed_by(self):\n return self._changed_by", "def last_updated_user(self):\n return self._last_updated_user", "def last_modified_by(self):\n return self._last_modified_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_at(self):\n return self._updated_at", "def updated_at(self):\n return self._updated_at", "def updated_at(self):\n return self._updated_at", "def last_updated_by_id(self) -> str:\n return self.__last_updated_by_id", "def last_modified_by(self) -> str:\n return pulumi.get(self, \"last_modified_by\")", "def updated_by(self, updated_by):\n\n self._updated_by = updated_by", "def updated_on(self):\n return self._updated_on", "def updated_at(self) -> str:\n return pulumi.get(self, \"updated_at\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date", "def created_by(self):\n return self._created_by" ]
[ "0.77070093", "0.7586538", "0.7586538", "0.7586538", "0.73329616", "0.72001487", "0.70631254", "0.70631254", "0.68099815", "0.671652", "0.6666177", "0.6666177", "0.6666177", "0.65548086", "0.65548086", "0.65548086", "0.6360967", "0.63594735", "0.6252926", "0.6174813", "0.61473984", "0.6134621", "0.6134621", "0.6134621", "0.6134621", "0.6134621", "0.6134621", "0.6096512", "0.6096512", "0.6085399" ]
0.8346993
0
Sets the updated_by of this Context17.
def updated_by(self, updated_by): self._updated_by = updated_by
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self):\n return self._updated_by", "def updated_by(self) -> Optional[pulumi.Input['UserInfoArgs']]:\n return pulumi.get(self, \"updated_by\")", "def updated_by(self) -> \"str\":\n return self._attrs.get(\"updatedBy\")", "def updated_by(self) -> \"str\":\n return self._attrs.get(\"updatedBy\")", "def updated_by(self) -> \"str\":\n return self._attrs.get(\"updatedBy\")", "def updated_by_id(self, updated_by_id):\n self._updated_by_id = updated_by_id", "def changed_by(self, changed_by):\n\n self._changed_by = changed_by", "def updated_by(self) -> Optional['outputs.UserInfoResponse']:\n return pulumi.get(self, \"updated_by\")", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated(self, updated: datetime):\n\n self._updated = updated", "def last_updated_user(self, last_updated_user):\n self._last_updated_user = last_updated_user", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def updated_by_id(self):\n return self._updated_by_id" ]
[ "0.76505786", "0.76505786", "0.76505786", "0.68549126", "0.6627744", "0.64946896", "0.64946896", "0.64946896", "0.64742637", "0.61784595", "0.6080924", "0.6007099", "0.6007099", "0.5908962", "0.5908962", "0.5908962", "0.5908962", "0.5908962", "0.5908962", "0.5908962", "0.5908962", "0.5908962", "0.5908962", "0.57520634", "0.57276976", "0.5702349", "0.5702349", "0.5702349", "0.5658863", "0.5628261" ]
0.786663
0
Gets the updated_on of this Context17.
def updated_on(self): return self._updated_on
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_on(self):\n return self.get_time(\"updated_on\")", "def updated_at(self):\n return self._updated_at", "def updated_at(self):\n return self._updated_at", "def updated_at(self):\n return self._updated_at", "def time_updated(self):\n return self._time_updated", "def updated(self) -> datetime:\n return self._updated", "def updated_at(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"updated_at\")", "def updated_at(self) -> str:\n return pulumi.get(self, \"updated_at\")", "def Updated(self):\n return self._get_attr('Updated')", "def updated_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"updated_at\")", "def thisUpdate(self) -> datetime:\n this_update = self['tbs_cert_list']['this_update'].native\n return this_update", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date", "def metadata_updated_on(item):\n ts = item['updated_at']\n ts = str_to_datetime(ts)\n\n return ts.timestamp()", "def updated(self) -> str:\n return self._updated", "def updated_at(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"updated_at\")", "def last_updated(self):\n return self._last_updated", "def updated_at(self) -> \"datetime\":\n return self._attrs.get(\"updatedAt\")", "def updated_at(self) -> \"datetime\":\n return self._attrs.get(\"updatedAt\")", "def updated_at(self) -> \"datetime\":\n return self._attrs.get(\"updatedAt\")", "def get_last_updated_at(self):\n return self.last_updated", "def last_update(self):\n return self._last_update", "def last_update(self):\n return self._last_update", "def getThisUpdate(self):\n\n return self.get_POW().getThisUpdate()", "def getThisUpdate(self):\n\n return self.get_POW().getThisUpdate()", "def updated_on(self, updated_on):\n\n self._updated_on = updated_on", "def updated_datetime(self) -> datetime:\n return utc_to_local(self._db_data.updated_datetime)", "def updated(self):\n return getattr(self, self.schema._updated.name, None)", "def updated(self) -> datetime:\n return datetime.strptime(self.data['updated_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')", "def last_updated(self) -> str:\n return self._last_updated" ]
[ "0.8008507", "0.72253245", "0.72253245", "0.72253245", "0.707493", "0.7051411", "0.69915617", "0.6950934", "0.69193465", "0.6866043", "0.6785926", "0.6766942", "0.6766942", "0.6668178", "0.661875", "0.65489584", "0.65392816", "0.6523629", "0.6523629", "0.6523629", "0.6480343", "0.6336355", "0.6336355", "0.63192356", "0.63192356", "0.63063824", "0.6289058", "0.6232855", "0.61549306", "0.6148354" ]
0.8125521
0
Sets the updated_on of this Context17.
def updated_on(self, updated_on): self._updated_on = updated_on
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_on(self):\n return self._updated_on", "def updated(self, updated: datetime):\n\n self._updated = updated", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_on(self):\n return self.get_time(\"updated_on\")", "def updated(self, updated):\n\n self._updated = updated", "def updated(self, updated):\n\n self._updated = updated", "def updated(self, updated):\n\n self._updated = updated", "def updated(self, updated):\n\n self._updated = updated", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def update(self):\n self._is_on = self._is_on", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated(self, updated: str):\n\n self._updated = updated", "def time_updated(self, time_updated):\n self._time_updated = time_updated", "def set_on_update(cls, on_update_callback):\n cls._on_update = on_update_callback", "def updated_at(self, updated_at):\n if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501\n raise ValueError(\"Invalid value for `updated_at`, must not be `None`\") # noqa: E501\n\n self._updated_at = updated_at", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at" ]
[ "0.65927917", "0.65492976", "0.6331311", "0.6331311", "0.6331311", "0.6331311", "0.6331311", "0.6331311", "0.6331311", "0.6331311", "0.6331311", "0.6331311", "0.6330777", "0.6285204", "0.6285204", "0.6285204", "0.6285204", "0.61929846", "0.609771", "0.609771", "0.6042161", "0.60160977", "0.60160977", "0.60160977", "0.6006875", "0.5804669", "0.5789361", "0.5704612", "0.56683123", "0.56683123" ]
0.7826675
0
Build an interpolator for a restricted range of x values
def build_restrict_interp(power, lower, upper): index = np.searchsorted(power[:,0], [lower,upper]) (imin, imax) = (np.max([0,index[0]-5]), np.min([len(power[:,0])-1,index[-1]+5])) newint = interp.interp1d(np.log(power[imin:imax:,0]), np.log(power[imin:imax,1]), kind='cubic') return newint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_interpolator(x: np.array, y: np.array):\n return intp.PchipInterpolator(x, y)", "def interpolate_number(x, xp, yp, clamp=True):\n x = np.asarray(x)\n \n # Specific case for empty domain\n if xp[0] == xp[-1] or len(xp)<2:\n if len(x.shape) == 0:\n return yp[0]\n else:\n return [yp[0],]*len(x)\n\n # Extrapolate\n if not clamp:\n # Single value\n if len(x.shape) == 0:\n if x < xp[0]:\n return yp[ 0] + (x-xp[ 0])*(yp[ 0]-yp[ 1]) / (xp[ 0]-xp[ 1])\n elif x > xp[-1]:\n return yp[-1] + (x-xp[-1])*(yp[-1]-yp[-2]) / (xp[-1]-xp[-2])\n else:\n return np.interp(x, xp, yp)\n # Values list\n else:\n # Specific case for empty domain\n if xp[0] == xp[-1] or len(xp)<2:\n return [yp[0],]*len(x)\n y = np.interp(x, xp, yp)\n y[x < xp[ 0]] = yp[ 0] + (x[x<xp[ 0]]-xp[ 0]) * (yp[ 0]-yp[ 1]) / (xp[ 0]-xp[ 1])\n y[x > xp[-1]] = yp[-1] + (x[x>xp[-1]]-xp[-1]) * (yp[-1]-yp[-2]) / (xp[-1]-xp[-2])\n return y\n\n # Interpolate\n return np.interp(x, xp, yp)", "def domain_range(domain, _range=[0, 1], return_transform=False):\n\n if not return_transform:\n return interp1d([min(domain), max(domain)], [min(_range), max(_range)], bounds_error=False)\n else:\n m = interp1d([min(domain), max(domain)], [min(_range), max(_range)])\n return [float(m(v)) for v in domain] # Take float, else returns weird numpy.ndarray element", "def return_interpolated(self, x_value):\n\n return(self.interpolator(x_value))", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def interpolate( h, x, y=None, z=None, outOfRangeValue=30 ):\n\n if x != x: return outOfRangeValue\n if x <= h.GetXaxis().GetBinCenter(1) or x >= h.GetXaxis().GetBinCenter(h.GetXaxis().GetNbins()): return outOfRangeValue\n \n if y != None:\n if y != y: return outOfRangeValue\n if y <= h.GetYaxis().GetBinCenter(1) or y >= h.GetYaxis().GetBinCenter(h.GetYaxis().GetNbins()): return outOfRangeValue\n if z != None:\n if z != z: return outOfRangeValue\n if z <= h.GetZaxis().GetBinCenter(1) or z >= h.GetZaxis().GetBinCenter(h.GetZaxis().GetNbins()): return outOfRangeValue\n \n if y != None and z != None: return h.Interpolate( x, y, z )\n if y != None: return h.Interpolate( x, y )\n return h.Interpolate( x )", "def get_interp_points(N, xmin, xmax):\n if points == \"fixed\":\n x = numpy.linspace(xmin, xmax, N)\n\n elif points == \"variable\":\n # the Chebyshev nodes\n x = 0.5*(xmin + xmax) + \\\n 0.5*(xmax - xmin)*numpy.cos(2.0*numpy.arange(N)*math.pi/(2*N))\n\n return x", "def __call__(self, x, y):\n return self.interp(x,y)", "def _build_interpolator(self):\n # Extract the data from the interpolation dataset\n self.interp_data, names, units = xr_dataset_to_array(self.interp_ds, \n self.ztsp[0])\n \n # Record the variables and their units\n self.f_names = names[1:]\n self.f_units = units[1:]\n \n # Create the interpolator\n self.f = interp1d(self.interp_data[:,0], \n self.interp_data[:,1:].transpose())", "def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))", "def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, **kwargs):\n \n if min_x is None or max_x is None:\n a, b = self.get_min_max_x(**kwargs)\n if min_x is None:\n min_x = a\n if max_x is None:\n max_x = b\n \n new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)\n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n resolution = (numpy.amax(m.x) - numpy.amin(m.x)) / len(m.x)", "def interpolate(data, 
x):\n n = len(data)\n\n if isinstance(data, dict):\n if x in data:\n return S(data[x])\n X, Y = list(zip(*data.items()))\n else:\n if isinstance(data[0], tuple):\n X, Y = list(zip(*data))\n if x in X:\n return S(Y[X.index(x)])\n else:\n if x in range(1, n + 1):\n return S(data[x - 1])\n Y = list(data)\n X = list(range(1, n + 1))\n\n try:\n return interpolating_poly(n, x, X, Y).expand()\n except ValueError:\n d = Dummy()\n return interpolating_poly(n, d, X, Y).expand().subs(d, x)", "def Interpolator(X, Y, TimeleftIndex, TimeRightIndex,YValue):\n Y1 = Y[TimeleftIndex]\n Y2 = Y[TimeRightIndex]\n X2 = X[TimeRightIndex]\n X1 = X[TimeleftIndex]\n slope = (Y2 - Y1) / (X2 - X1)\n if slope != 0:\n X0 = (YValue - Y1) / slope + X1\n return X0\n else:\n return 0", "def clip_range(x, xlim):\n return min([max([x, xlim[0]]), xlim[1]])", "def _extended_discrete_xaxis(x_axis, n_points=100, eps=0.10):\n min_value = np.min(x_axis)\n max_value = np.max(x_axis)\n distance = max_value - min_value\n return np.linspace(min_value - eps * distance, max_value + eps * distance,\n num=n_points)", "def interpolate(self, x_pivot, f_pivot):\n interpolation = interp1d(x_pivot, f_pivot,\n kind=self.kind, bounds_error=False)\n return interpolation", "def lin_int(xs, ys):\n return scipy.interpolate.interp1d(xs, ys)", "def lin_interpol(x_p, y_p):\r\n f = np.zeros([ x_p.shape[0] - 1 , 4 ]) # Coefficents and interval array\r\n \r\n for i in range( x_p.shape[0] - 1 ): # for every x[i], x[i+1] pair\r\n \r\n x_coeff = (y_p[i+1] - y_p[i]) / (x_p[i+1] - x_p[i])\r\n const = (x_p[i+1]*y_p[i] - x_p[i]*y_p[i+1] ) / (x_p[i+1] - x_p[i])\r\n \r\n # save the x coefficent, constant and the interval for this line\r\n f[i,:] = x_coeff, const, x_p[i], x_p[i+1]\r\n \r\n for a, b, start, end in f: # for every line fitted\r\n line_x = np.linspace( start, end, 3) # points to plot in x_range\r\n line_y = line_x * a + b # find the fitted line value at these points\r\n plt.plot(line_x,line_y,'k--', lw = 1, label = 'Linear' if a==f[0][0] else \"\") # only label one plot\r", "def preprocess(self):\n\n if self.x_range == None:\n x_min = min(np.min(self.fx), np.min(self.gx))\n x_max = max(np.max(self.fx), np.max(self.gx))\n self.x_range = [x_min,x_max]\n\n f_inter = interpolate.interp1d(self.fx, self.fy, 'cubic', fill_value = 'extrapolate')\n g_inter = interpolate.interp1d(self.gx, self.gy, 'cubic', fill_value = 'extrapolate')\n fgx_new = np.linspace(self.x_range[0], self.x_range[1], self.N)\n fy_new = f_inter(fgx_new)\n gy_new = g_inter(fgx_new)\n\n self.fx, self.fy = fgx_new, fy_new\n self.gx, self.gy = fgx_new, gy_new", "def translate(self, value, left_min, left_max, right_min=None, right_max=None):\n if right_min is None:\n right_min = self.values['pulse_min'].data\n if right_max is None:\n right_max = self.values['pulse_max'].data\n # Figure out how 'wide' each range is\n left_span = left_max - left_min\n right_span = right_max - right_min\n # Convert the left range into a 0-1 range (float)\n value_scaled = float(value - left_min) / float(left_span)\n # Convert the 0-1 range into a value in the right range.\n return int(right_min + (value_scaled * right_span))", "def __new__(cls, minx, miny, minz, maxx, maxy, maxz):\n # Coerce bounds to floats, and nones to infs\n kwargs = locals()\n for b, inf in zip(('min', 'max'),\n (-np.inf, np.inf)):\n for axis in 'xyz':\n bound = b + axis\n value = kwargs[bound]\n kwargs[bound] = inf if value is None else float(value)\n \n kwargs.pop('cls') # must be passed positionally\n return super(cls, cls).__new__(cls, 
**kwargs)", "def ni_range(x0, x1, dx=1):\n # sanity check arguments\n if dx==0:\n raise ValueError(\"invalid parameters: dx==0\")\n if x0>x1 and dx>=0:\n raise ValueError(\"invalid parameters: x0>x1 and dx>=0\")\n if x0<x1 and dx<=0:\n raise ValueError(\"invalid parameters: x0<x1 and dx<=0\")\n \n # generate range list\n range_list = []\n x = x0\n while x < x1:\n range_list.append(x)\n x += dx\n return range_list", "def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n # your code goes here", "def InterpolationFunctions(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def InterpolateFunctions(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def speed_interpolation(val):\n if val == 0.5:\n return 1.0\n elif val < 0.5:\n return low_interp(val)\n else:\n return hi_interp(val)", "def forward(self, x):\n\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x", "def _format_range(x_range):\n\n try:\n x1, x2 = x_range\n except (TypeError, ValueError):\n raise NNDCInputError(f'Range keyword arg must have two elements: \"{x_range}\"')\n try:\n if np.isfinite(x1):\n x1 = f\"{x1}\"\n else:\n x1 = \"\"\n except TypeError:\n x1 = \"\"\n try:\n if np.isfinite(x2):\n x2 = f\"{x2}\"\n else:\n x2 = \"\"\n except TypeError:\n x2 = \"\"\n return x1, x2", "def clip(x, min, max):\r\n # see decorator for function body\r\n # for grep: clamp, bound\r", "def multilerp(x, y, x_value):\n\t# Thanks Dom for helping me improve my bad code https://discordapp.com/channels/348658686962696195/535605770436345857/574778990821113876\n\tassert type(x)==list and type(y)==list, \"x and y must be lists.\"\n\tassert len(x) == len(y), \"x and y must be equal length.\"\n\t\n\t# assert x[0] <= x_value <= x[-1], \"Value is out of range. Exrapolation currently not supported.\\n\" + str(x) + \" \\n\" + str(y) + \" \\n\" + str(x_value)\n\t# Extrapolation not supported, just clamp the value.\n\tx_value = clamp(x_value, x[0], x[-1])\n\n\tfor i, e in enumerate(x):\n\t\tif(x[i] <= x_value <= x[i+1]):\n\t\t\tfactor = rlerp(x[i], x[i+1], x_value)\n\t\t\treturn lerp(y[i], y[i+1], factor)\n\t\n\tprint(\"Warning: Value was not in any of the ranges for multilerp().\")" ]
[ "0.6273798", "0.60789615", "0.6039472", "0.5930019", "0.59064054", "0.5874849", "0.57292944", "0.57271487", "0.57102585", "0.56955993", "0.56856304", "0.56849724", "0.56517243", "0.5645956", "0.561356", "0.56114006", "0.5609342", "0.56068665", "0.5606565", "0.557454", "0.55733204", "0.55727357", "0.5548828", "0.55394423", "0.5537453", "0.55369467", "0.5514052", "0.5489281", "0.5489079", "0.5487239" ]
0.61375684
1
Test that multiplying the power spectrum by some knots gives an accurate answer.
def check_change_power_spectrum(test_knotpos, test_knotval, matpow): #Get the modified power spectrum kval = matpow[:,0] newpk = lyasimulation.change_power_spectrum_knots(test_knotpos, test_knotval, matpow) #Check the kvalues are still the same for comparison to the transfer function assert np.all([k in newpk[:,0] for k in kval]) #Build interpolators for the new power spectrum #Only interpolate a subset of Pk for speed newpkint = build_restrict_interp(newpk, test_knotpos[0]/3., test_knotpos[-1]*3) #Build interpolator for old power spectrum pkint = build_restrict_interp(matpow, test_knotpos[0]/3., test_knotpos[-1]*3) #Build interpolator for knots ext_knotpos = np.concatenate([[kval[0],],test_knotpos, [kval[-1],]]) ext_knotval = np.concatenate([[test_knotval[0],],test_knotval, [test_knotval[-1],]]) knotint = interp.interp1d(ext_knotpos, ext_knotval, kind='linear') #Check that the interpolator works assert np.all(np.abs(knotint(test_knotpos) / test_knotval-1) < 1e-5) lg_knotpos = np.log(test_knotpos) #Check modification worked at the knot values assert np.all(np.abs(np.exp(newpkint(lg_knotpos)) / (np.exp(pkint(lg_knotpos)) * test_knotval) - 1) < 1e-3) #Pick some random k values distributed uniformly in log space krand = (lg_knotpos[-1]-lg_knotpos[0]+0.2)*np.random.random(250)+lg_knotpos[0]-0.1 #Check that the modification was accurate at random positions #print(np.max(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1))) assert np.all(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1) < 0.01)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_change_power_spectrum():\n #The 2010 paper had the knots at:\n #k = 0.475 0.75 1.19, 1.89\n #(knotpos, knotval)\n tests = [(np.array([0.475, 0.75, 1.19, 1.89]), np.array([0.475, 0.75, 1.19, 1.89])),\n (np.array([0.475, 0.75, 1.19, 1.89]), np.array([1.2, 1., 1., 1.])),\n (np.array([0.475, 0.75, 1.19, 1.89]), np.array([1.2, 0.5, 1.2, 0.5])),\n (np.array([0.05, 0.1, 10]), np.array([1.3, 0.3, 1.1]))]\n matpow = np.loadtxt(\"testdata/ics_matterpow_99.dat\")\n #Copy array so that we don't get changed in-place\n [check_change_power_spectrum(kp, kv, matpow) for (kp, kv) in tests]", "def test_sff_knots():\n n_points = 300\n time = np.concatenate((np.linspace(0, 20, int(n_points/3)),\n np.linspace(30, 78, int(n_points/3)),\n np.linspace(80, 100, int(n_points/3))\n ))\n lc = KeplerLightCurve(time=time,\n flux=np.random.normal(1.0, 0.1, n_points),\n centroid_col=np.random.normal(1.0, 0.1, n_points),\n centroid_row=np.random.normal(1.0, 0.1, n_points))\n lc.correct() # should not raise an exception", "def test_intra_power_law_fit2(self):\n\t\tprint(type(self.fc_layers[0:2]), self.fc_layers[0:2])\n\t\tdetails= self.watcher.analyze(layers=self.fc_layers[0:2], intra=True, sparsify=False, pl_package=POWERLAW_PACKAGE, xmax=XMAX_FORCE)\n\t\tactual_alpha = details.alpha[0]\n\t\t#actual_best_fit = details.best_fit[0]\n\t\t#print(actual_alpha,actual_best_fit)\n\n\n\t\texpected_alpha = 2.719 # close to exact ?\n\t\t#expected_best_fit = LOG_NORMAL\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=2)\n\t\t#self.assertEqual(actual_best_fit, expected_best_fit)", "def test_knu(self):\n x = self.x\n result = bessel_sk.lnknu(x, self.maxorder)\n k_exp = [14.2670922632, 28.082603821, 172.7219309,\n 1825.996042640, 20431.0266426932]\n for i, l in enumerate(self.orders):\n assert_almost_equal(result[0,l]/k_exp[i], 1)", "def test_single_spectrum_with_positive_power(self):\n power = 0.69314718055\n nspec = 1\n assert np.isclose(cospectra_pvalue(power, nspec), 0.25)", "def knotsToFeetPerSecond(knots):\n return knots * 1.6878098571011957", "def eps_MTSI(omg, kx, ky, kz, prt=PlasmaParameters()):\n\n k2 = kz ** 2 + ky ** 2\n\n if k2 == 0:\n raise RuntimeError(\"The wave vector is Zero !!\")\n\n iEps = 1/omg**2\n eEpsz = prt.mi_over_me * ( kz**2 ) / ( (omg - ky * prt.driftSpeed/prt.BohmSpeed)**2 * k2 )\n eEpsy = prt.mi_over_me * ( ky**2 ) / ( ((omg - ky * prt.driftSpeed/prt.BohmSpeed)**2 - prt.electronCyclotronFrequency**2/ (prt.ionPlasmaFrequency/u.rad)**2)* k2 )\n\n return 1 - iEps - eEpsz - eEpsy", "def test_intra_power_law_fit(self):\n\n\t\tprint(type(self.fc_layers[0:2]), self.fc_layers[0:2])\n\t\tdetails= self.watcher.analyze(layers=self.fc_layers[0:2], intra=True, randomize=False, vectors=False, pl_package=POWERLAW_PACKAGE, xmax=XMAX_FORCE)\n\t\tactual_alpha = details.alpha[0]\n\t\t#actual_best_fit = details.best_fit[0]\n\t\t#print(actual_alpha,actual_best_fit)\n\n\t\texpected_alpha = 2.654 # not very accurate because of the sparisify transform\n\t\t#expected_best_fit = LOG_NORMAL\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=1)\n\t\t#self.assertEqual(actual_best_fit, expected_best_fit)", "def test_calc_kappa(test_coords):\n assert_almost_equal(calc_kappa(53.05187), 5762.687, decimal=3)", "def softplusk(x):\n return K.softplus(x) + 1e-10", "def test_calc_k_c():\n\n P_x0 = ufloat(1.75789868673e-12, 1.75789868673e-14) * u.nm**2/u.Hz # 1/100\n f_c = ufloat(50000, 0.5) * u.Hz # 1/100000 relative\n Q = ufloat(10000, 100) * u.dimensionless # 1/100\n T = ufloat(300, 3) * u.K # 1/100\n # 
ex_k_c is no longer a nice number because I switched from a rounded to\n # more exact value for Boltzmann's constant\n ex_k_c = ufloat(2.9999965233852217, 0.05196147267057527) * u.N/u.m\n k_c = calc_k_c(f_c, Q, P_x0, T)\n assert_almost_equal(k_c.magnitude.n, ex_k_c.magnitude.n)\n assert_almost_equal(k_c.magnitude.s, ex_k_c.magnitude.s)", "def test_knu_generic(self):\n l = np.array([500])\n x = np.array([1e2])\n result = bessel_sk.lnknu(x, l[0])[0, -1]\n expected = (mpmath.log(mpmath.besselk(l[0]+0.5, x[0]))\n +0.5*mpmath.log(mpmath.pi/(2*x[0])) \n )\n assert_almost_equal(result/expected, 1)", "def test_Morlet():\n morl = cw.MorletWave()\n assert(np.isclose(morl(0), np.pi**(-1/4), atol=1.e-12))\n assert(np.isclose(morl.freq(0), 0, atol=1.e-12))", "def test_flux(equation):\n u = .5\n eps = 1e-5\n expected = (equation.flux(u+eps) - equation.flux(u))/eps\n computed = equation.flux_prime(u)\n npt.assert_allclose(computed, expected, rtol=1e-4)", "def test_ks2x(self):\n D, Pval = ks_test(self.x1, self.x2)\n self.assertFloatEqual((D, Pval), (0.46, 3.801e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, exact=False)\n self.assertFloatEqual((D, Pval), (0.46, 5.084e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2[:20])\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x2[:20], self.x1)\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x1[:20], self.x2)\n self.assertFloatEqual((D,Pval), (0.48, 0.001772), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"greater\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"g\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"less\")\n self.assertFloatEqual((D,Pval), (6.9388939039072284e-18, 1.), eps=1e-4)\n D, Pval = ks_test(self.x2, self.x1, alt=\"l\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)", "def power_criterion(p, X, k, test_locs, reg=1e-2, use_unbiased=True, use_2terms=False):\n n = X.shape[0]\n V = test_locs\n fssd = FSSD(p, k, V, null_sim=None)\n fea_tensor = fssd.feature_tensor(X)\n u_mean, u_variance = ustat_h1_mean_variance(\n fea_tensor, return_variance=True, use_unbiased=use_unbiased\n )\n\n # mean/sd criterion\n sigma_h1 = np.sqrt(u_variance + reg)\n ratio = old_div(u_mean, sigma_h1)\n if use_2terms:\n obj = old_div(-1.0, (np.sqrt(n) * sigma_h1)) + np.sqrt(n) * ratio\n else:\n obj = ratio\n return obj", "def test_double_ended_ols_wls_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / 
(np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse',\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5,\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n pass", "def test_knu(self):\n lmax = 3\n x = np.array([500])\n result = bessel_sk.lnknu(x, lmax)\n pih = np.log(0.5*np.pi)\n expected = np.array([pih -x - 1*np.log(x),\n pih -x - 2*np.log(x) + np.log(x+1),\n pih -x - 3*np.log(x) + np.log(x**2+3*x+3),\n pih -x - 4*np.log(x) + np.log(x**3+6*x**2+15*x+15)\n ])\n assert_almost_equal(result[0]/expected.T, 1)", "def test_energy_flux_conversion(self):\n init_wl = np.linspace(300, 500, num=10)\n init_spec = np.ones(init_wl.shape)\n\n test_spec_base = Spectrum(init_wl, init_spec, x_unit='nm', is_photon_flux=True)\n spectrum = test_spec_base.get_spectrum(to_x_unit='nm')\n\n # Prepare an expected spectrum for comparsion\n expect_spec = init_spec * sc.h * sc.c / (init_wl*1e-9)\n\n # Since the values of the spectrum are very small, causing the errors in np.isclose()\n # ( both are in the order of ~1e-19) Need renormalise them for proper comparison.\n assert np.all(np.isclose(spectrum[1, :] * 1e19, expect_spec * 1e19))", "def test_kempton_taylor_q(self):\n c = array([2,3,3,3,3,3,4,4,4,6,6,7,7,9,9,11,14,15,15,20,29,33,34,\n 36,37,53,57,138,146,170])\n self.assertFloatEqual(kempton_taylor_q(c), 14/log(34/4))", "def test_powell(self):\n fun = get_problem('powell', dimension=2, lower=-4, upper=5)\n self.assertAlmostEqual(fun(np.zeros(2)), 0.0)", "def test_photon_flux_conversion(self):\n init_wl = np.linspace(300, 500, num=10)\n init_spec = np.ones(init_wl.shape)\n\n test_spec_base = Spectrum(init_wl, init_spec, 'nm', is_photon_flux=False)\n spectrum = test_spec_base.get_spectrum('nm', 
to_photon_flux=True)\n\n expect_spec = init_spec / (sc.h * sc.c / (init_wl*1e-9))\n\n assert np.all(np.isclose(spectrum[1, :], expect_spec))", "def test_k2c():\n assert temperatura.k2c(273.15) == 0", "def test_double_ended_ols_wls_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse')\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5)\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=5)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=6)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)", "def test_correctness(self):\n M_win = 1024\n N_fft = 131072\n # Set norm=False for correctness as the values obtained from the\n # scientific publication do not normalize the values. 
Normalizing\n # changes the sidelobe level from the desired value.\n w = windows.taylor(M_win, nbar=4, sll=35, norm=False, sym=False)\n f = fft(w, N_fft)\n spec = 20 * np.log10(np.abs(f / np.amax(f)))\n\n first_zero = np.argmax(np.diff(spec) > 0)\n\n PSLL = np.amax(spec[first_zero:-first_zero])\n\n BW_3dB = 2*np.argmax(spec <= -3.0102999566398121) / N_fft * M_win\n BW_18dB = 2*np.argmax(spec <= -18.061799739838872) / N_fft * M_win\n\n assert_allclose(PSLL, -35.1672, atol=1)\n assert_allclose(BW_3dB, 1.1822, atol=0.1)\n assert_allclose(BW_18dB, 2.6112, atol=0.1)", "def test_c2k():\n assert temperatura.c2k(0) == 273.15", "def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')", "def func_hollomon(eps,k,n):\n return k * eps**n", "def test_calculate_heat_loss_kwh():\n delta_t = pd.Series(\n [12.42, 12.23, 10.85, 9.65, 7.15, 4.85, 3.0, 3.28, 5.03, 7.71, 10.38, 11.77],\n index=[\n \"jan\",\n \"feb\",\n 
\"mar\",\n \"apr\",\n \"may\",\n \"jun\",\n \"jul\",\n \"aug\",\n \"sep\",\n \"oct\",\n \"nov\",\n \"dec\",\n ],\n )\n hours = pd.Series(\n [d * 24 for d in [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]],\n index=[\n \"jan\",\n \"feb\",\n \"mar\",\n \"apr\",\n \"may\",\n \"jun\",\n \"jul\",\n \"aug\",\n \"sep\",\n \"oct\",\n \"nov\",\n \"dec\",\n ],\n )\n heat_loss_coefficient = pd.Series(\n [\n 121,\n 150,\n ]\n )\n expected_output = np.array(\n [\n 1118.09808,\n 994.44576,\n 976.7604,\n 840.708,\n 643.6716,\n 422.532,\n 270.072,\n 295.27872,\n 438.2136,\n 694.08504,\n 904.3056,\n 1059.58248,\n 1386.072,\n 1232.784,\n 1210.86,\n 1042.2,\n 797.94,\n 523.8,\n 334.8,\n 366.048,\n 543.24,\n 860.436,\n 1121.04,\n 1313.532,\n ]\n )\n\n output = htuse._calculate_heat_loss_kwh(\n heat_loss_coefficient=heat_loss_coefficient,\n delta_t=delta_t,\n hours=hours,\n )\n\n assert_array_almost_equal(output, expected_output)", "def test_MFT_flux_conservation(centering='FFTSTYLE', outdir=None, outname='test_MFT_flux', precision=0.01):\n\n # Set up constants for either a more precise test or a less precise but much \n # faster test:\n print(\"Testing MFT flux conservation for centering = \"+centering)\n if precision ==0.001:\n npupil = 800\n npix = 4096\n u = 400 # of lam/D. Must be <= the Nyquist frequency of the pupil sampling or there\n # will be aliased copies of the PSF.\n elif precision==0.01: \n npupil = 400\n npix = 2048\n u = 200 # of lam/D. Must be <= the Nyquist frequency of the pupil sampling or there\n # will be aliased copies of the PSF.\n else:\n raise NotImplementedError('Invalid value for precision.')\n\n\n\n # Create pupil\n ctr = (float(npupil)/2.0, float(npupil)/2.0 )\n pupil = makedisk(s=(npupil, npupil), c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n pupil /= np.sqrt(pupil.sum())\n if outdir is not None:\n fits.PrimaryHDU(pupil.astype(np.float32)).writeto(outdir+os.sep+outname+\"pupil.fits\", clobber=True)\n\n # MFT setup style and execute\n mft = matrixDFT.MatrixFourierTransform(centering=centering, verbose=True)\n a = mft.perform(pupil, u, npix)\n\n pre = (abs(pupil)**2).sum() # normalized area of input pupil, should be 1 by construction\n post = (abs(a)**2).sum() # \n ratio = post / pre\n print \"Pre-FFT total: \"+str( pre)\n print \"Post-FFT total: \"+str( post )\n print \"Ratio: \"+str( ratio)\n\n\n\n if outdir is not None:\n complexinfo(a, str=\"mft1 asf\")\n asf = a.real.copy()\n fits.PrimaryHDU(asf.astype(np.float32)).writeto(outdir+os.sep+outname+\"asf.fits\", clobber=True)\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n #SF.SimpleFitsWrite(fn=outdir+os.sep+outname+\"psf.fits\", data=psf.astype(np.float32), clobber='y')\n fits.PrimaryHDU(psf.astype(np.float32)).writeto(outdir+os.sep+outname+\"psf.fits\", clobber=True)\n\n assert np.abs(1.0 - ratio) < precision" ]
[ "0.69962776", "0.6277038", "0.6117738", "0.6112239", "0.60500044", "0.60259444", "0.6025436", "0.60241675", "0.602194", "0.5997189", "0.59961194", "0.59884477", "0.5953086", "0.5942114", "0.58822083", "0.5864141", "0.58592874", "0.5849431", "0.58364874", "0.581356", "0.5805899", "0.5793469", "0.57821876", "0.57714677", "0.5769905", "0.57552916", "0.573672", "0.57317483", "0.57066464", "0.56818485" ]
0.71301085
0
Perform the power spectrum check for a number of different knot values and positions
def test_change_power_spectrum(): #The 2010 paper had the knots at: #k = 0.475 0.75 1.19, 1.89 #(knotpos, knotval) tests = [(np.array([0.475, 0.75, 1.19, 1.89]), np.array([0.475, 0.75, 1.19, 1.89])), (np.array([0.475, 0.75, 1.19, 1.89]), np.array([1.2, 1., 1., 1.])), (np.array([0.475, 0.75, 1.19, 1.89]), np.array([1.2, 0.5, 1.2, 0.5])), (np.array([0.05, 0.1, 10]), np.array([1.3, 0.3, 1.1]))] matpow = np.loadtxt("testdata/ics_matterpow_99.dat") #Copy array so that we don't get changed in-place [check_change_power_spectrum(kp, kv, matpow) for (kp, kv) in tests]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_change_power_spectrum(test_knotpos, test_knotval, matpow):\n #Get the modified power spectrum\n kval = matpow[:,0]\n newpk = lyasimulation.change_power_spectrum_knots(test_knotpos, test_knotval, matpow)\n #Check the kvalues are still the same for comparison to the transfer function\n assert np.all([k in newpk[:,0] for k in kval])\n #Build interpolators for the new power spectrum\n #Only interpolate a subset of Pk for speed\n newpkint = build_restrict_interp(newpk, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for old power spectrum\n pkint = build_restrict_interp(matpow, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for knots\n ext_knotpos = np.concatenate([[kval[0],],test_knotpos, [kval[-1],]])\n ext_knotval = np.concatenate([[test_knotval[0],],test_knotval, [test_knotval[-1],]])\n knotint = interp.interp1d(ext_knotpos, ext_knotval, kind='linear')\n #Check that the interpolator works\n assert np.all(np.abs(knotint(test_knotpos) / test_knotval-1) < 1e-5)\n lg_knotpos = np.log(test_knotpos)\n #Check modification worked at the knot values\n assert np.all(np.abs(np.exp(newpkint(lg_knotpos)) / (np.exp(pkint(lg_knotpos)) * test_knotval) - 1) < 1e-3)\n #Pick some random k values distributed uniformly in log space\n krand = (lg_knotpos[-1]-lg_knotpos[0]+0.2)*np.random.random(250)+lg_knotpos[0]-0.1\n #Check that the modification was accurate at random positions\n #print(np.max(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1)))\n assert np.all(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1) < 0.01)", "def check_wfs(dg):\n # load dsp results\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n \n # load waveforms\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n tb_wfs = sto.read_object('ORSIS3302DecoderForEnergy/raw/waveform', f_raw)\n \n # energy cut\n et = 'trapEmax'\n # elo, ehi = 8000, 16000\n # elo, ehi = 8000, 10000\n elo, ehi = 12000, 13000\n \n # dcr cut\n # alp_lo, alp_hi = -0.5, 0.5\n # gam_lo, gam_hi = 0.8, 1.2\n \n # aoe cut\n alp_lo, alp_hi = 0.064, 0.068\n gam_lo, gam_hi = 0.05, 0.06\n \n # selection\n idx_alp = df_dsp[et].loc[(df_dsp[et] > elo) & (df_dsp[et] < ehi) & \n (df_dsp.aoe > alp_lo) & (df_dsp.aoe < alp_hi)].index\n \n idx_gam = df_dsp[et].loc[(df_dsp[et] > elo) & (df_dsp[et] < ehi) & \n (df_dsp.aoe > gam_lo) & (df_dsp.aoe < gam_hi)].index\n \n wfs_alp = tb_wfs['values'].nda[idx_alp]\n wfs_gam = tb_wfs['values'].nda[idx_gam]\n \n print(f'found {wfs_alp.shape[0]} alpha candidates')\n print(f'found {wfs_gam.shape[0]} gamma candidates')\n \n # plot \n # fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n ts = np.arange(0, wfs_gam.shape[1], 1)\n\n n_gam = 10 if wfs_gam.shape[0] > 10 else wfs_gam.shape[0]\n for iwf in range(n_gam):\n max = np.amax(wfs_gam[iwf,:])\n # max = df_dsp[et].values[iwf]\n plt.plot(ts[:-1], wfs_gam[iwf,:-1]/max, '-b', lw=1, alpha=0.5)\n \n n_alp = 10 if wfs_alp.shape[0] > 10 else wfs_alp.shape[0]\n for iwf in range(n_alp):\n max = np.amax(wfs_alp[iwf,:])\n # max = df_dsp[et].values[iwf]\n plt.plot(ts[:-1], wfs_alp[iwf,:-1]/max, '-r', lw=1, alpha=0.5)\n\n # plt.xlim(1\n\n plt.xlabel('time (clock ticks)', ha='right', x=1)\n plt.ylabel('ADC', ha='right', y=1)\n plt.show()", "def isthmus1D(cube):\n \n return countComponents26(cube) >= 2;", "def spectral_check(self, 
):\r\n a, b = self.dfa, self.dfm.copy()\r\n b['ts_a']=a.ts\r\n b['flux_a'] = a.flux\r\n b['dflux'] = (b.flux-b.flux_a)/b.flux_unc\r\n b['eflux100_a'] = a.eflux100\r\n b['deflux'] = (b.eflux100-b.eflux100_a)/b.eflux100_unc\r\n b['pindex_a'] = a.pindex\r\n b['gdelta'] = (b.pindex-b.pindex_a)/b.pindex_unc\r\n self.dfm = b # since copy\r\n\r\n fig,axx = plt.subplots(1,2, figsize=(10,5), sharey=True)\r\n hkw = dict(bins=np.linspace(-5,5,51), histtype='step', lw=2, density=True)\r\n\r\n cut = (b.ts>50) & ~pd.isnull(b.deflux) & ~pd.isnull(b.gdelta) &\\\r\n (b.modelname==\"LogParabola\") & (b.pindex<3) & (b.pindex>0.5) &\\\r\n (b.e0>500) &(b.eflux100_unc>0) &(b.pindex_unc>0)\r\n self.check_total = sum(cut)\r\n for ax, title, val in zip(axx.flatten(), ['Energy Flux', 'Spectral index'], [b.deflux, b.gdelta]): \r\n\r\n df=val[cut]\r\n ax.hist(df.clip(-5,5), label='mean {:5.2f}\\nstd {:5.2f}'.format(df.mean(),df.std()), **hkw);\r\n ax.grid(alpha=0.5); \r\n x=np.linspace(-4,4)\r\n ax.plot(x, stats.norm.pdf(x), '--g' );\r\n ax.set(xlabel='normalized fit deviation', title=title, )\r\n ax.legend(loc='upper left',prop=dict(family='monospace'))\r\n fig.suptitle('Normalized devations of fit from model', fontsize=16);\r\n\r\n return fig", "def isConsistant(spectrum,kmer): \n return subset_spectrum(spectrum,linear_spectrum(kmer))", "def get_power_spectra(self, n_psbins=50, k_min=None, k_max=None, save=True):\n\n if self.Park19:\n # chunk_z_list_HERA = [27.408, 20.306, 16.0448, 13.204, 11.17485714,\n # 9.653, 8.46933333, 7.5224, 6.74763636, 6.102,\n # 5.55569231, 5.08742857]\n chunk_z_list_HERA = [27.15742, 22.97586, 19.66073, 16.98822, 14.80234,\n 12.99172, 11.4751, 10.19206, 9.09696, 8.15475,\n 7.33818, 6.62582, 6.0006]\n else:\n chunk_z_list_HERA = [27.4, 23.4828, 20.5152, 18.1892, 16.3171, 14.7778, 13.4898, 12.3962,\n 11.4561, 10.6393, 9.92308, 9.28986, 8.72603, 8.22078, 7.76543,\n 7.35294, 6.97753, 6.63441, 6.31959, 6.0297, 5.7619, 5.51376, 5.28319,\n 5.06838]#, 4.86777, 4.68]\n\n if self.lightcones is None:\n self.get_lightcones()\n chunk_indices_HERA = [np.argmin(np.abs(self.lc_redshifts - z_HERA)) for z_HERA in chunk_z_list_HERA][::-1]\n\n if self.vb: print(f' Making powerspectra in {len(chunk_z_list_HERA)} chunks')\n\n self.PS = {}\n use_ETHOS = self.lightcones[0].flag_options.pystruct['USE_ETHOS']\n\n for lc in self.lightcones:\n if use_ETHOS:\n h_PEAK = np.round(lc.astro_params.pystruct['h_PEAK'],1)\n key = f'h_PEAK={h_PEAK:.1f}'\n else:\n key = self.cosmology\n\n theta = lc.astro_params.pystruct[self.param_21cmfast]\n\n if self.param == 'k_PEAK':\n theta = 1./theta**self.k_PEAK_order\n\n if self.param == 'L_X' or 'F' in self.param or self.param == 'M_TURN':\n theta = np.log10(theta) # make L_X, F log10\n\n if key not in self.PS:\n self.PS[key] = {} ##### TODO load PS nicely\n\n if self.vb: print(f' Getting PS for {key}, {self.param}={theta}')\n\n # Make PS\n if k_min is None:\n k_min = self.k_fundamental\n if k_max is None:\n k_max = self.k_max\n\n self.PS_z_HERA, self.PS[key][f'{self.param}={theta}'] = powerspectra_chunks(lc,\n n_psbins=n_psbins,\n chunk_indices=chunk_indices_HERA,\n k_min=k_min,\n k_max=k_max)\n\n del lc\n\n if save:\n np.save(self.PS_file, self.PS, allow_pickle=True)\n if self.vb: print(f' saved PS to {self.PS_file}')\n\n np.save(self.PS_z_HERA_file, self.PS_z_HERA, allow_pickle=True)\n if self.vb: print(f' saved PS_z_HERA to {self.PS_z_HERA_file}')\n\n return", "def verify(self, k, code, counter = -1, window=30, allowed_steps=2):\n # if counter == -1:\n if code == self.hotp(k, 
counter):\n return True\n return False", "def test_ks_test(mode):\n indices = np.random.randint(0, 1000, 1000)\n out = compute_indices_ks_test(indices, 1000, mode=mode)\n assert all([o > 0.0 for o in out])", "def quality_data(self, s):\n known_symbols = np.mod(range(176),48)>=32\n print('quality_data',np.sum(np.real(s[known_symbols])<0))\n success = np.sum(np.real(s[known_symbols])<0) < 20\n return success,0 ## no doppler estimate for data frames", "def fun_hklcheck(self, event=None):\n\n self.fun_get()\n hkl = self.hkl_check.get()\n hkl = hkl.replace(',', ' ') # remove commas\n hkl = hkl.replace('(', '').replace(')', '') # remove brackets\n hkl = hkl.replace('[', '').replace(']', '') # remove brackets\n hkl = np.fromstring(hkl, sep=' ')\n I = self.xtl.Scatter.intensity(hkl)\n\n unit = self.powder_units.get()\n energy = self.energy_kev.get()\n tth = self.xtl.Cell.tth(hkl, energy)\n\n if unit.lower() in ['tth', 'angle', 'twotheta', 'theta', 'two-theta']:\n self.hkl_result.set('I:%10.0f TTH:%8.2f' % (I, tth))\n elif unit.lower() in ['d', 'dspace', 'd-spacing', 'dspacing']:\n q = fc.calqmag(tth, energy)\n d = fc.q2dspace(q)\n self.hkl_result.set(u'I:%10.0f d:%8.2f \\u00c5' % (I, d))\n else:\n q = fc.calqmag(tth, energy)\n self.hkl_result.set(u'I:%8.0f Q:%8.2f \\u00c5\\u207B\\u00B9' % (I, q))", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def verify(self, k, code, counter = -1, window=30, allowed_steps=2):\n # if counter == -1:\n # verifycode = self.hotp(k, counter)\n # else:\n for i in range(0, allowed_steps + 1):\n c = hex(int((time.time() - i * window) // window))[2:]\n while len(c) < 16:\n c = \"0\" + c\n\n verifycode = self.totp(k, c, window=window)\n if code == verifycode:\n return True\n return False", "def QDot_detection(self):\r\n\r\n # Creates a list with the total intensities from the lines as to analyze which lines contain quantum dots\r\n total_intensity_list = []\r\n for (columnName, columnData) in self.df4.iteritems():\r\n total_intensity = 0\r\n for i in columnData.values:\r\n total_intensity += i\r\n total_intensity_list.append(total_intensity)\r\n \r\n # Construct the 3-sigma 
threshold\r\n avg_tot_intensity = mean(total_intensity_list)\r\n stdev_tot_intensity = stdev(total_intensity_list)\r\n\r\n threshold = 3 * stdev_tot_intensity + avg_tot_intensity\r\n\r\n QDot_slits = [total_intensity_list.index(i) + 1 for i in total_intensity_list if i >= threshold]\r\n \r\n # If 2 lines next to each other are labeled as quantum dots, the slit with the lowest total intensity will be discarded\r\n to_be_deleted_slits = []\r\n for i in range(0, len(QDot_slits) - 1):\r\n if QDot_slits[i + 1] - QDot_slits[i] == 1:\r\n if total_intensity_list[QDot_slits[i + 1] - 1] > total_intensity_list[QDot_slits[i] - 1]:\r\n to_be_deleted_slits.append(QDot_slits[i])\r\n elif total_intensity_list[QDot_slits[i + 1] - 1] < total_intensity_list[QDot_slits[i] - 1]:\r\n to_be_deleted_slits.append(QDot_slits[i + 1])\r\n \r\n for slit in to_be_deleted_slits:\r\n QDot_slits.remove(slit)\r\n\r\n # Optional code to plot the total intensities of every slit in the 2d map.\r\n # -------------------------------------------------------------------------\r\n # fig = plt.figure(figsize=(10,7))\r\n # plt.plot(total_intensity_list, label=\"Total intensity\")\r\n # plt.plot([x - 1 for x in QDot_slits], [total_intensity_list[x - 1] for x in QDot_slits], 'rx', label=\"SI-NP\")\r\n # plt.hlines(avg_tot_intensity, 0, 200, colors='red', label=\"Average total intensity\")\r\n # plt.hlines(threshold, 0, 200, colors='green', label='3-sigma threshold')\r\n # plt.title(\"Total intensities for a single datafile\")\r\n # plt.xlabel(\"Position along the slit (pixels)\")\r\n # plt.ylabel(\"Total intensity (arbitrary units)\")\r\n # plt.xlim(0,200)\r\n # plt.ylim(0,60)\r\n # plt.legend()\r\n # plt.show()\r\n\r\n return QDot_slits", "def echo_fun(self, k):\n same_all = self.my_alll(self.id) == self.get_echo_all_j(k)\n ok_deg = ((self.degree(k) - self.degree(self.id)) % 6) in {0, 1}\n return self.echo_no_all(k) and same_all and ok_deg", "def ks_test(timeseries):\r\n\r\n hour_ago = time() - 3600\r\n ten_minutes_ago = time() - 600\r\n reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])\r\n probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])\r\n\r\n if reference.size < 20 or probe.size < 20:\r\n return False\r\n\r\n ks_d,ks_p_value = scipy.stats.ks_2samp(reference, probe)\r\n\r\n if ks_p_value < 0.05 and ks_d > 0.5:\r\n adf = sm.tsa.stattools.adfuller(reference, 10)\r\n if adf[1] < 0.05:\r\n return True\r\n\r\n return False", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n 
_result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = 
data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = 
data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results", "def test_sm_spectrum(self):\n\n spectrum_mg = np.asarray([3.41707366e-02, 1.02592426e-02, 3.20641729e-03, 9.63481603e-04,\n 2.81233386e-04, 8.12019322e-05, 
2.13711295e-05, 5.30226309e-06,\n 1.14687576e-06])\n # Number of SM events generated in MG [66095., 25637., 33458., 48654., 18351., 6849., 59869., 32043., 9044.]\n\n s = 13e3**2\n logbins = np.linspace(np.log10(200),np.log10(2000),10)\n bins = 10**logbins\n nbins = len(bins)-1\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n width = bins[i+1]-bins[i]\n spectrum = pplnu.sigma_qqlnu_int(s, bins[i], bins[i+1], 'mu', 0, par2, center**2, 0, newphys=False)*GeVtopb/width\n err = (spectrum-spectrum_mg[i])/spectrum_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')", "def fn(mask):\n for i in range(len(piles)): \n val = (mask >> 3*i) & 7\n for k in range(1, val+1): \n mask0 = mask - (k << 3*i)\n if not fn(mask0): return True \n return False", "def checkSymbolOverlap(self,noError,centres,idxSymbol,dataBits,trustSymbol):\n\n \n # It showed that when there is an overlap between the last window and the current in the first ~spSym samples.\n # Therefore, we only look at the samples after this overlap\n tO = time.time()\n startOverlap = np.where(centres >= (self.sigOverlapWin))[0][0] # we need to include the boundary once\n endOverlap = np.where(centres > (self.Nfft - self.sigOverlapWin))[0][0]\n \n idxSymbolWin = dataBits[startOverlap:endOverlap]\n idxSymbolPreWin = dataBits[:startOverlap]\n idxSymbolPostWin = dataBits[endOverlap:]\n\n if log.level == logging.DEBUG:\n try:\n log.debug(f'len bits {len(idxSymbol)}\\tlen centres {len(centres)}')\n log.debug(f'\\npreWin: {idxSymbolPreWin.astype(np.int)}\\nWinLast: {self.posSymEnd.astype(np.int)}\\nCurWinStart: {idxSymbolWin[:len(self.poswinP)].astype(np.int)} \\nposSymLast: {self.poswinP.astype(np.int)}')\n except:\n pass\n \n tmpL = len(idxSymbolPreWin) + self.overlapOffset\n tmpP = len(idxSymbolPostWin) + self.overlapOffset\n\n if log.level == logging.DEBUG:\n log.debug('centresOffset[0] ' + str(centres[startOverlap] - self.sigOverlapWin) + '\\tcentresOffset[-1] ' + str(self.Nfft-self.sigOverlapWin-centres[endOverlap]))\n try:\n if noError > self.symbol_check_error_threshold:\n if log.level == logging.DEBUG:\n log.debug('Too many symbol errors')\n elif len(self.poswinP) > 0:\n if log.level == logging.DEBUG:\n log.debug('Check symbol overlap')\n if np.all(self.poswinP[:self.overlapOffset] == idxSymbolWin[:self.overlapOffset]) or np.all(self.posSymEnd[-self.overlapOffset:] == idxSymbolPreWin[-self.overlapOffset:]):\n if log.level == logging.DEBUG:\n log.debug('overlap good')\n else:\n # compute matches\n symPre = np.sum(self.poswinP[:self.overlapOffset] == idxSymbolWin[:self.overlapOffset])\n symPos =np.sum(self.posSymEnd[-self.overlapOffset:] == idxSymbolPreWin[-self.overlapOffset:])\n symEarlyPre = np.sum(self.poswinP[:self.overlapOffset] == idxSymbolWin[1:self.overlapOffset+1])\n symEarlyPos = np.sum(self.posSymEnd[-self.overlapOffset-1:-1] == idxSymbolPreWin[-self.overlapOffset:])\n symLatePre = np.sum(self.poswinP[1:self.overlapOffset+1] == idxSymbolWin[0:self.overlapOffset])\n symLatePos = np.sum(self.posSymEnd[-self.overlapOffset:] == idxSymbolPreWin[-self.overlapOffset-1:-1])\n if log.level == logging.DEBUG:\n log.debug('sum posw == symbWin %d\\t sum posw == symbWinE %d\\t sum posw == symbWinL %d'\n %(symPre,\n symEarlyPre,\n symLatePre))\n log.debug('sum symO == symPre %d\\t sum symO == symbPreE %d\\t sum symO == symbPreL %d'\n %(symPos,\n symEarlyPos,\n symLatePos))\n log.debug('poswin [-%d ; -1] %s' %(self.overlapOffset+1, str(self.posSymEnd[-self.overlapOffset-1:-1].astype(np.int))))\n log.debug('prewin 
[-%d ; -1] %s' %(self.overlapOffset+1, str(idxSymbolPreWin[-self.overlapOffset-1:-1].astype(np.int))))\n\n \n maxPre = np.max((symPre,symEarlyPre,symLatePre))\n maxPos = np.max((symPos,symEarlyPos,symLatePos))\n \n # check early\n if self.symbol_check_match_threshold < symEarlyPre and symEarlyPre == maxPre:\n if log.level == logging.DEBUG:\n log.debug('posWin[:%d]==symbolWin[1:%d] passed' %(self.overlapOffset,self.overlapOffset+1))\n if self.symbol_check_match_threshold < symEarlyPos and symEarlyPos == maxPos:\n if log.level == logging.DEBUG:\n log.debug('removed first bit')\n startOverlap += 1\n idxSymbolWin = idxSymbolWin[1:]\n # Check late\n elif self.symbol_check_match_threshold < symLatePre and symLatePre == maxPre:\n if log.level == logging.DEBUG:\n log.debug('posWin[1:%d]==symbolWin[:%d] passed' % (self.overlapOffset+1,self.overlapOffset)) \n if self.symbol_check_match_threshold < symLatePos and symLatePos == maxPos:\n log.debug('inserted first bit')\n startOverlap -= 1\n idxSymbolWin = np.r_[idxSymbolPreWin[-1], idxSymbolWin]\n if log.level == logging.DEBUG:\n log.debug('new postWin last ' + str(self.poswinP[:self.overlapOffset].astype(np.int)))\n log.debug('new symbolsWin current ' + str(idxSymbolWin[:self.overlapOffset].astype(np.int)))\n\n else:\n if log.level == logging.DEBUG:\n log.debug('Skipping bit alignment, reason: no poswin saved')\n except Exception as e:\n log.error('symbol overlap failed. reason:')\n log.exception(e)\n\n\n dataBitsWin = dataBits[startOverlap:endOverlap]\n dataBitsPreWin = dataBits[:startOverlap]\n dataBitsPostWin = dataBits[endOverlap:]\n trustSymbolWin = trustSymbol[startOverlap:endOverlap]\n centresWin = centres[startOverlap:endOverlap]\n\n \n self.poswinP = dataBitsPostWin \n try:\n self.posSymEnd = dataBitsWin[- self.overlapOffset-1:] # one bit extra\n except Exception:\n # just to guard the case that there are less than 10 symbols in the window\n log.error('Symbols for offset checking not saved -- Less than 10 symbols in the window')\n\n if log.level == logging.DEBUG:\n log.debug('time overlap %f',time.time()-tO)\n\n\n return centresWin,dataBitsWin,trustSymbolWin,idxSymbolWin", "def PCO1S12Noise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/TestOptics_PCO1S12/'\n d1,dx1 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas3.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f13,pow13 = fourier.meanPSD((d1-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f13],[pow12,pow23,pow13])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f13,pow13/f13[0],label='1-3: %.2f' % midfreq[2])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: PCO1S12')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def ThresholdSpectrumColoring(self, k): \n # inicializamos una coloracion y limpiamos la memoria \n semi_coloring = self._new_coloring()\n # reducimos el numero de colores del espectro a k\n spectrum = 
self._spectrum[:k]\n # vertices = self.vertices()\n n_colored = 0\n n_vertices = len(self.vertices())\n while n_colored < n_vertices:\n # tomamos el vertices con mayor grado entre los no visitados\n vertex = self._max_vdegree_with_sdegree(semi_coloring)\n self._vertex_order.append(vertex)\n # tomamos el color con la menor potencial interferencia para el vertice \n color = self._min_semi_interference(vertex, semi_coloring, spectrum)\n # asignamos el color al vertice\n semi_coloring[vertex] = color\n n_colored+=1\n # actualizamos los valores de las memorias\n self._update_values(vertex, color, semi_coloring)\n # tomamos el vertice de mayor grado no adyacente al que seleccionamos\n # anteriormente, y en caso de que exista, repetimos el mismo proceso\n fneighbour = self._max_vdegree_with_sdegree(semi_coloring, self._graph.neighbours(vertex))\n if fneighbour is not None:\n self._vertex_order.append(fneighbour)\n color = self._min_semi_interference(fneighbour, semi_coloring, spectrum)\n semi_coloring[fneighbour] = color\n n_colored+=1 \n self._update_values(fneighbour, color, semi_coloring)\n return self.threshold(semi_coloring), semi_coloring", "def test_window_funcs():\n # get a PSpecData\n uvd = UVData()\n uvd.read_miriad(\n os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'),\n use_future_array_shapes=True\n )\n beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, \"HERA_NF_dipole_power.beamfits\"))\n ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)\n ds.set_spw((0, 20))\n ds.set_taper('bh')\n bl = (37, 38)\n key = (0, bl, 'xx')\n d = uvd.get_data(bl)\n C = np.cov(d[:, :20].T).real\n iC = np.linalg.pinv(C)\n # iterate over various R and M matrices and ensure\n # normalization and dtype is consistent\n for data_weight in ['identity', 'iC']:\n ds.set_weighting(data_weight)\n for norm in ['H^-1', 'I', 'V^-1/2']:\n for exact_norm in [True, False]:\n if exact_norm and norm != 'I':\n # exact_norm only supported for norm == 'I'\n continue\n ds.clear_cache()\n if data_weight == 'iC':\n # fill R with iC\n ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC\n # compute G and H\n Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')\n Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')\n Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,\n band_covar=C)\n # assert row-sum is normalized to 1\n assert np.isclose(Wv.sum(axis=1).real, 1).all()\n # assert this is a real matrix, even though imag is populated\n assert np.isclose(Wv.imag, 0, atol=1e-6).all()", "def power_spectra_from_spike_times(s_times, clust_nums, channel_file, rel_start_ms, rel_stop_ms, freqs,\n noise_freq=[58., 62.], downsample_freq=250, mean_over_spikes=True):\n\n # make a df with 'stTime' column for epoching\n events = pd.DataFrame(data=np.stack([s_times, clust_nums], -1), columns=['stTime', 'cluster_num'])\n\n # load channel data\n signals, timestamps, sr = load_ncs(channel_file)\n\n # downsample the session\n if downsample_freq is not None:\n signals, timestamps, sr = _my_downsample(signals, timestamps, sr, downsample_freq)\n else:\n print('I HIGHLY recommend you downsample the data before computing power across the whole session...')\n print('You will probably run out of memory.')\n\n # make into timeseries\n eeg = TimeSeries.create(signals, samplerate=sr, dims=['time'], coords={'time': timestamps / 1e6})\n\n # filter line noise\n if noise_freq is not None:\n if isinstance(noise_freq[0], float):\n noise_freq = [noise_freq]\n for this_noise_freq in noise_freq:\n b_filter = ButterworthFilter(eeg, 
this_noise_freq, filt_type='stop', order=4)\n eeg = b_filter.filter()\n\n # compute power\n wave_pow = MorletWaveletFilter(eeg, freqs, output='power', width=5, cpus=12, verbose=False).filter()\n\n # log the power\n data = wave_pow.data\n wave_pow.data = numexpr.evaluate('log10(data)')\n\n # get start and stop relative to the spikes\n epochs = _compute_epochs(events, rel_start_ms, rel_stop_ms, timestamps, sr)\n bad_epochs = (np.any(epochs < 0, 1)) | (np.any(epochs > len(signals), 1))\n epochs = epochs[~bad_epochs]\n events = events[~bad_epochs].reset_index(drop=True)\n\n # mean over time within epochs\n spikes_x_freqs = np.stack([np.mean(wave_pow.data[:, x[0]:x[1]], axis=1) for x in epochs])\n\n # make dict with keys being cluster numbers. Mean over spikes if desired.\n pow_spect_dict = {}\n for this_cluster in events.cluster_num.unique():\n if mean_over_spikes:\n pow_spect_dict[this_cluster] = spikes_x_freqs[events.cluster_num == this_cluster].mean(axis=0)\n else:\n pow_spect_dict[this_cluster] = spikes_x_freqs[events.cluster_num == this_cluster]\n\n return pow_spect_dict", "def test_fitting_accuracy(self):\r\n # Instantiate spectrum object, calibrate peak shape and fit all peaks\r\n spec = emg.spectrum(df=self.data,show_plot=False)\r\n spec.detect_peaks(thres=0.0053, plot_smoothed_spec=False,\r\n plot_2nd_deriv=False, plot_detection_result=False)\r\n msg0 = \"Incorrect number of peaks detected.\"\r\n assert len(spec.peaks) == len(self.true_mus), msg0\r\n spec.assign_species([\"Ni58:-1e\",\"Co58:-1e\",\"Mn58?:-1e\",\"Sn116:-2e\"])\r\n spec.assign_species(\"Mn58m?:-1e\", peak_index=2, Ex=71.77, Ex_error=0.05)\r\n spec.determine_peak_shape(species_shape_calib=\"Mn58m?:-1e\",\r\n show_plots=False)\r\n spec.fit_peaks(species_mass_calib=\"Ni58:-1e\",show_plots=False)\r\n\r\n # Perform accuracy checks\r\n for p in spec.peaks:\r\n if p.species == \"Ni58:-1e\":\r\n continue # skip calibrant\r\n msg1 = \"ME deviates from literature by more than 1 sigma.\"\r\n assert p.m_dev_keV <= p.mass_error_keV, msg1\r\n\r\n # Check calculation of (atomic) ME for doubly charged species\r\n if p.species == \"Sn116:-2e\":\r\n ME_dev_keV = p.atomic_ME_keV - self.ME_Sn116_keV\r\n msg2 = str(\"Respective deviation of ionic mass and atomic mass \"\r\n \"excess from literature differ by > 1 sigma for \"\r\n \"Sn116:-2e.\")\r\n assert abs(ME_dev_keV - p.m_dev_keV) < p.mass_error_keV, msg2", "def check(self, data_input, debug_flag):\n self.results = [ [], [], [], False, [] ]\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n\n #Step1: b vs a\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"b_a_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n\n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"b_a_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: c vs b\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"c_b_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[1].append(_result)\n\n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"c_b_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[1].append(_result)\n\n \n #Step3: c vs a\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"c_a_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[2].append(_result)\n\n #Step3.2: price_analaysis\n 
_result = {} \n _result[str(\"c_a_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[2].append(_result)\n\n\n #Step4: Check if this a-b-c is valid or not and which pattern can be chosen\n self.results[3], self.results[4] = self.check_type(data_input, debug_flag)\n\n\n #Step5: return the results\n return self.results", "def tdmSpectrum(channelWidth, nchan):\n if ((channelWidth >= 15e6/2. and nchan>240) or # 1 pol TDM, must avoid 240-chan FDM\n (channelWidth >= 15e6)):\n# (channelWidth >= 15e6 and nchan<96) or # 2 pol TDM (128 chan)\n# (channelWidth >= 30e6 and nchan<96)): # 4 pol TDM (64 chan)\n return True\n else:\n return False" ]
[ "0.6544442", "0.5945773", "0.5757839", "0.5726125", "0.56503886", "0.5619751", "0.5599943", "0.5566535", "0.55061376", "0.54927444", "0.5486352", "0.54780996", "0.546993", "0.5465748", "0.5458723", "0.5456594", "0.5424176", "0.5421518", "0.5421518", "0.5421518", "0.5421144", "0.5420935", "0.54084444", "0.5401628", "0.54008406", "0.536784", "0.5359124", "0.5358815", "0.5356505", "0.53533316" ]
0.63642925
1
Returns the module ID
def identifier(self):
    mod_id = self.read16(regAddr=0x8000)
    # print("Inserted module is", self.cfpDict[mod_id])
    # print("Inserted module is", mod_id)
    return mod_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id(self):\n return \"non_existing_module_required_plugin\"", "def get_id(self):\n return \"non_existing_and_existing_module_required_plugin\"", "def get_id_by_module(self, module_id: str) -> str:\n for labware_id, labware in self.state.labware_by_id.items():\n if (\n isinstance(labware.location, ModuleLocation)\n and labware.location.moduleId == module_id\n ):\n return labware_id\n\n raise errors.exceptions.LabwareNotLoadedOnModuleError(\n \"There is no labware loaded on this Module\"\n )", "def get_module_name(self):\n return self.module_name", "def module_name(self):\n return self.lib.get_module_name()", "def __get_uuid_by_id(self, id_: int) -> int:\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None", "def module_set_id(self) -> str:\n fnames = sorted([\"@\".join(m) for m in self.schema_data.modules])\n return hashlib.sha1(\"\".join(fnames).encode(\"ascii\")).hexdigest()", "def get_module_task_instance_id(task_instances):\n for id in task_instances:\n if task_instances[id] == 'module_node':\n return id\n return None", "def get_unique_id(self, app):\n if self.module_unique_id:\n return self.module_unique_id\n\n if self.form_module_id:\n return f\"{self.form_module_id}.{self.form_id}\"\n\n # legacy data does not have 'form_module_id'\n form = app.get_form(self.form_id)\n return f\"{form.get_module().unique_id}.{self.form_id}\"", "def get_module(self):\n return self.module", "def module_name(self):\n return self.name()", "def module_name(self):\n if hasattr(self, \"module\"):\n return self.module.__name__\n return None", "def module_name(self):\n return self.name", "def module(self):\n return self.lib.module", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def get_full_id(self, module_name, file_name):\n\n host_id = self.hostname\n process_id = self.os_pid\n thread_id = self.get_thread_id()\n\n file_basename = os.path.basename(file_name) # remove the path for better readability\n full_id = \"host={0} filename={1} module={2} process={3} thread={4}\".format(host_id, file_basename, module_name,\n process_id, thread_id)\n\n return full_id", "def module_uuid():\r\n yield uuid.uuid4()", "def getID():", "def getID(self) -> int:\n ...", "def module_name(self):\n return self.config_section", "def get_id(self):\n return \"required_modules_exists_but_condition_is_false_plugin\"", "def get_last_student_module_id(self):\r\n cursor = connection.cursor()\r\n cursor.execute(\"\"\"\r\n SELECT max(student_module_id) FROM courseware_studentmodulehistory\r\n \"\"\")\r\n last = cursor.fetchone()[0]\r\n self.say(\"Last student_module_id is {}\".format(last))\r\n return last" ]
[ "0.7149746", "0.7103554", "0.7022028", "0.69918215", "0.69384813", "0.6864412", "0.6813897", "0.68095", "0.67557555", "0.6731446", "0.6668896", "0.66298217", "0.66143405", "0.6576944", "0.65093726", "0.65093726", "0.65093726", "0.65093726", "0.6486482", "0.6486482", "0.6486482", "0.6486482", "0.6486482", "0.6445807", "0.6410554", "0.6399897", "0.63949627", "0.6356368", "0.634154", "0.6288584" ]
0.79656553
0
Convert an object to a timedelta. If the object is a timedelta, it is returned; if it is a numeric type, the value is interpreted as seconds and that is returned.
def to_timedelta(obj: "Any") -> "timedelta":
    if obj is None:
        raise ValueError("obj cannot be None")
    if isinstance(obj, timedelta):
        return obj
    elif isinstance(obj, (int, float)):
        return timedelta(seconds=obj)
    elif isinstance(obj, Decimal):
        return timedelta(seconds=float(obj))
    elif isinstance(obj, str):
        return timedelta(seconds=float(obj))
    else:
        raise TypeError("could not convert {obj!r} to timedelta")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timedelta_to_duration(obj: \"timedelta\") -> \"Duration\":\n d = Duration()\n d.seconds = int(obj.total_seconds())\n d.nanos = obj.microseconds * 1000\n return d", "def _convert_to_timedelta(time_diff):\n return timedelta(seconds=time_diff)", "def to_timedelta(value) -> timedelta:\n\n # For values >=24hrs, Pandas converts them to a datetime object.\n # For values <24hrs, Pandas converts them to time object.\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, datetime):\n return value - datetime(1900, 1, 1) + timedelta(hours=24)\n elif isinstance(value, time):\n return datetime.combine(date.min, value) - datetime.min\n elif isinstance(value, str):\n duration_regex = re.compile(\n r\"^(?P<sign>-?)(?P<hours>[0-9]+?):(?P<minutes>[0-9]{2})$\"\n )\n parts = duration_regex.match(value.strip())\n if parts is not None:\n sign = parts.group(\"sign\")\n hours = float(parts.group(\"hours\"))\n minutes = float(parts.group(\"minutes\"))\n if sign == \"-\":\n hours = hours * (-1)\n minutes = minutes * (-1)\n return timedelta(hours=hours, minutes=minutes)\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta \"\n \"object. \"\n f\"Values was {value} and type was {type(value)}.\"\n )\n\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta object. \"\n f\"Value was {value} and type was {type(value)}.\"\n )\n\n return timedelta(0)", "def timedelta(self, *a, **kw):\n from datetime import timedelta\n return timedelta(*a, **kw)", "def _convert_to_timedelta(time_diff):\n return timedelta(microseconds=time_diff / _NANO_TO_MICRO)", "def timedelta(self) -> datetime.timedelta:\n factor = -1 if self.negative else 1\n return datetime.timedelta(\n hours=factor * self.hours, minutes=factor * self.minutes\n )", "def handle(self, value, context: typing.MutableMapping):\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, int):\n return timedelta(milliseconds=int(value * self.resolution))\n try:\n return timedelta(\n milliseconds=int(Decimal(value) * self.resolution))\n except (ValueError, InvalidOperation):\n pass\n\n match = self.duration_re.match(value)\n if not match:\n self.report(value, context)\n return None\n\n params = {\n key: int(value)\n for key, value in match.groupdict().items()\n if value\n }\n return timedelta(**params)", "def to_timedelta(self):\n if not self.isInterval:\n raise ValueError(\"self.isInterval == False. 
Use to_datetime \"\n \"instead of to_timedelta\")\n\n days = 365*self.years + self._monthsToDays(self.months) + self.days\n return datetime.timedelta(days=self.days, hours=self.hours,\n minutes=self.minutes, seconds=self.seconds)", "def to_timedelta(self, unit=\"ns\", errors=\"raise\"): # noqa: PR02\n return SeriesDefault.register(pandas.to_timedelta)(\n self, unit=unit, errors=errors\n )", "def dehydrate_timedelta(value):\n months = 0\n days = value.days\n seconds = value.seconds\n nanoseconds = 1000 * value.microseconds\n return Structure(ord(b\"E\"), months, days, seconds, nanoseconds)", "def delta(self) -> timedelta:\n delta = self.data.get(\"delta\", 0)\n return timedelta(seconds=delta)", "def readable_timedelta(timedeltaobj):\n # stolen from https://stackoverflow.com/a/46928226/8207\n if not timedeltaobj:\n return '---'\n secs = timedeltaobj.total_seconds()\n timetot = \"\"\n if secs > 86400: # 60sec * 60min * 24hrs\n days = secs // 86400\n timetot += \"{} days\".format(int(days))\n secs = secs - days * 86400\n\n if secs > 3600:\n hrs = secs // 3600\n timetot += \" {} hours\".format(int(hrs))\n secs = secs - hrs * 3600\n\n if secs > 60:\n mins = secs // 60\n timetot += \" {} minutes\".format(int(mins))\n secs = secs - mins * 60\n\n if secs > 0:\n timetot += \" {} seconds\".format(int(secs))\n return timetot", "def datetime_to_epoch_timedelta(obj: \"datetime\") -> \"timedelta\":\n if obj.tzinfo is not None and obj.tzinfo.utcoffset(obj) is not None:\n # aware time; translate to UTC\n obj = obj.astimezone(timezone.utc)\n obj = obj.replace(tzinfo=None)\n return obj - datetime(1970, 1, 1, 0, 0, 0)", "def dt_to_pytimedelta(self):\n return DateTimeDefault.register(pandas.Series.dt.to_pytimedelta)(self)", "def parse_delta(delta):\n match = TIMEDELTA_PATTERN.match(delta)\n if match:\n parts = {k: int(v) for k, v in match.groupdict().items() if v}\n return datetime.timedelta(**parts)", "def parse_timedelta(value: Optional[str]):\n if not value:\n return None\n unit = value[-1]\n amount = int(value[0:-1])\n if unit == \"h\":\n return timedelta(hours=amount)\n elif unit == \"m\":\n return timedelta(minutes=amount)\n elif unit == \"d\":\n return timedelta(days=amount)\n else:\n raise ValueError(f\"Invalid time unit: {value}\")", "def get_ts_delta(ts):\n if isinstance(ts, tuple) and len(ts) == 2:\n return timedelta(seconds=ts[0], microseconds=ts[1])\n else:\n # Kept for compatibility reasons\n return timedelta(seconds=ts)", "def convert_timedelta(item):\r\n if isinstance(item, timedelta):\r\n seconds = int(item.total_seconds())\r\n hours, remainder = divmod(seconds, 3600)\r\n minutes, seconds = divmod(remainder, 60)\r\n formated = '{}h {}m {}s'.format(hours, minutes, seconds)\r\n else:\r\n raise TypeError(item, 'is not timedelta object')\r\n return formated", "def traverse(self, traverser, **kwargs):\n return traverser.timedelta(self, **kwargs)", "def convert_string_to_timedelta(string):\n # type: (str) -> datetime.timedelta\n if is_none_or_empty(string):\n raise ValueError('{} is not a valid timedelta string'.format(string))\n # get days\n tmp = string.split('.')\n if len(tmp) == 2:\n days = int(tmp[0])\n tmp = tmp[1]\n elif len(tmp) == 1:\n days = 0\n tmp = tmp[0]\n else:\n raise ValueError('{} is not a valid timedelta string'.format(string))\n # get total seconds\n tmp = tmp.split(':')\n if len(tmp) != 3:\n raise ValueError('{} is not a valid timedelta string'.format(string))\n totsec = int(tmp[2]) + int(tmp[1]) * 60 + int(tmp[0]) * 3600\n return datetime.timedelta(days, totsec)", "def 
smooth_timedelta(timedeltaobj):\n secs = timedeltaobj.total_seconds()\n timetot = \"\"\n if secs > 86400: # 60sec * 60min * 24hrs\n days = secs // 86400\n timetot += \"{} days\".format(int(days))\n secs = secs - days*86400\n\n if secs > 3600:\n hrs = secs // 3600\n timetot += \" {} hours\".format(int(hrs))\n secs = secs - hrs*3600\n\n if secs > 60:\n mins = secs // 60\n timetot += \" {} minutes\".format(int(mins))\n secs = secs - mins*60\n\n if secs > 0:\n timetot += \" {} seconds\".format(int(secs))\n return timetot", "def test_int_to_timedelta(self):\n @converters.wrap\n def inner_test(param: datetime.timedelta):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, datetime.timedelta(\n days=3, hours=2, minutes=5, seconds=43\n ))\n inner_test(param=266743)", "def timedeltaToFloat(self,time_d):\n time_d_min = time_d / timedelta(minutes=1)\n time_d_s = time_d / timedelta(seconds=1)\n time_d_ms = time_d / timedelta(milliseconds=1)\n\n return (time_d_min * 60 + time_d_s + time_d_ms * 0.001)", "def parse_timedelta(time_str):\n regex = re.compile(r'^((?P<days>[\\.\\d]+?)d)?((?P<hours>[\\.\\d]+?)h)?((?P<minutes>[\\.\\d]+?)m)?((?P<seconds>[\\.\\d]+?)s)?$')\n time_str=replace(time_str,{\n 'sec':'s',\n 'second': 's',\n 'seconds': 's',\n 'minute':'m',\n 'minutes':'m',\n 'min':'m',\n 'mn':'m',\n 'days':'d',\n 'day':'d',\n 'hours':'h',\n 'hour':'h'})\n parts = regex.match(time_str)\n if parts is None: raise ValueError(\"Could not parse any time information from '{}'. Examples of valid strings: '8h', '2d8h5m20s', '2m4s'\".format(time_str))\n time_params = {name: float(param) for name, param in parts.groupdict().items() if param}\n return timedelta(**time_params)", "def get_time_delta(n):\n return datetime.timedelta(days=n)", "def smooth_timedelta(timedeltaobj):\n secs = timedeltaobj.total_seconds()\n timetot = \"\"\n if secs > 86400:\n days = secs // 86400\n if days == 1:\n timetot += f\"{int(days)} dzień\"\n else:\n timetot += f\"{int(days)} dni\"\n secs = secs - days * 86400\n\n if secs > 3600:\n hrs = secs // 3600\n timetot += f\" {int(hrs)}h\"\n secs = secs - hrs * 3600\n\n if secs > 60:\n mins = secs // 60\n timetot += f\" {int(mins)}m\"\n secs = secs - mins * 60\n\n if secs > 0:\n timetot += f\" {int(secs)}s\"\n return timetot", "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def to_timedelta(da_or_freq):\n if isinstance(da_or_freq, (xr.DataArray, xr.Dataset)):\n freq = da_to_timedelta(da_or_freq)\n else:\n freq = freq_to_timedelta(da_or_freq)\n return freq", "def freq_to_timedelta(freq):\n # Add '1' to freq that doesn't have any digit\n if isinstance(freq, str) and not bool(re.search(r\"\\d\", freq)):\n freq = \"1{}\".format(freq)\n\n # Convert str to datetime.timedelta\n return pd.to_timedelta(freq)", "def dt_td_to_dt(x, dt):\n try:\n x.total_seconds()\n # if it doesn't raise exception it should be a timedelta\n # we shoudl use isinstance(x, timedelta) but it's forbidden\n return(dt + x)\n except:\n # we assume x is a datetime\n return(x)" ]
[ "0.7549371", "0.7208493", "0.71895224", "0.7143933", "0.698645", "0.6935266", "0.6830268", "0.6667241", "0.6469093", "0.6459836", "0.6394731", "0.633544", "0.6323539", "0.63086367", "0.6242395", "0.62389344", "0.6208127", "0.61857295", "0.6101693", "0.60412055", "0.60073674", "0.596489", "0.59415466", "0.5931958", "0.5905871", "0.5883413", "0.5841548", "0.5835426", "0.57959884", "0.5777806" ]
0.8216954
0
Returns a list of control panel types to be added for the default ortho panel layout.
def defaultLayout():
    return ['OverlayDisplayToolBar',
            'OrthoToolBar',
            'OverlayListPanel',
            'LocationPanel']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def controlOrder():\n return ['OverlayListPanel',\n 'LocationPanel',\n 'OverlayInfoPanel',\n 'OverlayDisplayPanel',\n 'CanvasSettingsPanel',\n 'AtlasPanel',\n 'OverlayDisplayToolBar',\n 'OrthoToolBar',\n 'FileTreePanel']", "def list():\n return [Dock.OMNI, Dock.LEFT, Dock.RIGHT]", "def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", "def swing_modes(self) -> list[str]:\n return self._swing_modes", "def swing_modes(self):\n return list(SWING_MODE_TO_DPS_MODE.keys())", "def swing_modes(self):\n return self._swing_list", "def getDisplayModes(self, obj):\n modes = []\n return modes", "def _iter_panels(self, sides='lrbt'):\n axs = [self] if self.get_visible() else []\n if not ({*sides} <= {*'lrbt'}):\n raise ValueError(f'Invalid sides {sides!r}.')\n for s in sides:\n for ax in getattr(self, '_' + s + 'panels'):\n if not ax or not ax.get_visible():\n continue\n axs.append(ax)\n return axs", "def get_controls(self):\n return pn.Column(\n pn.Column(\n pn.Row(super().get_controls(), margin = (0, 0, -25, 0)),\n pn.pane.HoloViews(self.get_band_dmap(), linked_axes=False)\n ), \n )", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def _get_default_controls(self):\n\n pass", "def preset_modes(self):\n return list(PRESET_MODE_TO_DPS_MODE.keys())", "def modes(self) -> List[str]:\n return [m.name for m in self._modes]", "def listEnabledTypes(self):\n actual_type = self.request.get('portal_type', None)\n collage_options = getCollageSiteOptions()\n ttool = getToolByName(self.context, 'portal_types', None)\n if ttool is None:\n return None\n return [\n {\n 'id': pt.getId(),\n 'title': p_(pt.Title()),\n 'selected': pt.getId() == actual_type and 'selected' or None\n }\n for pt in ttool.listTypeInfo()\n if collage_options.enabledAlias(pt.getId())\n ]", "def _get_modes(self, L=0):\n l = np.arange(L + 1).reshape((-1, 1))\n z = np.zeros((L + 1, 2))\n return np.hstack([l, z])", "def panels(self, request, panel_list, group):\n return panel_list", "def preset_modes(self) -> Optional[List[str]]:\n return [PRESET_MODE_WHOOSH]", "def modes(self):\n return np.hstack(tuple(self.operator.modes))", "def output_type_shapes(self):\n return self._output_type_shapes", "def getPanel(*args, allConfigs: bool=True, allPanels: bool=True, allScriptedTypes: bool=True,\n allTypes: bool=True, atPosition: List[int, int]=None, configWithLabel: AnyStr=\"\",\n containing: AnyStr=\"\", invisiblePanels: bool=True, scriptType: AnyStr=\"\", type:\n AnyStr=\"\", typeOf: AnyStr=\"\", underPointer: bool=True, visiblePanels: bool=True,\n withFocus: bool=True, withLabel: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def supported_operation_modes(\n self,\n ) -> list[HVACModeT]:", "def preset_modes(self) -> list:\n return self._preset_modes", "def _get_modes(self, L=0, M=0, N=0):\n dim_pol = 2 * M + 1\n dim_tor = 2 * N + 1\n l = np.arange(L + 1)\n m = np.arange(dim_pol) - M\n n = np.arange(dim_tor) - N\n ll, mm, nn = np.meshgrid(l, m, n)\n ll = ll.reshape((-1, 1), order=\"F\")\n mm = mm.reshape((-1, 
1), order=\"F\")\n nn = nn.reshape((-1, 1), order=\"F\")\n y = np.hstack([ll, mm, nn])\n return y", "def getPaletteButtons(self, paletteType): \n if paletteType == \"cloth\":\n return [ [COLOR_YELLOW, COLOR_ORANGE, COLOR_RED], \n [COLOR_PINK, COLOR_BLUE, COLOR_PURPLE], \n [COLOR_GREEN, COLOR_WHITE, COLOR_BLACK] ] \n elif paletteType == \"hair\":\n return [ [COLOR_BLONDE, COLOR_BROWN, COLOR_BLACK] ]\n elif paletteType == \"skin\":\n return [ [SKIN_1, SKIN_2, SKIN_3], \n [SKIN_4, SKIN_5, SKIN_6], \n [SKIN_7, SKIN_8, SKIN_9]]\n else:\n return []", "def get_op_types(self):\n return self.cur_config['ops']", "def make_widgets(self):\n self.mode_select = Selector(**MODE_SELECT_SETTINGS)\n self.bind_keys_to_modes()\n self.layer_select = Selector(**LAYER_SELECT_SETTINGS)\n self.check_boxes = CheckBoxArray(**CHECK_ARRAY_SETTINGS)\n self.check_boxes.bind_key(pg.K_v, self.toggle_layer_visibility)\n self.navs = [Button(**NAV_LEFT), Button(**NAV_RIGHT)]\n self.save_button = Button(**SAVE_BUTTON)\n self.load_button = Button(**LOAD_BUTTON)\n self.new_button = Button(**NEW_BUTTON)\n self.widgets = [self.mode_select, self.layer_select, self.check_boxes,\n self.navs[0], self.navs[1],\n self.save_button, self.load_button, self.new_button]", "def get_dcm(self):\n control_list = []\n for control in self.__control_list:\n if (control[0] != 'control'):\n control_list.append(control)\n return control_list", "def merge_panels(lst):\n npanels = len(lst) # 16 or 4 or 1\n shape = lst[0].shape # (7, 1, 352, 384)\n ngmods = shape[0] # 7\n dtype = lst[0].dtype #\n\n logger.debug('In merge_panels: number of panels %d number of gain modes %d dtype %s' % (npanels,ngmods,str(dtype)))\n\n # make list for merging of (352,384) blocks in right order\n mrg_lst = []\n for igm in range(ngmods):\n nda1gm = np.stack([lst[ind][igm,0,:] for ind in range(npanels)])\n mrg_lst.append(nda1gm)\n return np.stack(mrg_lst)", "def swing_modes(self):\n return self._swing_modes" ]
[ "0.69737065", "0.5614064", "0.5458695", "0.53503585", "0.53226584", "0.5302421", "0.5277533", "0.5258034", "0.52429163", "0.5233788", "0.5225711", "0.52163637", "0.5176194", "0.5174716", "0.5161187", "0.5154759", "0.514805", "0.5106484", "0.510523", "0.5064108", "0.50568026", "0.50536", "0.5053568", "0.50213003", "0.5014742", "0.49963567", "0.49706882", "0.49694628", "0.4956102", "0.4937697" ]
0.59009427
1
Returns a list of control panel names, specifying the order in which they should appear in the FSLeyes ortho panel settings menu.
def controlOrder():
    return ['OverlayListPanel',
            'LocationPanel',
            'OverlayInfoPanel',
            'OverlayDisplayPanel',
            'CanvasSettingsPanel',
            'AtlasPanel',
            'OverlayDisplayToolBar',
            'OrthoToolBar',
            'FileTreePanel']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def control_name_list(self):\n return list(self._controls.keys())", "def list():\n return [Dock.OMNI, Dock.LEFT, Dock.RIGHT]", "def modes(self) -> List[str]:\n return [m.name for m in self._modes]", "def all_control_names(self):\n return self._get_control_names(\n zope.testbrowser.interfaces.IControl, self.getForm())", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def _get_control_names(self, interface, form):\n return sorted([control.name\n for control in form.controls\n if interface.providedBy(control)])", "def _generateNamedContainingPanel(self, obj, **args):\n result = []\n parent = obj.parent\n while parent and (parent.parent != parent):\n if parent.getRole() == pyatspi.ROLE_PANEL:\n label = self._generateLabelAndName(parent)\n if label:\n result.extend(label)\n break\n parent = parent.parent\n return result", "def get_focus_mode_names(self):\n names = []\n for focus_mode in self.focus_modes:\n names.append(focus_mode['modeName'])\n return names", "def swing_modes(self) -> list[str]:\n return self._swing_modes", "def dir_names(repoman, panel_id):\n dir_panel = repoman.makedir_panel(panel_id)\n return (dir_panel,) + tuple(repoman.makedir_ctypes(panel_id, ctypes=('offset', 'pedestals', 'plots', 'work', 'gain', 'rms', 'status')))", "def get_keys(self):\n return [(['up', 'down', 'pg.up', 'pg.down'],\n 'navigate through the fields.'),\n (['esc'], 'backtrack to the previous pane or exit.'),\n (['F1', '?'], 'open this pane help.')]", "def axesnames(self):\n return self._axesnames", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1", "def defaultLayout():\n return ['OverlayDisplayToolBar',\n 'OrthoToolBar',\n 'OverlayListPanel',\n 'LocationPanel']", "def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer", "def controls(self):\n return self.tab_ctrl.tab_controls", "def listAllSettingNames(self):\n\t\treturn sorted(self.settings.iterkeys())", "def tab_names(self):\n return self.tab_ctrl.tab_names", "def axesNames(self, data, info):\n return []", "def getOptionsNames(self) -> List[unicode]:\n ...", "def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names", "def operatorNames(self):\r\n return [\"moveUp\", \"moveDown\",\r\n \"moveLeft\", \"moveRight\"]", "def _default_axis_names(n_dims):\n _DEFAULT_NAMES = (\"z\", \"y\", \"x\")\n return _DEFAULT_NAMES[-n_dims:]", "def modes(self):\n return np.hstack(tuple(self.operator.modes))", "def panels(self, request, panel_list, group):\n return panel_list", "def swing_modes(self):\n return self._swing_list", "def preset_modes(self) -> list[str]:\n # Use the Vallox profile names for the preset names.\n return list(STR_TO_VALLOX_PROFILE_SETTABLE.keys())", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", "def preset_modes(self):\n return list(PRESET_MODE_TO_DPS_MODE.keys())", "def 
swing_modes(self):\n return list(SWING_MODE_TO_DPS_MODE.keys())" ]
[ "0.6171476", "0.5942579", "0.57504004", "0.57478034", "0.57332575", "0.56833947", "0.5601973", "0.55577797", "0.5538097", "0.5512628", "0.54911685", "0.54777855", "0.5435794", "0.5428175", "0.53371155", "0.5336312", "0.5330748", "0.53245544", "0.5322314", "0.5316433", "0.5286899", "0.5275921", "0.52555144", "0.5212944", "0.5208907", "0.52083874", "0.51866984", "0.5178568", "0.517767", "0.5174035" ]
0.7900497
0
Returns a list of tool names, specifying the order in which they should appear in the FSLeyes ortho panel settings menu.
def toolOrder():
    return ['CropImageAction',
            'EditTransformAction',
            'SampleLineAction',
            'LoadAnnotationsAction',
            'SaveAnnotationsAction',
            'PearsonCorrelateAction']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def controlOrder():\n return ['OverlayListPanel',\n 'LocationPanel',\n 'OverlayInfoPanel',\n 'OverlayDisplayPanel',\n 'CanvasSettingsPanel',\n 'AtlasPanel',\n 'OverlayDisplayToolBar',\n 'OrthoToolBar',\n 'FileTreePanel']", "def getTools(self):\n return [self.toggleEditMode]", "def get_tools() -> List[Dict[str, Any]]:\n tools = []\n with os.scandir(PHP_TOOL_PATH) as it:\n for item in it:\n if not item.name.startswith(\".\") and item.is_dir():\n data = get_tool_options(item.name)\n tools.append(\n {\n \"dir\": \"../php_tools/\" + item.name,\n \"name\": data[\"name\"],\n \"exclude\": [str(x) for x in data[\"exclude\"]],\n }\n )\n return sorted(DEFAULT_TOOLS + tools, key=lambda tool: tool[\"name\"].replace(\"*\", \"\"))", "def xspace_to_tool_names(xspace_paths):\n raw_data, success = _pywrap_profiler.xspace_to_tools_data(\n xspace_paths, 'tool_names')\n if success:\n return [tool + '^' for tool in raw_data.decode().split(',')]\n return []", "def get_tools(self):\r\n\t\tlogger.debug(\"Getting the tools\")\r\n\t\t\r\n\t\treturn db.get_items('tools')", "def tools(self):\n tool1 = TranslatedShape(shape_in=\n RotatedShape(shape_in=\n Cylinder(radius=\n self.wheels_properties[\n 1] + 10.,\n height=400.,\n position=self.position),\n rotation_point=self.position,\n vector=Vector(1, 0, 0),\n angle=radians(90)),\n displacement=\n Vector(self.wheels_properties[0],\n 299.,\n -self.positions[1][0]))\n tool2 = TranslatedShape(shape_in=tool1,\n displacement=Vector(0.,\n 0.,\n self.positions[1][0]\n - self.positions[1][1]))\n tool3 = MirroredShape(shape_in=tool1,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n tool4 = MirroredShape(shape_in=tool2,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n return [tool1, tool2, tool3, tool4]", "def get_focus_mode_names(self):\n names = []\n for focus_mode in self.focus_modes:\n names.append(focus_mode['modeName'])\n return names", "def workflowLessTypes(self):\n\n tools = [c.getName() for c in\n self.atgenerator.getGeneratedTools(self.package)\n if not\n utils.isTGVFalse(c.getTaggedValue('autoinstall'))]\n tools.sort()\n return tools", "def modes(self) -> List[str]:\n return [m.name for m in self._modes]", "def CmdList(self):\n return sorted(self._cmd_alias_list)", "def getOptionsNames(self) -> List[unicode]:\n ...", "def get_tool_shape_ids(self):\n\n tool_shape_ids = [self.variables.zoom_rect_id,\n self.variables.select_rect_id]\n return tool_shape_ids", "def tools(self):\n\n return self._available_tools", "def ovftool_args(self):\n return list(self._ovftool_args)", "def register_other_tools(self):\n self.add_tool(SaveAsTool)\n self.add_tool(CopyToClipboardTool)\n self.add_tool(PrintTool)\n self.add_tool(HelpTool)", "def get_option_names(self):\n # There are no options until the current exploit is set\n if self.exploit is None:\n return []\n\n option_names = self.options.get_option_names()\n\n if self.input is not None:\n option_names += ['input.' + option for option in self.input.options.get_option_names()]\n\n if self.output is not None:\n option_names += ['output.' + option for option in self.output.options.get_option_names()]\n\n if self.exploit is not None:\n option_names += ['exploit.' 
+ option for option in self.exploit.options.get_option_names()]\n\n return option_names", "def swing_modes(self) -> list[str]:\n return self._swing_modes", "def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)", "def getOrderedSetupList(whichTables = None):\n # if whichTables is None, then databaseDependenciesForSetup.keys() is used\n return socorro_pri.dependencyOrder(databaseDependenciesForSetup,whichTables)", "def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names", "def list_commands_for_help(self, ctx):\n commands = super(OrderedHelpGroup, self).list_commands(ctx)\n commands = (c[1] for c in sorted(\n (self.help_priorities.get(command, 1), command if command not in self.hidden_commands else None)\n for command in commands)\n )\n #commands = [command if command not in self.hidden_commands else None for command in commands]\n return commands", "def get_explorer_toolbox() -> List[Tuple[str, str, str]]:\n explorer_toolbox = list(_explorer_toolbox)\n explorer_toolbox.extend(\n (func_name, title, description)\n for func_name, title, description in _bio2bel_functions\n if _function_is_registered(func_name)\n )\n return explorer_toolbox", "def toolName(self):\n return QCoreApplication.translate(\"VDLTools\", \"Multiselect\")", "def persist_tools_options(self, *args):\n\n\t\t# Panel-wide classic tools options (they are not Gio actions!)\n\t\tself._tools_gsettings.set_int('last-size', self.get_tool_width())\n\t\tself._persist_color(self.get_left_color(), 'last-left-rgba')\n\t\tself._persist_color(self.get_right_color(), 'last-right-rgba')\n\n\t\t# Tool-wide boolean actions\n\t\tfor action_name in self._boolean_actions_from_gsetting:\n\t\t\tkey_name = self._boolean_actions_from_gsetting[action_name]\n\t\t\tself._persist_boolean(action_name, key_name)\n\n\t\t# Tool-wide \"enum\" actions\n\t\tfor action_name in self._string_actions_from_gsetting:\n\t\t\tkey_name = self._string_actions_from_gsetting[action_name]\n\t\t\tself._persist_string(action_name, key_name)", "def tool_channels(self):\n return [c for c in self.values() if isinstance(c, ToolChannel)]", "def get_keys(self):\n return [(['up', 'down', 'pg.up', 'pg.down'],\n 'navigate through the fields.'),\n (['esc'], 'backtrack to the previous pane or exit.'),\n (['F1', '?'], 'open this pane help.')]", "def register_standard_tools(self):\n t = self.add_tool(SelectTool)\n self.set_default_tool(t)\n self.add_tool(RectZoomTool)\n self.add_tool(BasePlotMenuTool, \"item\")\n self.add_tool(ExportItemDataTool)\n try:\n import spyderlib.widgets.objecteditor # analysis:ignore\n self.add_tool(EditItemDataTool)\n except ImportError:\n pass\n self.add_tool(ItemCenterTool)\n self.add_tool(DeleteItemTool)\n self.add_separator_tool()\n self.add_tool(BasePlotMenuTool, \"grid\")\n self.add_tool(BasePlotMenuTool, \"axes\")\n self.add_tool(DisplayCoordsTool)\n if self.get_itemlist_panel():\n self.add_tool(ItemListPanelTool)", "def get_tools_used_by_groups():\n global tools_used_by_groups\n\n if not tools_used_by_groups:\n tools_used_by_groups = rsh.tools_used_by_groups(get_srcs())\n \n return tools_used_by_groups", "def get_command_names(self):\n return list(self.commands.keys())", "def get_setup_names(self):\n self.setup_names = list(self._optimetrics.GetSetupNames())\n return self.setup_names.copy()" ]
[ "0.676183", "0.6499401", "0.62972677", "0.6249077", "0.5970247", "0.5940185", "0.58990747", "0.57945955", "0.5769095", "0.5735396", "0.56998837", "0.5694195", "0.5657685", "0.56296474", "0.5604566", "0.55864596", "0.5580212", "0.55689025", "0.55604416", "0.55479664", "0.55206907", "0.55044174", "0.55042785", "0.54979974", "0.5490147", "0.5478498", "0.54726774", "0.54491425", "0.5437542", "0.54302394" ]
0.6583391
1
Called when the interaction profile changes (see
def __profileChanged(self, inst, topic, value):
    old, new = value
    if new is orthoeditprofile.OrthoEditProfile:
        self.__addEditMenu()
    elif old is orthoeditprofile.OrthoEditProfile:
        self.__removeEditMenu()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activated(self):", "def _1_profile(self, _1_profile):\n\n self.__1_profile = _1_profile", "def update_profile(self, channels=None): # pragma: no cover\n pass", "def on_activate(self):", "def preferencesChanged(self):\n # do nothing\n pass", "def on_activate(self) -> None:", "def on_enter(self, userdata):\n pass", "def visualAppearanceChanged(event, obj):\n\n if _currentPresentationManager >= 0:\n _PRESENTATION_MANAGERS[_currentPresentationManager].\\\n visualAppearanceChanged(event, obj)", "def cyChangeProfile(self):\n d = database.getCurrentAndMaxProfileId()\n d.addCallback(self.cbChangeProfile)\n return d", "def onUpdated(self):", "def _profile_on_disconnect(self, device):\n if self.on_connected_changed:\n self.on_connected_changed(\n device=device,\n connected=False)", "def visit_interaction(self, interaction):\n for opt in self.event_json['options']:\n opt_json = self.world_json[opt['id']]\n self._connect_option(interaction, opt_json)", "def hook_observemove(self):\n ui.observemove(self,self.ixTurn,self.ixHotSeat)", "def stateChanged(self, arg0):\n syncJSONtoUI()", "def profile_step(self):\n import profile\n\n profile.run(\"world.step()\")", "def interact(self):\n # Setup the initial value options for the location\n dim = self.default_dimension\n dim2 = set(self.data.dims).difference({dim}).pop()\n options = self.data[dim2].values.tolist()[::self.profile_interval]\n mid = options[len(options)//2]\n\n # Make the slider for choosing the location\n slider_label = widgets.Label(\"at {} value\".format(dim2))\n slider = widgets.SelectionSlider(options=options, value=mid,\n layout=widgets.Layout(width=\"350px\"))\n # Make a menu for choosing the profile direction\n dimension_chooser = widgets.Dropdown(\n options=self.data.dims.keys(), value=dim,\n description=\"Profile along\")\n\n def displayer(location, dimension):\n \"Update and display the plot with given arguments\"\n self.plot(location, dimension)\n display(self.fig)\n\n def handle_dimension_change(change):\n \"Change the location options when dimension changes\"\n dim2 = set(self.data.dims).difference({change.new}).pop()\n slider_label.value = \"at {} value\".format(dim2)\n options = self.data[dim2].values.tolist()[::self.profile_interval]\n slider.options = options\n slider.value = options[len(options)//2]\n\n # Connect the dimension change to the slider\n dimension_chooser.observe(handle_dimension_change, names='value')\n\n # Make the output display and connect it to the callback\n output = widgets.interactive_output(\n displayer, {'location': slider, 'dimension': dimension_chooser})\n\n # Make a title for the widget\n title = widgets.HTML(\n '<strong style=\"font-size: 1.5em;\">Profile selector</strong>')\n\n # Layout the widgets\n layout = widgets.VBox(\n [title,\n widgets.HBox([dimension_chooser, slider_label, slider]),\n output],\n layout=widgets.Layout(align_items=\"center\"))\n\n # For some reason, calling _figure_setup inserts a plot in the output\n # Call clear_output to get rid of it.\n with output:\n clear_output(wait=True)\n display(self.fig)\n\n return layout", "def setprofile(function): # real signature unknown; restored from __doc__\n pass", "def preferencesChanged(self):\n self.__logViewer.preferencesChanged()", "def observe(self, obs):\n self.observation = obs\n self.selected = obs.selected\n \n #############################\n # Update of turn statistics #\n #############################\n if self.id == (obs.step % 6):\n # Store base locations\n if self.__class__.home_base is None:\n self.__class__.home_base = 
(obs.loc[0]+16, obs.loc[1]+8)\n self.__class__.enemy_base = \\\n self.getSymmetricOpposite(self.__class__.home_base)\n \n # Reset trendingSpot\n self.__class__.trendingSpot = {}\n \n # Update friendly CPs\n self.__class__.friendlyCPs = map(lambda x: x[0:2], \n filter(lambda x: x[2] == self.team, obs.cps))\n \n # Update enemy CPs\n self.__class__.enemyCPs = map(lambda x:x[0:2], \n filter(lambda x: x[2] != self.team, obs.cps))\n \n # Update ammo packs \n ammopacks = filter(lambda x: x[2] == \"Ammo\", obs.objects)\n if ammopacks:\n self.updateAllAmmoSpots(ammopacks)\n\n # Update inFriendlyHands stat\n if SETTINGS_DOMINATION_ADDS_UP:\n inFriendlyHands = self.__class__.inFriendlyHands\n else:\n inFriendlyHands = {}\n for cp in self.__class__.friendlyCPs:\n if cp in self.__class__.inFriendlyHands:\n inFriendlyHands[cp] = self.__class__.inFriendlyHands[cp] + 1\n else:\n inFriendlyHands[cp] = 1\n self.__class__.inFriendlyHands = inFriendlyHands", "async def profile(self, ctx:utils.Context):\n\n pass", "def observe(self, observation) -> None:\n super().observe(observation) # Already calls self.policy.update()\n add_data_to_gp(\n self.policy.gp, observation.action.unsqueeze(-1), observation.reward\n )\n self.logger.update(num_gp_inputs=len(self.policy.gp.train_targets))\n if isinstance(self.policy.gp, SparseGP):\n inducing_points = torch.cat(\n (self.policy.gp.xu, observation.action.unsqueeze(-1)), dim=-2\n )\n\n inducing_points = bkb(self.policy.gp, inducing_points)\n self.policy.gp.set_inducing_points(inducing_points)\n self.logger.update(num_inducing_points=self.policy.gp.xu.shape[0])", "def _on_scene_change(self, *args, **kwargs):\n self.on_scene_change()", "def update_interactive_state(self, agent):\n if agent.manual_control:\n # get key presses\n keys = pygame.key.get_pressed()\n agent.handle_move_keys(keys, self.camera)\n agent.handle_other_keys(keys, self.camera)\n else:\n if (isinstance(agent.model, DeepCNNModel)):\n agent.act(self.get_pixels())\n else:\n agent.act(self.get_state())\n\n agent.handle_merge()", "def manipulate_activity():\n pass", "def after_turn(self):\n pass", "def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)", "def before_update(self, actor_critic, algo, *args):\n raise NotImplementedError", "def profile_data(self, profile_data):\n\n self._profile_data = profile_data", "def beforeUpdate(self):", "def set_profile(self, profile: str):\n self._profile = profile" ]
[ "0.58401513", "0.5827181", "0.5751403", "0.5736363", "0.5725692", "0.5656051", "0.5621046", "0.55915654", "0.5552427", "0.55414283", "0.5520362", "0.54863906", "0.54767716", "0.5453715", "0.5445739", "0.5440995", "0.542009", "0.5380361", "0.53492814", "0.5335435", "0.5328072", "0.5308902", "0.5306942", "0.52989036", "0.5276948", "0.52684665", "0.525831", "0.5253232", "0.52493286", "0.52490985" ]
0.67051786
0
Returns a list of methods to be added to the ``FSLeyesFrame`` Tools menu for ``OrthoPanel`` views.
def getTools(self):
    return [self.toggleEditMode]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_explorer_toolbox() -> List[Tuple[str, str, str]]:\n explorer_toolbox = list(_explorer_toolbox)\n explorer_toolbox.extend(\n (func_name, title, description)\n for func_name, title, description in _bio2bel_functions\n if _function_is_registered(func_name)\n )\n return explorer_toolbox", "def _methods_of(cls):\n # The idea of unbound methods exists in Python 2 and was removed in\n # Python 3, so \"inspect.ismethod\" is used here for Python 2 and\n # \"inspect.isfunction\" for Python 3.\n all_methods = inspect.getmembers(\n cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x))\n methods = [m for m in all_methods if not m[0].startswith(\"_\")]\n\n help_groups = {}\n for m in methods:\n group = getattr(m[1], \"help_group\", \"0\")\n help_groups.setdefault(group, []).append(m)\n\n if len(help_groups) > 1:\n # we should sort methods by groups\n methods = []\n for group in sorted(help_groups.items(), key=lambda x: x[0]):\n if methods:\n # None -> empty line between groups\n methods.append((None, None))\n methods.extend(group[1])\n return methods", "def _getAvailableMethods(self):\n bsc = getToolByName(self, 'bika_setup_catalog')\n items = [(c.UID, c.Title) \\\n for c in bsc(portal_type='Method',\n inactive_state = 'active')]\n items.sort(lambda x,y:cmp(x[1], y[1]))\n items.insert(0, ('', t(_('None'))))\n return DisplayList(items)", "def get_menu_items(self) -> typing.List[typing.Tuple[str, typing.List[typing.Tuple[str, typing.Callable[[], None]]]]]: #this method is to be queried by the root frame when it is creating the menu bar at the top of the screen and needs options to put in it\n return []", "def menus(self):\r\n return []", "def methods(self) -> List[str]:\n # TODO(*): Consider make this an abstractmethod.\n return [\"fit\", \"predict\"]", "def allFunctions(self):\n\t\tmodulos=sublime.decode_value(open(RutasPython.funciones()).read())\n\t\tlista=[]\n\t\tfor modulo in modulos:\n\t\t\tlista+=[ (funcion+\"\\t•\"+modulo, self.ponerCursor(modulo+\".\"+funcion)) for funcion in modulos[modulo]]\n\t\treturn sorted(lista)", "def _get_exposed_commands(self):\n # get exposed commands\n exposed = []\n for member_key in dir(self):\n member = getattr(self, member_key)\n if hasattr(member, '__cement_meta__'):\n exposed.append(_clean_label(member_key))\n return exposed", "def get_menus(self):\n \n return [\n ('File', [\n ('New task...\\tCtrl+N', 'Add a new task', self.OnAddTask, wx.ID_NEW),\n ('Edit task...\\tCtrl+E', 'Edit the selected task', None, wx.ID_OPEN),\n ('Remove task...\\tDel', 'Remove the selected task', None, wx.ID_CLOSE),\n (None, ),\n ('&Quit\\tCtrl+Q', 'Close down this program', self.OnExit, wx.ID_EXIT)\n ]),\n ('Help', [\n ('About %s...\\tCtrl+H' % APP_TITLE, 'Learn a little about this program', self.OnAbout, wx.ID_ABOUT),\n ]),\n ]", "def get_command_functions(self) -> None:\n # List of all methods of a bot object\n methods = [method for method in dir(self) if callable(getattr(self, method))]\n for method in methods:\n try:\n # Test if method has attribute _decorators\n if Bot.command in getattr(self, method)._decorators:\n # Add command to commands list\n self.commands[method] = getattr(self, method)\n except AttributeError:\n continue", "def _get_commands(self) -> list:\n return [i[1] for i in inspect.getmembers(self, predicate=lambda i: hasattr(i, \"is_cmd\"))]", "def get_renderMethods(self):\n return self.render_methods", "def tools(asmethods):\r\n tm=[method[0] for method in asmethods]\r\n tools_lst = list(dict.fromkeys(tm))\r\n return tools_lst, (\"Number 
of methods: {}\".format(len(tools_lst)))", "def get_menus():\n\n pass", "def _get_methods(self):\n\n methods = inspect.getmembers(self, predicate=callable)\n method_list = set()\n\n for name, _ in methods:\n if (name in ('proxy', 'start', 'stop', 'part', 'join',)\n or name[0] == '_'):\n continue\n\n method_list.add(name)\n\n return method_list", "def register_standard_tools(self):\n t = self.add_tool(SelectTool)\n self.set_default_tool(t)\n self.add_tool(RectZoomTool)\n self.add_tool(BasePlotMenuTool, \"item\")\n self.add_tool(ExportItemDataTool)\n try:\n import spyderlib.widgets.objecteditor # analysis:ignore\n self.add_tool(EditItemDataTool)\n except ImportError:\n pass\n self.add_tool(ItemCenterTool)\n self.add_tool(DeleteItemTool)\n self.add_separator_tool()\n self.add_tool(BasePlotMenuTool, \"grid\")\n self.add_tool(BasePlotMenuTool, \"axes\")\n self.add_tool(DisplayCoordsTool)\n if self.get_itemlist_panel():\n self.add_tool(ItemListPanelTool)", "def get_tools(cls):\n pass", "def show_tools():\n print(\"\"\"\n List the tools available in this package:\n create_date_features(df, date) #TODO\n create_date_feature_bisiness_quater(df = None, date = None)\n create_date_feature_daytime(df = None, date = None)\n create_date_feature_is_public_holiday(df, date, start, end, country = 'US')\n create_date_feature_is_month_end(df = None, date = None, last = 1)\n create_date_feature_is_weekend(df = None, date = None)\n create_date_feature_is_weekday(df = None, date = None)\n create_date_feature_season(df = None, date = None)\n create_grid(df, keys, target) #TODO\n create_lag_features_with_time_feature(df, cols, time, n = 5, fillna = True)\n create_lag_features_ohne_time_feature(df, cols, n = 5, fillna = True)\n create_window_feature(df, cols = None, col2 = None, win_size = 2, win_type = None, min_periods = 1, agg = 'mean')\n mean_encoder(df, cols, tg)\n \"\"\")", "def get_extension_funcs():\n raise NotImplementedError()", "def __init__(self):\r\n self.label = \"OVL Tools\"\r\n self.alias = \"\"\r\n\r\n # List of tool classes associated with this toolbox\r\n self.tools = [OVLtoFeature, BatchOVLtoFeature]", "def list_methods(self):\n return list(self.methods.keys())", "def register_other_tools(self):\n self.add_tool(SaveAsTool)\n self.add_tool(CopyToClipboardTool)\n self.add_tool(PrintTool)\n self.add_tool(HelpTool)", "def get_plugin_actions(self):\n return []", "def get_plugin_actions(self):\n return []", "def get_extra_lvs_hooks(self) -> List[HammerToolHookAction]:\n return list()", "def getCommands(self):", "def index(self, *_, **__):\n # TODO: There has to be a better way of doing this...\n self.extension = [''.join(self.extensions)]\n _vars = {k: getattr(self,k) for k in dir(self) if not k.startswith('_')}\n _vars['path'] = self.buildpath()\n _vars['extension'] = '\\n\\t'.join(self.extensions)\n return self.menu % _vars", "def get_tools(self):\r\n\t\tlogger.debug(\"Getting the tools\")\r\n\t\t\r\n\t\treturn db.get_items('tools')", "def _interface_methods_ ( self ) :\n \n if not self._itool : return tuple()\n \n my_methods = list ( dir ( GaudiPython.Bindings.iAlgTool ) )\n my_methods += [ i for i in dir ( cpp.IAlgTool ) ] \n my_methods += [ i for i in dir ( cpp.IInterface ) ]\n my_methods += [ i for i in dir ( cpp.IStateful ) ]\n if self._ip : my_methods += [ i for i in dir ( self._ip ) ]\n my_methods = set( my_methods )\n if_methods = set() \n for i in dir( self._itool ) :\n if i in my_methods : continue\n if_methods.add ( i )\n \n return tuple( if_methods )", "def 
game_functions(self):\n\t\treturn self._game_functions" ]
[ "0.6327928", "0.61868453", "0.61474377", "0.6045081", "0.59285265", "0.5887422", "0.5842914", "0.5765655", "0.5742084", "0.5722469", "0.57150394", "0.5635104", "0.56288284", "0.562044", "0.56000537", "0.5555994", "0.55218905", "0.5518331", "0.54967356", "0.5490662", "0.5485893", "0.5483575", "0.5483338", "0.5483338", "0.5475936", "0.54634714", "0.5455466", "0.544344", "0.5434855", "0.5426259" ]
0.6589462
0
Called whenever the panel is resized. Makes sure that the
def __onResize(self, ev):
    ev.Skip()
    self.__calcCanvasSizes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resizeEvent(self, event):\n self.resized.emit()\n return super(PiWndow, self).resizeEvent(event)", "def resizeEvent(self, event):\n self.autosize()\n super().resizeEvent(event)", "def handleResize(self):\n pass", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def resizeEvent(self, event):\n self.updateViewer()", "def ev_windowresized(self, event: WindowResized) -> None:", "def on_parent_resize(self, event):\n #self.resize()\n #self.resize_scaled(drag_rootx=self.resize_frame.winfo_rootx())\n self.resize_scaled(current=MathStat.lerp(0,\n self.prop_frame.winfo_width(), self.last_right_bias))", "def OnResize(self, event):\n self._resizing = True\n self._resize_timer.Start(60, True)", "def resizeEvent(self, event):\n super().resizeEvent(event)\n self.resized.emit()", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def OnResizeEnd(self, event):\n self._resizing = False\n self.Refresh()", "def relayout(self): \n\t\t#self.urmaswin.Layout()\n\t\t#wx.CallAfter(self.urmaswin.Layout)\n\t\t#wx.CallAfter(self.visualizer.OnSize)", "def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()", "def resize(self):\n\t\tself.win.erase()\n\t\tfor c in self.components:\n\t\t\tc.resize()\n\t\tself.draw(True)", "def rescale(self, event: tkinter.Event) -> None:\n # the properties which are linked to the event of reconfiguration\n # contain all the new sizes of the panel :\n self.width, self.height = event.width - 4, event.height - 4\n # The subtraction of 4 pixels is here to compensate the width\n # of the 'highlight bordure' rolling the canvas)\n self.draw_board()", "def onRulerResized(self):\n self.resize(self.ruler.width, self.height)", "def on_canvas_resize(self, event) -> None:\r\n\r\n self.painter.adjust_to_canvas()\r\n self.painter.draw_board()", "def resizeEvent(self, event):\n self.refresh_images(resize=True)\n QMainWindow.resizeEvent(self, event)", "def do_relayout(self):\n # This method is called whenever a relayout is requested. By\n # default, this is when the layout children change. In that case\n # we just need to update the min and max sizes. We are a top\n # level window, so no one really cares about our size hint. 
\n self.update_minimum_size()\n self.update_maximum_size()", "def OnSize(self, event):\r\n\r\n self.Layout()", "def on_resize(self, width, height):\n self.gamestatemanager.peek().on_resize(width, height)", "def _changed_size(self, **kw):\n\t\tself._clear_matrix()\n\t\t\n\t\tself._recalc_adjustments()\n\t\t\n\t\tif self.flags() & gtk.REALIZED:\n\t\t\tif kw.get('resize', True): self.queue_resize()\n\t\t\tif kw.get('draw', True): self.queue_draw()", "def container_resized(self, delta):\n\t\tdw, dh = delta\n\t\tleft, top, right, bottom = self.bounds\n\t\tif self.hmove:\n\t\t\tleft += dw\n\t\t\tright += dw\n\t\telif self.hstretch:\n\t\t\tright += dw\n\t\tif self.vmove:\n\t\t\ttop += dh\n\t\t\tbottom += dh\n\t\telif self.vstretch:\n\t\t\tbottom += dh\n\t\tself.bounds = (left, top, right, bottom)", "def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:", "def OnSize(self, event):\r\n\r\n if self._owner_mgr and self._send_size:\r\n self._owner_mgr.OnFloatingPaneResized(self._pane_window, event.GetSize())", "def onSize(self,event=None):\n if self.app.DEBUG:\n print 'Event: Parent: %s.onSize'%self.__class__\n if self.redraw:self.redraw()", "def resizeEvent(self, event):\r\n QDialog.resizeEvent(self, event)\r\n self.emit(SIGNAL(\"size_change(QSize)\"), self.size())", "def on_window_resized(self):\n self._compute_jitters()", "def resize(self,event):\n if event.widget==self.master:\n Y=event.height\n X=event.width\n self.seqframe.configure(width=X-self.canvas_border_x,\n height=Y-self.canvas_border_y)\n return", "def resize(self):\n pass" ]
[ "0.7825565", "0.76452774", "0.76447415", "0.7641785", "0.7606794", "0.7569148", "0.7501498", "0.74807316", "0.7457183", "0.74085385", "0.7377476", "0.73356706", "0.7333872", "0.72169006", "0.72143465", "0.7176016", "0.7165652", "0.7130218", "0.7113977", "0.70820254", "0.70300025", "0.7029162", "0.70268327", "0.7018864", "0.6967913", "0.6953969", "0.6941419", "0.6929693", "0.6922533", "0.6903907" ]
0.76875395
1
Check if two `DomainParam` are equal by comparing all attributes defined in `get_field_names()`.
def __eq__(self, other):
    if not isinstance(other, DomainParam):
        raise pyrado.TypeErr(given=other, expected_type=DomainParam)

    for fn in self.get_field_names():
        if getattr(self, fn) != getattr(other, fn):
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return np.all([\n self.__getattribute__(name) == other.__getattribute__(name)\n for name in self._fields\n ])", "def check_params_equal(param1, param2):\n for key, val in param1.items():\n if np.any(param1[key] != param2[key]):\n return False\n return True", "def _assert_fields_equal(self, field1, field2):\n self._assert_field_descriptors_equal(\n field1.descriptor, field2.descriptor)\n self.assertEqual(field1.method_name, field2.method_name)\n self.assertEqual(field1.partial_args, field2.partial_args)\n self.assertEqual(field1.partial_kwargs, field2.partial_kwargs)\n if field1.context_args is None:\n self.assertIsNone(field2.context_args)\n else:\n self.assertEqual(\n set(field1.context_args), set(field2.context_args))\n self.assertEqual(field1.attr, field2.attr)", "def equality_check(a, b):\n\n def check_item(x, y, attr):\n if isinstance(x, hoomd.operation._HOOMDGetSetAttrBase):\n equality_check(x, y)\n return\n if isinstance(x, Mapping):\n for k, v in x.items():\n assert k in y, f\"For attr {attr}, key difference {k}\"\n check_item(v, y[k], \".\".join((attr, str(k))))\n return\n if not isinstance(x, str) and hasattr(x, \"__len__\"):\n assert len(x) == len(y)\n for i, (v_x, v_y) in enumerate(zip(x, y)):\n check_item(v_x, v_y, attr + f\"[{i}]\")\n return\n if isinstance(x, float):\n assert numpy.isclose(x, y), f\"attr '{attr}' not equal:\"\n return\n assert x == y, f\"attr '{attr}' not equal:\"\n\n if not isinstance(a, hoomd.operation._HOOMDGetSetAttrBase):\n return a == b\n assert type(a) == type(b)\n\n _check_obj_attr_compatibility(a, b)\n\n for attr in a.__dict__:\n if attr in a._skip_for_equality:\n continue\n\n if attr == \"_param_dict\":\n param_keys = a._param_dict.keys()\n b_param_keys = b._param_dict.keys()\n # Check key equality\n assert param_keys == b_param_keys, \"Incompatible param_dict keys:\"\n # Check item equality\n for key in param_keys:\n check_item(a._param_dict[key], b._param_dict[key], key)\n continue\n\n if attr == \"_typeparam_dict\":\n keys = a._typeparam_dict.keys()\n b_keys = b._typeparam_dict.keys()\n # Check key equality\n assert keys == b_keys, \"Incompatible _typeparam_dict:\"\n # Check item equality\n for key in keys:\n for type_, value in a._typeparam_dict[key].items():\n check_item(value, b._typeparam_dict[key][type_], \".\".join(\n (key, str(type_))))\n continue\n\n check_item(a.__dict__[attr], b.__dict__[attr], attr)", "def compare_fields(field1, field2):\r\n if field1 is None and field2 is None:\r\n return True\r\n\r\n if (field1 is None and field2 is not None) or\\\r\n (field2 is None and field1 is not None):\r\n return False\r\n\r\n if field1 == field2:\r\n return True\r\n\r\n return False", "def __eq__(self, other):\n comparable_fields = ['uuid', 'name', 'email']\n return all([getattr(self, field) == getattr(other, field)\n for field in comparable_fields])", "def compare_fields(field1, field2):\n if field1 is None and field2 is None:\n return True\n\n if (field1 is None and field2 is not None) or\\\n (field2 is None and field1 is not None):\n return False\n\n if field1 == field2:\n return True\n\n return False", "def _are_equal_parameters(u1, u2):\n if u1.keys() != u2.keys():\n return False\n else:\n for k, v in u1.items():\n if not u.allclose_units(v, u2[k]):\n return False\n\n return True", "def attr_is_equal(first_obj, second_obj, attr):\n import numpy as np\n\n # Avoid comparing None's.\n return attr_has_same_shape(first_obj, second_obj, attr) and np.array_equal(\n getattr(first_obj, attr), getattr(second_obj, 
attr)\n )", "def __eq__(self, other):\n return (((not self.name and not other.name) or\n self.name == other.name) and\n self.fields == other.fields)", "def _assert_field_descriptors_equal(\n self, field_descriptor1, field_descriptor2):\n self.assertEqual(field_descriptor1.name, field_descriptor2.name)\n self.assertEqual(\n field_descriptor1.description, field_descriptor2.description)\n self.assertEqual(\n field_descriptor1.is_deprecated, field_descriptor2.is_deprecated)\n self.assertEqual(\n field_descriptor1.deprecation_reason,\n field_descriptor2.deprecation_reason)\n self.assertEqual(\n field_descriptor1.field_type.type_str(),\n field_descriptor2.field_type.type_str())\n self.assertEqual(\n set(field_descriptor1.args.iterkeys()),\n set(field_descriptor2.args.iterkeys()))\n for name, t in field_descriptor1.args.iteritems():\n self.assertEqual(\n t.type_str(), field_descriptor2.args[name].type_str())", "def __eq__(self, other):\n if not isinstance(other, UnchangeableParam):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return (self.field_name == other.field_name and\n self.field_type is other.field_type and\n dict.__eq__(self.field_attrs, other.field_attrs) and\n self.related_model == other.related_model)", "def __eq__(self, other: Any) -> bool:\n # Subclasses should call this as part of their equality checks\n return (\n isinstance(other, BaseField)\n and self._is_nullable == other._is_nullable\n and self._resolve_field_name() == other._resolve_field_name() # may be None == None\n and self._spark_type_class == other._spark_type_class\n and self._metadata == other._metadata # may be None == None\n )", "def __eq__(self, other):\n if not isinstance(other, ParameterDict):\n return NotImplemented\n return (set(self.keys()) == set(other.keys()) and np.all(\n [np.all(self[key] == other[key]) for key in self.keys()]))", "def __eq__(self, other):\n return (other is not None and\n self.field_name == other.field_name and\n self.field_type is other.field_type and\n dict.__eq__(self.field_attrs, other.field_attrs) and\n self.related_model == other.related_model)", "def _data_equality(self, other):\n if self.__class__ is not other.__class__:\n raise ValueError('Both classes must be the same')\n\n for field in self._meta.fields:\n if getattr(self, field.name, None) != getattr(other, field.name, None):\n return False\n\n return True", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def _assert_fields_match(self, actual_field, expected_field):\n assert actual_field is not None, \"Could not find field {name}\".format(name=expected_field[\"name\"])\n\n for key in expected_field:\n assert actual_field[key] == expected_field[key], \\\n \"Expected {expected} for {key} but got {actual} instead\".format(\n key=key, actual=actual_field[key], expected=expected_field[key])", "def __eq__(self, other):\n if not isinstance(other, Parameter):\n return False\n\n return self.to_dict() == other.to_dict()", "def _validate_descriptors_are_equal(a, b, ignore_args=None):\n if ignore_args is None:\n ignore_args = []\n if a.model_type != b.model_type:\n raise ValueError(\"Descriptors have different model types: %s vs %s\" % (a.model_type, b.model_type))\n if len(a.kwargs) != len(b.kwargs):\n raise ValueError(\"Descriptors have kwargs of different length\")\n for k, v in a.kwargs.items():\n if k not in b.kwargs:\n raise ValueError(\"Descriptors a 
!= b because b is missing value for '%s'\", k)\n if k not in ignore_args and b.kwargs[k] != v:\n raise ValueError(\"Descriptors a != b because of different values for '%s': %s != %s\" % (k, v, b.kwargs[k]))", "def fields_match_test(fields: Optional[Sequence[str]] = None):\n\n def fields_match(tested_label: Label, target_label: Label) -> bool:\n if fields is not ...:\n return all(\n getattr(tested_label, field) == getattr(target_label, field)\n for field in fields)\n else:\n return tested_label.shallow_fields_equal(target_label)\n\n return fields_match", "def attrs_eq(received, **expected):\n for k, v in expected.iteritems():\n eq_(v, getattr(received, k))", "def __eq__(self, other):\n return (other is not None and\n ((not self.name and not other.name) or\n self.name == other.name) and\n ((not self.expressions and not other.expressions) or\n self.expressions == other.expressions) and\n self.fields == other.fields and\n dict.__eq__(self.attrs or {}, other.attrs or {}))", "def __eq__(self, other):\n if type(self) != type(other):\n return False\n s_vars = vars(self)\n o_vars = vars(other)\n for v in vars(self):\n if s_vars[v] != o_vars[v]:\n print(\"unequal property {0}\\n\".format(v))\n if v.endswith(\"last_count_update_time\"):\n print(\"self: {0}\\n\".format(s_vars[v]))\n print(\"othr: {0}\\n\".format(o_vars[v]))\n return False\n return True", "def __eq__(self, other):\n\t\treturn all((getattr(self, attr, None) == getattr(other, attr, None) for attr in self.attrs))", "def __eq__(self, other: 'ModelParameters') -> bool:\n if not isinstance(other, ModelParameters) or len(self) != len(other):\n return False\n else:\n return all(torch.equal(p_self, p_other) for p_self, p_other in zip(self.parameters, other.parameters))", "def check_values(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return numpy.allclose(attr1.eval(), attr2.eval())", "def __eq__(self, other):\n if isinstance(other, self._get_class()):\n return self._.hash_parameters == other._.hash_parameters\n else:\n return not isinstance(other, ASParameters) \\\n and self._.hash_parameters == other", "def __eq__(self, other):\n if not isinstance(other, DomainNameInfo):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.7002829", "0.66476244", "0.6631622", "0.6536995", "0.65236616", "0.6493726", "0.6433476", "0.6410068", "0.63820744", "0.6351537", "0.6334997", "0.62845343", "0.62749666", "0.6233371", "0.62293303", "0.62159556", "0.61955047", "0.6181133", "0.6106232", "0.6080795", "0.6074667", "0.607218", "0.6059549", "0.6028287", "0.6004548", "0.6000188", "0.5998327", "0.5967277", "0.5953494", "0.5924821" ]
0.7762589
0
Put an object in the datastore. An object with the same name must not exist, otherwise an AlreadyInDataStore exception is thrown
def put(self, name, obj, lifetime=ObjectLifetime.Event):
    # check if object with the same name is already stored?
    if name in self.store.keys():
        raise AlreadyInDataStore()
    # no, store it!
    self.store[name] = (lifetime, obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(cls, obj):\n pass", "def put_object(self, pool_name, object_name, data):\n return self.put_object_versioned(pool_name, object_name, data)", "def _put(self, name, document):\n raise NotImplementedError", "def put(self, obj):\n\n if obj is None:\n return\n\n return obj", "def store_object(self, _object):\n\n # replace an existing list member, else, append\n\n index = [self.object_store.index(_object_) for _object_ in self.object_store if _object_.LocalID == _object.LocalID]\n\n if index != []:\n\n self.object_store[index[0]] = _object\n\n #if self.settings.LOG_VERBOSE: logger.debug('Updating a stored object: %s in region \\'%s\\'' % (_object.FullID, self.region.SimName))\n\n else:\n\n self.object_store.append(_object)\n\n #if self.settings.LOG_VERBOSE: logger.debug('Stored a new object: %s in region \\'%s\\'' % (_object.LocalID, self.region.SimName))", "def put(self, *args, **kwargs):\n self.before_put(*args, **kwargs)\n\n super(DatastoreModel, self).put(*args, **kwargs)\n\n self.after_put(*args, **kwargs)", "def put(self, obj):\n\n if obj is None:\n return\n\n obj = self.put_process(obj)\n return Placeholder(obj).write()", "def putobjname(self,objname_): # 3\n res = self.__obj.putobjname(objname_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def add(self, obj):\n self.getSession().add(obj)\n self.commit() # paranoially\n return obj", "def put_object(self, bucket_name, key, data):\n url = self.__key_url(bucket_name, key)\n resp = self.infinispan_client.put(url, data=data,\n auth=self.basicAuth,\n headers=self.headers)\n logger.debug(resp)", "def put(self,data):\n\n \n try:\n\n db = getDatabase()\n connection = db.connect()\n \n connection.put(self,data)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def store_update(request, store_name, obj_id, data):\n storedb = redis.Redis(host=HOST, db=STOREDB)\n\n if store_name not in get_store(request):\n return json_response(status=\"ERROR\", status_code=404, error=\"Store does not exist.\") \n \n # Get data from PUT request\n \n storedb.set(store_name + \":\" + obj_id, data)\n\n return obj_id", "def insert_object(self, object: ObjectHandle):\n # Serialize the object descriptor and data part. 
Both items are stored\n # as separate objects.\n descriptor, data = self.factory.serialize(object)\n object_id = self.store.write_object(descriptor)\n data_id = self.store.write_object(data)\n # Add the object information to the index and write the modified index\n # to the data store.\n self.index[object.namespace][object.name] = StoredObject(\n object_id=object_id,\n data_id=data_id,\n name=object.name,\n descriptor=descriptor\n )\n self._write_index()\n # If the object refers to a default object that object is removed since\n # it has been overwritten by the new object.\n try:\n del self.defaults.get(object.namespace, {})[object.name]\n except KeyError:\n pass", "def put_object(self, key, data):\n self.s3client.put_object(Bucket=self.s3_bucket, Key=key, Body=data)", "def save_object(self, obj):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % obj[\"objectID\"]).encode('utf8'), safe='')), self.client.timeout, obj)", "def addobj(self, obj):\n self._objslock.acquire()\n if obj.objid in self._objs:\n self._objslock.release()\n raise KeyError(\"non-unique EMANE object id %s for %s\" % (obj.objid, obj))\n self._objs[obj.objid] = obj\n self._objslock.release()", "def update_object(self, name: str) -> None:", "def put(self, key, value):\n c = self.conn.cursor()\n c.execute(\"REPLACE INTO metastore (k, v) VALUES (?, ?)\", (key, value))\n self.conn.commit()\n return True", "def add(self, object):\n self.lock.acquire()\n self.__Session.add(object)\n self.__Session.commit()\n self.lock.release()", "def add_object(self, obj: str):\n if obj not in self._objects:\n self._objects.append(obj)\n else:\n raise IDAlreadyExists", "def add(self, obj):\n self.session.add(obj)", "def put(self, entity):\n self._cur_batch.put(entity)\n self._num_mutations += 1\n if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:\n self.commit()\n self.begin()", "def update_object(self, name: str) -> None:\n try:\n object = Object.from_name(name)\n except Object.NotFound:\n record = self.catalog.get(name) # must be name pattern recognized by catalog\n log.info(f'Creating new object for {name}')\n Object.add({'type_id': self.__get_type_id(record), 'aliases': self.__get_names(record),\n 'ra': record.ra, 'dec': record.declination, 'redshift': record.redshift,\n 'data': {'tns': record.to_json()}})\n else:\n # find best alternate identifier for catalog search\n for provider in ('iau', 'ztf', 'atlas'): # preferred ordering\n if provider in object.aliases:\n if name != object.aliases[provider]:\n log.debug(f'Searching with name {object.aliases[provider]} <- {name}')\n name = object.aliases[provider]\n break\n else:\n raise TNSError(f'Object ({name}) not found in catalog')\n record = self.catalog.get(name)\n self.__ensure_iau_pattern(record.name)\n if info := self.__build_info(object, record):\n Object.update(object.id, **info)\n else:\n log.info(f'No changes found for {name}')", "def __setitem__(self, name, obj):\n\n with pd.get_store(self.store_path) as store:\n if isinstance(obj, dict) or isinstance(obj, OrderedDict):\n obj = sanitize_dict(obj)\n store.put(name, pd.Series(obj))\n elif isinstance(obj, pd.DataFrame):\n store.put(name, obj)\n elif isinstance(obj, pd.Series):\n store.put(name, obj)\n else:\n log.warning(\"'{}' not saved because {} are not handled.\".format(name, type(obj)))", "def salesforce_insert(self, obj_name, **kwargs):\n self.builtin.log(\"Inserting {} with values {}\".format(obj_name, kwargs))\n obj_class = 
getattr(self.cumulusci.sf, obj_name)\n res = obj_class.create(kwargs)\n self.store_session_record(obj_name, res[\"id\"])\n return res[\"id\"]", "def put(self):\n raise exceptions.NotImplemented", "def upsert(self, obj):\r\n url = '{0}/upsert'.format(self.get_url())\r\n request = http.Request('PUT', url, self.wrap_object(obj))\r\n\r\n return request, parsers.parse_empty", "def put_object(self, bucket_name, body, key):\n return self._client.put_object(Bucket=bucket_name, Body=body, Key=key)", "def put_object(self, object, key, metadata=None):\n if not metadata:\n self.client.put_object(Bucket=self.bucket, Key=key, Body=object, ACL='bucket-owner-full-control')\n else:\n self.client.put_object(Bucket=self.bucket, Key=key, Body=object, Metadata=metadata, ACL='bucket-owner-full-control')", "def put(self, key, obj):\n \n\n def find(found_item, hash_table_cell):\n \"\"\"This function is a closer function which will pass to the find_by_key function to get the value. the purpose of this \n function is to check if key is already present than replace the value else append the value in same bucket.\"\"\"\n if found_item:\n found_item[1] = obj\n else:\n hash_table_cell.append([key, obj])\n self.size += 1\n self._keys.append(key)\n\n self._find_by_key(key, find)\n return self" ]
[ "0.68534213", "0.6666759", "0.6426029", "0.64064765", "0.6380803", "0.6362623", "0.6329907", "0.6295482", "0.6227268", "0.61896014", "0.6187958", "0.61455274", "0.6086965", "0.60847515", "0.6070142", "0.60651827", "0.6038475", "0.6037728", "0.60343707", "0.6028971", "0.60184914", "0.5991849", "0.5985783", "0.5966981", "0.59159994", "0.5913471", "0.59047556", "0.58982587", "0.58870715", "0.58637774" ]
0.75970155
0
Remove all objects from the datastore which have the lifetime specified in the parameter
def clear(self, lifetime):
    # find all entries with the specified lifetime
    to_remove = [k for (k, v) in self.store.items() if v[0] == lifetime]
    # remove all these entries from the list
    for t in to_remove:
        del self.store[t]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()", "def _purge(self):\n for _ in self.all():\n self.delete(_)", "def __del__(self):\r\n self.clearList()", "def remove(self):\n for db in self.values():\n db.remove()", "def cleanup(self):\n deletes = []\n for item in self._collect.find({'status': 'started'}, {'_id': True}):\n deletes.append(pymongo.DeleteOne(item))\n # Remove them\n if len(deletes):\n print(\"Delete\", self._collect.bulk_write(deletes).deleted_count)", "def DeleteContainers(self):\n for container in itertools.chain(*list(self.containers.values())):\n container.Delete()", "def clear(self) -> None:\n self.objects = []", "def delete_all(self):\n if not self.created:\n return\n self.shotgun.batch([dict(\n request_type='delete',\n entity_type=type_,\n entity_id=id_,\n ) for type_, id_ in reversed(self.created)])\n self.created = []", "def remove_objects(self):\n logger.debug('Removing all objects from model.')\n del self._objects[:]", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def clear(self):\r\n\t\tself.free_objects[:] = []", "def drop(cls):\n objects = cls.get_all()\n if isinstance(objects, dict) is False:\n for i in cls.get_all():\n i.delete()\n return True\n else:\n return True", "def remove_all_recs(self):\n return self.storage.clear()", "def clear(*objects_to_clear):\n if not hasattr(_thread_local_data, 'current_space'):\n return\n\n space = current_space()\n for obj in objects_to_clear:\n space.clear(obj)", "def destroy_all(self):\n\t\twhile self.members:\n\t\t\tself.destroy(self.members[0], _no_min=True)", "def remove_all():\n \"\"\" Removes all from the database \"\"\"\n redis_store.flushall()", "def cleanup_all(cls):\n for i in tuple(cls.instances):\n i.cleanup()", "def clear_game_objects(self):\n self.game_objects = list() # [go for go in self.game_objects if go.dont_destroy]\n self.colliders = list()", "def delete_all(self):\n with self.__lock:\n self.__data = dict()\n self.flush()", "def test_data_object_del_all(self):\n pass", "def __del__(self):\n for client in self.client_list:\n del client\n for server in self.server_list:\n del server", "def invalidate_cache(self):\n #self.objects.objects = []\n return True", "def __del__(self) -> None:\n self.map.ent_id.discard(self.id)", "def delete(self):\n self._objects.remove(self._objects[0])\n return self._objects", "def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)", "def clear(self):\n try:\n self._load(False)\n except KeyError:\n return\n\n for i in xrange(self.size):\n try:\n del self.db[i]\n except KeyError:\n pass\n del self.db['count']\n del self.db['head']\n del self.db['size']", "def clear_cache(self):\n self.mongo_database.cache.delete_many({})", "def delete_all(self):\n raise NotImplementedError()", "def removeall(self):\n\n # If there used to be a key, there must exist an old value blob somewhere in the database. It should be deallocated after a successful commit to disk.\n for key in self.keys:\n if self.keys[key] is not None:\n punchat,punchlen = self.keys[key]\n self.awaitingpunch.append((punchat, punchlen))\n \n self.keys = {}\n self.buffered = {}\n self.cache = {}\n \n if self.autocommit:\n commit()", "def _delete_all_containers(self):\n for container_ref in self.created_entities['container']:\n self.barbicanclient.containers.delete(container_ref)" ]
[ "0.67365104", "0.6575106", "0.64553666", "0.641907", "0.64045995", "0.6399982", "0.6379741", "0.6345771", "0.6343712", "0.6339164", "0.63021815", "0.6302086", "0.62741226", "0.6257968", "0.62455934", "0.6240149", "0.6227389", "0.62022084", "0.6196959", "0.6175234", "0.61702436", "0.6148047", "0.61446905", "0.6142774", "0.61263627", "0.61238873", "0.6120748", "0.6100467", "0.60938776", "0.608845" ]
0.7027587
0
Returns a string with the given datetime formatted as a timestamp for the agent's history folder
def create_history_timestamp(dt=None):
    if dt is None:
        dt = datetime.datetime.utcnow()
    return dt.strftime('%Y-%m-%dT%H-%M-%S')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_stamper() :\n\treturn datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")", "def get_git_timestamp(path):\n return int(_run_command(path, 'git log -1 --format=%ct'))", "def timestamp():\n return datetime.now().strftime('%H:%M:%S %m-%d')", "def log_timestamp():\n now = time.localtime()\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", now)\n return timestamp", "def timestamp():\n return datetime.utcnow().strftime(\"%F %T\")", "def timestamp():\n return datetime.now().strftime(\"%Y%m%dT%H%M%S\")", "def time_stamp():\n \n today = datetime.datetime.now()\n return today.strftime(\"%Y-%m-%d %a %H:%M\")", "def _get_timestamp() -> str:\n\n dt = timezone(\"UTC\").localize(datetime.utcnow()).strftime(\"%b. %d, %Y#%H:%M UTC\")\n date, time = dt.split(\"#\")\n return f\"Event Timestamp: 📅 {date} 🕒 {time}\"", "def timeStamp():\n import time\n return str(time.strftime(\"%a %d %b %Y %I:%M:%S %p\"))", "def timestamp():\n\treturn datetime.now().strftime(\"%Y%m%d_%H%M%S\")", "def timestamp():\n print(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\") + \" \" + __file__)", "def hg_timestamps_handler(timestamp, timezone):\n if \"-\" in timezone:\n ts = int(timestamp[:-2]) - int(timezone)\n else:\n ts = int(timestamp[:-2]) + int(timezone)\n return datetime.utcfromtimestamp(ts).strftime(\"%m-%d-%Y %H:%M:%S\")", "def timestamp():\n return datetime.datetime.now().strftime(\"%Y-%m-%d-T%H-%M-%S\")", "def __get_timestamp() -> str:\n return str(datetime.now().astimezone())", "def __get_timestamp() -> str:\n return str(datetime.now().astimezone())", "def timestamp():\n return datetime.now().strftime('%Y-%m-%d_%H-%M-%S')", "def create_timestamps(self):\n str_now = time.strftime(\"%Y%m%d_%H%M%S_\", time.localtime())\n return str_now", "def get_timestamp():\n return datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")", "def create_human_readable_timestamp(self, now):\n hrdatetime = now.strftime('%c')\n return hrdatetime", "def timestamp():\n my_date_object = datetime.utcnow()\n my_date_string = my_date_object.strftime('%d-%m-%Y %H:%M:%S')\n return my_date_string", "def get_timestamp():\n return time.strftime('%Y-%m-%d %H:%M:%S')", "def get_datetime_str():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())", "def _create_time_stamp() -> str:\n\n return datetime.datetime.now().strftime(\"%Y%m%d\")", "def _get_timestamp():\n return str(int(time.time()))", "def getTimeToFileName(self):\n return self.sNow.strftime(\"%d-%m-%Y_%H-%M-%S\")", "def _generate_timestamp():\n\t\treturn strftime(\"%Y%m%dT%H%M%S\")", "def get_time_stamp_str() -> str:\n return datetime.datetime.now().strftime(DateFormat)", "def time_stamp(self, line):\n time_delta = datetime.now() - self._start_time\n return '(' + ':'.join(str(time_delta).split(':')[1:]) + ') ' + line", "def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]", "def timestamp():\n\tn = datetime.datetime.now()\n\treturn \"%04d-%02d-%02dT%02d:%02d:%02d\" % (\n\t\tn.year, n.month, n.day, n.hour, n.minute, n.second\n\t)" ]
[ "0.62167615", "0.6157197", "0.6116242", "0.60908395", "0.60371304", "0.6034678", "0.598345", "0.5981551", "0.5960698", "0.5942786", "0.591269", "0.59055346", "0.59042066", "0.58993036", "0.58993036", "0.5898477", "0.58732146", "0.5872645", "0.5860633", "0.58602196", "0.58313346", "0.5795374", "0.5779092", "0.57774913", "0.57743275", "0.5770823", "0.575827", "0.5755237", "0.5740483", "0.5738937" ]
0.7140919
0
Parse a hue name into a float from 0 to 100.
def numerical_hue(hue):
    if isinstance(hue, str):
        match = hue_regex.match(hue)
        if match:
            return float(match[1]) + HUE_NAMES_TO_NUMBERS[match[3]]
        else:
            return float(hue)
    return hue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_hue(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n r = all_data[name][0]\n g = all_data[name][1]\n b = all_data[name][2]\n denom = max(r,g,b) - min(r,g,b)\n if r > g and r > b:\n return (g - b)/denom\n elif g > r and g > b:\n return 2.0 + (b - r)/denom\n else:\n return 4.0 + (r - g)/denom\n except KeyError:\n raise KeyError(\"No Sensor with that name\")", "def hue(value):\n value = int(value)\n if value < 0 or value > 65535:\n raise ValueError('Hue is a value between 0 and 65535')\n return value", "def create_unique_color_float(tag, hue_step=0.41):\n h, v = (tag * hue_step) % 1, 1. - (int(tag * hue_step) % 4) / 5.\n r, g, b = colorsys.hsv_to_rgb(h, 1., v)\n return r, g, b", "def normalize_hue_transition(transition):\n if transition is not None:\n # hue transition duration is in milliseconds and round them to 100ms\n transition = int(round(transition, 1) * 1000)\n\n return transition", "def normalize_hue_brightness(brightness):\n if brightness is not None:\n # Hue uses a range of [0, 100] to control brightness.\n brightness = float((brightness / 255) * 100)\n\n return brightness", "def hsvHue(rgb):\n return rgbToHsv(rgb)[0]", "def to_hass_level(level):\n return int((level * 255) / 100)", "def parse_color(colstr):\n if('rgb' in colstr):\n expr = r\"rgb\\(\\s*([0-9]{1,3})\\,\\s*([0-9]{1,3})\\,\\s*([0-9]{1,3})\\s*\\)\"\n results = re.search(expr, colstr)\n if(results and len(results.groups()) == 3):\n rgb = (int(results.group(1)), int(results.group(2)), int(results.group(3)))\n for val in rgb:\n if(val > 255):\n print(\"Invalid rgb color. All values should be from 0-255.\")\n exit(-1)\n\n return ('rgb', rgb)\n\n else:\n print(\"Invalid rgb color. See help for more information\")\n exit(-1)\n elif('#' in colstr):\n if(len(colstr) != 7):\n print(\"Invalid hex value. Only 6 digit hex values are supported\")\n exit(-1)\n else:\n try:\n hexval = int(colstr[1:], 16)\n return (\"hex\", hexval)\n except:\n print(\"Failed to parse hex color\")\n exit(-1)\n elif('hsl' in colstr):\n expr = r\"hsl\\(\\s*([0-9]{1,3})\\,\\W*([0-9]{1,3})%{0,1}\\,\\s*([0-9]{1,3})%{0,1}\\s*\\)\"\n results = re.search(expr, colstr)\n if(results and len(results.groups()) == 3):\n h = int(results.group(1))\n s = int(results.group(2))\n l = int(results.group(3))\n if(h < 0 or h > 359):\n print(\"Hue out of range. Range: 0-359\")\n exit(-1)\n elif(s < 0 or s > 100):\n print(\"Saturation out of range. Range: 0-100%\")\n exit(-1)\n elif(l < 0 or s > 100):\n print(\"Lightness out of range. Range: 0-100%\")\n exit(-1)\n else:\n return (\"hsl\", (h, s, l))\n else:\n print(\"Invalid hsl color. See help for more information\")\n exit(-1)\n elif('cmyk' in colstr):\n expr = r\"cmyk\\(\\s*([0-9]{1,3})%{0,1}\\,\\W*([0-9]{1,3})%{0,1}\\,\\s*([0-9]{1,3})%{0,1}\\,\\s*([0-9]{1,3})%{0,1}\\s*\\)\"\n results = re.search(expr, colstr)\n if(results and len(results.groups()) == 4):\n c = int(results.group(1))\n m = int(results.group(2))\n y = int(results.group(3))\n k = int(results.group(4))\n\n if(c < 0 or c > 100):\n print(\"Cyan out of range. Range: 0-100%\")\n exit(-1)\n elif(m < 0 or m > 100):\n print(\"Magenta out of range. Range: 0-100%\")\n exit(-1)\n elif(y < 0 or y > 100):\n print(\"Yellow out of range. Range: 0-100%\")\n exit(-1)\n elif(k < 0 or k > 100):\n print(\"Black out of range. Range 0-100%\")\n exit(-1)\n else:\n return(\"cmyk\", (c, m, y, k))\n else:\n print(\"Invalid cmyk color. 
See help for more information\")\n exit(-1)", "def color_conversion(string):\n if (string == 'J'):\n return 0.14\n if (string == 'I'):\n return 0.28\n if (string == 'H'):\n return 0.42\n if (string == 'G'):\n return 0.56\n if (string == 'F'):\n return 0.70\n if (string == 'E'):\n return 0.84\n if (string == 'D'):\n return 1", "def __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max):\r\n\r\n if var_max == var_min:\r\n return 0.0\r\n elif var_max == var_R:\r\n return (60.0 * ((var_G - var_B) / (var_max - var_min)) + 360) % 360.0\r\n elif var_max == var_G:\r\n return 60.0 * ((var_B - var_R) / (var_max - var_min)) + 120\r\n elif var_max == var_B:\r\n return 60.0 * ((var_R - var_G) / (var_max - var_min)) + 240.0", "def getColor(self, _color):\n c = _color.split()\n \n for n in range(len(c)):\n c[n] = float(c[n])\n \n return c", "def percentage_to_float(self, val):\n return float(val.strip('%'))/100", "def convert_to_float(percentage):\n return float(percentage.strip('%')) / 100", "def _to_float(self, s: str) -> float:\n return int(s[:-1]) / 1e9 if s.endswith('n') else float(s[:-1])", "def getColorValueFromName( strName ):\n dictColor = {\n \"blue\": 0x0000FF,\n \"red\": 0xFF0000,\n \"green\": 0x00FF00,\n \"yellow\": 0xFFFF00,\n \"purple\": 0x801187,\n \"pink\": 0xff00ff,\n \"orange\": 0xff9e00,\n \"brown\": 0x733300,\n \"black\": 0x000000,\n \"grey\": 0x7F7F7F, \n \"white\": 0xFFFFFF,\n };\n try:\n nColor = dictColor[strName.lower()];\n return nColor;\n except:\n pass\n print( \"WRN: abcdk.color.getColorValueFromName: don't know color '%s'\" % strName );\n return -1;", "def setHue ( self, newhue ):\n if isinstance( newhue, int ):\n newhue /= 360.0\n if newhue > 1.0:\n newhue, whole = math.modf(newhue) # Keep decimal part\n self.h = newhue\n self.hsl[0] = newhue\n self.hsla[0] = newhue\n self.updateFromHsl()", "def hs_color(self) -> tuple[float, float] | None:\n colour_json = self.tuya_device.status.get(self.dp_code_colour)\n if not colour_json:\n return None\n colour_data = json.loads(colour_json)\n s_range = self._tuya_hsv_s_range()\n return colour_data.get(\"h\", 0), self.remap(\n colour_data.get(\"s\", 0),\n s_range[0],\n s_range[1],\n HSV_HA_SATURATION_MIN,\n HSV_HA_SATURATION_MAX,\n )", "def convert_percent_str(x):\n if x:\n return float(str(x).strip(\"% \"))\n return 0", "def handle_value(value):\n\n if value[-1] == 'x':\n return float(value[0:-1])\n\n if value[-1] == '%':\n return float(value[0:-1])\n\n if value[0].isdigit():\n return bytify(value)\n\n raise ValueError", "def brightness_to_percentage(brightness):\n return int((brightness * 100.0) / 255.0)", "def brightness_from_percentage(percent):\n return int((percent * 255.0) / 100.0)", "def parse_color(color):\n try:\n color = webcolors.name_to_rgb(color)\n return color.red, color.green, color.blue\n except ValueError:\n pass\n\n try:\n color = webcolors.hex_to_rgb(color)\n return color.red, color.green, color.blue\n except ValueError:\n pass\n\n try:\n data = color.split(\",\")\n return int(data[0]), int(data[1]), int(data[2])\n except Exception:\n pass\n\n return None", "def extract_float(self, s: str) -> float:\n f = re.findall(r'([0-9]*[.]*[0-9]+)', s)\n return float(f[0]) if len(f) > 0 else None", "def get_hp_bar_color(total, value):\n\t# 256 values per color, so red-to-yellow and yellow-to-green make 512 - but yellow gets counted twice, so it's really 511\n\tnum_colors = 511\n\tc = scale(value, total, num_colors)\n\treturn (min(num_colors - c, 255), min(c, 255), 0)", "def _get_color(self, color_name):\n if not color_name:\n 
return 0\n\n if color_name == 'ORANGE':\n color = self.COLOR_ORANGE\n else:\n color = getattr(curses, 'COLOR_' + color_name)\n return curses.color_pair(color)", "def color_from_value(self, value):\n \n return ImageColor.getrgb(\"hsl(%d,%d%%,%d%%)\" % (int( (1.0 - value) * 360 ), 80, 50))", "def hsv_saturation_and_value_factor(rgb):\n hsv = filters.filter_rgb_to_hsv(rgb, display_np_info=False)\n s = filters.filter_hsv_to_s(hsv)\n v = filters.filter_hsv_to_v(hsv)\n s_std = np.std(s)\n v_std = np.std(v)\n if s_std < 0.05 and v_std < 0.05:\n factor = 0.4\n elif s_std < 0.05:\n factor = 0.7\n elif v_std < 0.05:\n factor = 0.7\n else:\n factor = 1\n\n factor = factor ** 2\n return factor", "def get_change_color(self, change_percentage, color_range: int = 5):\n change_percentage = str(change_percentage).split('.')[0] # before the dot\n red = colour.Color(\"#D50000\")\n white = colour.Color(\"#FFFFFF\")\n green = colour.Color(\"#1B5E20\")\n\n int_perc = int(change_percentage)\n\n if int_perc is 0:\n return self.bot.hex_to_int(\"#ffffff\")\n elif change_percentage.startswith('-'):\n colors = list(red.range_to(white, color_range))\n int_perc = int_perc * -1 # make it positive\n int_perc = color_range - int_perc\n int_perc = int_perc if int_perc > 0 else 0 # limit\n return self.bot.hex_to_int(colors[int_perc].hex_l)\n\n int_perc -= 1\n colors = list(white.range_to(green, color_range))\n int_perc = int_perc if int_perc < (color_range - 1) else (color_range - 1) # limit\n return self.bot.hex_to_int(colors[int_perc].hex_l)", "def percent_parse(pstring):\n if pstring.strip().endswith('%'):\n return int(pstring.strip()[:-1]) / 100\n else:\n return np.nan", "def hd_position_to_hass(hd_position):\n return round((hd_position / MAX_POSITION) * 100)" ]
[ "0.67143726", "0.6352335", "0.6242957", "0.61540055", "0.59315515", "0.58809745", "0.5822786", "0.5745986", "0.5706333", "0.56886595", "0.56240076", "0.5592977", "0.5583482", "0.55316204", "0.5526717", "0.5519031", "0.5476976", "0.54667604", "0.54288155", "0.5397848", "0.53843737", "0.5383294", "0.5377416", "0.5371541", "0.5347557", "0.5308376", "0.5306074", "0.5272331", "0.52146566", "0.5212267" ]
0.7692158
0
Makes slices from an iterator and processes them via a process pool.
def slice_and_run(single_iterator: permutations):
    step = 10000000
    start = 0
    stop = start + step
    # I use next_it bool to make sure to create one more slice with no end limit when slices are finished
    next_it = False
    while True:
        if next_it is False:
            cake_slice = islice(single_iterator, start, stop)
        else:
            cake_slice = islice(single_iterator, start, None)
        if args.cores is None:
            with Pool() as pool:
                data = pool.map(printer, cake_slice)
        else:
            with Pool(int(args.cores)) as pool:
                data = pool.map(printer, cake_slice)
        start += step
        stop += step
        if next_it is True:
            break
        if not data:
            next_it = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pool():\n pop = iter([ 'a', 'b', 'c', 'd', 'e' ])\n pop = ops.pool(pop, size=3)\n\n assert(len(pop) == 3)\n assert(pop == [ 'a', 'b', 'c' ])", "def chunker( it, size ):\n \n # Variables\n it = iter( it )\n \n # Selecting a bunch of jobs\n while True:\n p = tuple( itertools.islice( it, size ) )\n if not p:\n break\n yield p", "def make_slices(big_scriptlist):\n num_cores = multiprocessing.cpu_count()\n list_of_scriptlists = [] # This will be our output.\n incrementlist = range(0,len(big_scriptlist),num_cores) # How we increment.\n for i in incrementlist:\n list_of_scriptlists.append(big_scriptlist[i:i+num_cores])\n return list_of_scriptlists", "def __iter__(self):\n\n # collector will fetch chunksize array for each 'get' call\n collector = FIFOArray(self.chunksize, self.axis)\n\n # make tmp array to hold generated subarrs\n tmp = []\n tmp_size = 0\n for subarr in self.data(**self.kwargs):\n\n tmp.append(subarr)\n tmp_size += subarr.shape[self.axis]\n\n # if tmp exceeds chunksize put in collector\n if tmp_size >= self.chunksize:\n arr = np.concatenate(tmp, axis=self.axis)\n collector.put(arr)\n\n # fetch chunksize till not full\n while collector.full():\n yield collector.get()\n\n # place leftover back into tmp and empty collector\n tmp = [collector.queue]\n tmp_size = collector.qsize()\n collector.queue = np.array([])\n\n else:\n\n # append to tmp again\n continue\n\n # else runs after normal loop exit -- required here\n else: #pylint: disable=useless-else-on-loop\n\n # yield whatever is left in tmp (its below chunksize)\n remaining = np.concatenate(tmp, axis=self.axis)\n if remaining.size > 0:\n yield remaining", "def _chunker(self, seq, size):\n return (seq.iloc[pos:pos + size] for pos in range(0, len(seq), size))", "def chunk(it, size):\n it = iter(it)\n return iter(lambda: list(islice(it, size)), [])", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r", "def chunk(iter_list, size):\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())", "def chunk(it, size):\n\tit = iter(it)\n\treturn iter(lambda: tuple(islice(it, size)), ())", "def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):\n with Timer('\\t{0} ({1}) completed in'.format(process_name, str(func))):\n pool = Pool(cpus)\n vals = pool.map(func, iterable)\n pool.close()\n return vals", "def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()", "def pipeline(func):\n @wraps(func)\n def process(img_or_iterable, *args, **kwargs):\n if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):\n _len = len(img_or_iterable)\n s = SliceableIterable(img_or_iterable, range(_len), _len)\n s._proc_func = lambda image: func(image, *args, **kwargs)\n return s\n else:\n # Fall back on normal behavior of func, interpreting input\n # as a single image.\n return func(img_or_iterable)\n\n if process.__doc__ is None:\n process.__doc__ = ''\n process.__doc__ = (\"This function has been made pims-aware. When passed\\n\"\n \"a pims reader or SliceableIterable, it will return a \\n\"\n \"new SliceableIterable of the results. 
When passed \\n\"\n \"other objects, its behavior is \"\n \"unchanged.\\n\\n\") + process.__doc__\n return process", "def _chunker(self, seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))", "def _islice_batched(it: Iterator[np.ndarray], n: int) -> Iterator[np.ndarray]:\n while n > 0:\n arr: np.ndarray = next(it)\n k = arr.shape[0]\n yield arr[:n, :]\n n -= k", "def FastaM10Iterator(handle, seq_count=...):\n ...", "def chunks(iterator, size):\n for index in range(0, len(iterator), size):\n yield iterator[index:index + size]", "def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n num_batches = len(self.coords_batcher)\n if worker_info is None:\n # In single-processing mode\n start, end = 0, num_batches\n else:\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n shard_size = int(np.ceil(num_batches / num_workers))\n start = shard_size * worker_id\n end = min(start + shard_size, num_batches)\n return (self.get_batch(i) for i in range(start, end))", "def test_create_chunks():\n items = list(range(0, 100))\n size = 3\n\n chunks = create_chunks(items, size)\n\n current = next(chunks)\n assert len(current) == size\n assert current == [0, 1, 2]\n\n current = next(chunks)\n assert current == [3, 4, 5]", "def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)", "def __iter__(self):\n\n starts = range(0, self.data.shape[self.axis], self.chunksize)\n\n for t in zip_longest(starts, starts[1:], fillvalue=None):\n yield self.data[self._slice(*t)]", "def run_async(self, items, pool):\n return pool.imap(self, items)", "def iter_unpack(raw):\n return chunks(raw)", "def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data", "def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items", "def _repopulate_pool(self):\n for i in range(self._processes - 
len(self._pool)):\n w = self.Process(target=worker,\n args=(self._inqueue, self._outqueue,\n self._initializer,\n self._initargs, self._maxtasksperchild,\n self._wrap_exception,\n self._finalizer,\n self._finalargs)\n )\n self._pool.append(w)\n w.name = w.name.replace('Process', 'PoolWorker')\n w.daemon = True\n w.start()\n util.debug('added worker')", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def start_multi(lines, func, p=6, suffix=0, write=False, pre_train=True):\n _info('Execute by {}th processes'.format(p))\n pool = Pool(p)\n \n if type(lines) is not zip:\n num_each_b = len(lines) // p\n else:\n # fine tune step, the lines is zip type,\n # this kind of type has no length, so just set the lines to data_b\n data_b = lines\n \n results = []\n for i in range(p):\n if i < (p-1):\n if pre_train: # the reason for adding judgement here is just identical to the above comment\n data_b = lines[i * num_each_b : i * num_each_b + num_each_b]\n else:\n if pre_train:\n data_b = lines[i * num_each_b :]\n if not write:\n if pre_train:\n results.append(pool.apply_async(func, (data_b, )))\n else:\n assert p == 1, _error('process number should equal to 1 when save the file', head='Value Error')\n pool.apply_async(func, (data_b, suffix, pre_train))\n \n if not write:\n for i in range(p):\n results[i] = results[i].get()\n \n pool.close()\n pool.join()\n \n if not write:\n return list(chain(*results))", "def _map_to_workers(self, iterable, result_getter):\n if not self.is_started:\n raise RuntimeError(\"Cannot process inputs: must call start() first.\")\n\n tasks = TaskIterator(iterable)\n task = next(tasks)\n\n while True:\n try:\n self._send_task(task)\n task = next(tasks)\n except Queue.Full:\n for result in result_getter(): # I wish I had `yield from` :(\n yield result\n except StopIteration:\n break\n\n while not self.is_completed:\n for result in result_getter():\n yield result" ]
[ "0.6685673", "0.65190715", "0.6181122", "0.59900594", "0.59768945", "0.5964202", "0.593769", "0.5937118", "0.59308493", "0.59113395", "0.5901824", "0.5894978", "0.5858956", "0.58363086", "0.5830125", "0.58101326", "0.5802128", "0.5788993", "0.5778363", "0.5738621", "0.57337373", "0.56961364", "0.569297", "0.567752", "0.56762695", "0.5671878", "0.5671878", "0.5671878", "0.56615007", "0.56479824" ]
0.6883492
0
Apply the leet mutagen and then, if needed, apply append and prepend to the leeted version of the string.
def leet(line_leet: str):
    for old_printer, new_printer in leet_replacements:
        line_leet = line_leet.replace(old_printer, new_printer)
    print(line_leet)
    if args.append is not None:
        print(line_leet + args.append)
    if args.prepend is not None:
        print(args.prepend + line_leet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lemmatize_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = None\n if self.lemmatize_method == 'wordnet':\n cleaned_tokens = [self.lemmatizer.lemmatize(token) for token in tokens]\n else:\n cleaned_tokens = [self.lemmatizer.stem(token) for token in tokens]\n \n self.doc = ' '.join(cleaned_tokens)", "def lemmatize(text):\n\n lem = WordNetLemmatizer()\n return ' '.join(list(map(lambda x: lem.lemmatize(x, 'v'),\n text.split())))", "def lemmatize(self, sentence):\n porter_stemmer = PorterStemmer()\n return ' '.join(porter_stemmer.stem(str(w)) for w in sentence.lower().split())", "def lemmatiser(list_of_words, tag):\n \n output = []\n for entry in list_of_words:\n if phrases:\n # just get the rightmost word\n word = entry[-1]\n entry.pop()\n else:\n word = entry\n if translated_option.startswith('u'):\n if word in taglemma:\n word = taglemma[word]\n else:\n if word == 'x':\n word = 'Other'\n # only use wordnet lemmatiser when appropriate\n elif not dependency:\n if word in wordlist:\n word = wordlist[word]\n word = lmtzr.lemmatize(word, tag)\n # do the manual_lemmatisation\n else:\n if word in wordlist:\n word = wordlist[word]\n if phrases:\n entry.append(word)\n output.append(entry)\n else:\n output.append(word)\n return output", "def translate_leet(phrase):", "def petit_nettoyage(ligne, lem_v=True, lem_n=True, len_elt=2, stopw=[]):\n lemmatizer = WordNetLemmatizer()\n for elt in ligne:\n if elt in (string.punctuation + string.digits):\n ligne = ligne.replace(elt, \" \")\n if lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if lemmatizer.lemmatize(elt, pos=\"v\") not in stopw\n ]\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in liste\n if len(lemmatizer.lemmatize(elt, pos=\"n\")) > len_elt\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"v\") not in stopw)\n and (len(elt) > len_elt)\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"n\") not in stopw)\n and (len(elt) > len_elt)\n ]\n else:\n liste = [\n elt\n for elt in ligne.split()\n if (elt not in stopw) and (len(elt) > len_elt)\n ]\n ligne = \" \".join(liste)\n return ligne", "def lemmatiser(list_of_words, tag):\n output = []\n for word in list_of_words:\n if translated_option.startswith('u'):\n word = taglemma.get(word.lower(), 'Other')\n else:\n word = wordlist.get(word, lmtzr.lemmatize(word, tag))\n if not preserve_case:\n word = word.lower()\n output.append(word)\n return output", "def add2chain1st(splitted_text):\n # Our key is the unique occurrence of a pair of words\n inputText = splitted_text\n if len(inputText) > 1:\n for i, word in enumerate(inputText):\n if i == 0: # Chaining the first and second word in tweet to start key\n if (startKey) not in chain:\n chain[(startKey)] = [word]\n else:\n chain[(startKey)].append(word)\n else:\n if (inputText[i-1]) not in chain:\n chain[(inputText[i-1])] = [word]\n else:\n chain[(inputText[i-1])].append(word)\n if i == len(inputText)-1: # If sentence ends here, connect to end\n if (word) not in chain:\n chain[(word)] = [endKey]\n else:\n chain[(word)].append(endKey)\n if (startKey) not in chain:\n chain[(startKey)] = [inputText[0]]\n else:\n chain[(startKey)].append(inputText[0])\n if (inputText[0]) not in chain:\n chain[(inputText[0])] = [endKey]\n else:\n chain[(inputText[0])].append(endKey)", "def love(some_text):\n #This 
function will take a text and change the secound last word to \"love\".\n sentence = some_text.split()\n #print(sentence)\n sentence[-2] = \"love\"\n new_text = \" \".join(sentence)\n #print(new_text)\n return new_text", "def lemmatize_sentence(sentence: str) -> str:\n\n nltk_tagged = nltk.pos_tag(nltk.word_tokenize(sentence))\n # tuple of (token, wordnet_tag)\n wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)\n lemmatized_sentence = []\n for word, tag in wordnet_tagged:\n if tag is None:\n # if there is no available tag, append the token as is\n lemmatized_sentence.append(word)\n else:\n # else use the tag to lemmatize the token\n lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))\n return \" \".join(lemmatized_sentence)", "def mutateSentences(st):\n # BEGIN_YOUR_CODE (our solution is 20 lines of code, but don't worry if you deviate from this)\n done, length = [], len(st.split())\n def alike(num):\n return [i for i,w in enumerate(st.split()) if st.split()[num] == w]\n def fun(i,branch_at_i):\n if len(branch_at_i) == length:\n done.append(branch_at_i)\n return ''\n if i == length-1:\n return ''\n al = alike(i)\n if len(al)>1:\n for j in al:\n fun(j+1,branch_at_i+f'{j+1}')\n return ''\n fun(i+1,branch_at_i+f'{i+1}')\n [fun(i,f'{i}') for i in range(length)]\n return list({' '.join(st.split()[int(i)] for i in comb) for comb in done})", "def lemmatize_text(text):\n text = nlp(text)\n text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])\n return text", "def lemmatize(query):\n wordlist = [wnl.lemmatize(word).lower() for word in query]\n return \" \".join(wordlist)", "def make_text(sent, begin, end):\n lemmas = [sent.morps[begin].lemma(), ]\n for idx in range(begin+1, end):\n if sent.mid2wid[idx-1] != sent.mid2wid[idx]: # if go over word boundary\n # insert space between words\n lemmas.append(' ')\n lemmas.append(sent.morps[idx].lemma())\n return ''.join(lemmas)", "def lemmatize(query):\n wordlist = [wnl.lemmatize(word) for word in query.split()]\n return \" \".join(wordlist)", "def lemmatize(text):\n word_tokens = nltk.word_tokenize(text)\n lemmatized_word = [wordnet_lemmatizer.lemmatize(word) for word in word_tokens]\n return (\" \".join(lemmatized_word))", "def combineparse2sent(sent, parse):\n parse = parse.split()\n tokenlist = [token.word for token in sent.tokenlist]\n parselist, tidx = [\"\"] * len(tokenlist), 0\n while parse:\n item = parse.pop(0)\n parselist[tidx] += (\" \" + item)\n partialparse = parselist[tidx].replace(' ', '')\n word = tokenlist[tidx].replace(' ', '')\n # print word, partialparse\n if (word + ')') in partialparse:\n tidx += 1\n # Attach to sent\n for (tidx, token) in enumerate(sent.tokenlist):\n item = parselist[tidx]\n sent.tokenlist[tidx].partialparse = item\n return sent", "def replaceElongated(word):\n\n repeat_regexp = re.compile(r'(\\w*)(\\w)\\2(\\w*)')\n repl = r'\\1\\2\\3'\n if wordnet.synsets(word):\n return word\n repl_word = repeat_regexp.sub(repl, word)\n if repl_word != word: \n return replaceElongated(repl_word)\n else: \n return repl_word", "def _append_with_string_merge(seq, new_item):\n if seq and isinstance(new_item, text_type) and isinstance(seq[-1], text_type):\n s = seq.pop()\n seq.append(s+new_item)\n else:\n seq.append(new_item)", "def lemmatize(data: pd.Series) -> pd.Series:\n lemmatizer = WordNetLemmatizer()\n return data.apply(lambda row: re.sub(\n r'\\b\\w+\\b', lambda match: lemmatizer.lemmatize(\n match.group(), pos=to_pos([match.group()])), row))", "def 
verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()", "def rep_legtags(text, footnotes):\n textstring = text\n tagsintext = []\n taggedtextlist = []\n tagtextpat = re.compile(r'\\[/[a-z]\\]')\n tagtextitir = tagtextpat.finditer(textstring)\n for tagfound in tagtextitir:\n closetag = tagfound.group()\n opentag = \"[\" + closetag[2:]\n tag = opentag[1:-1]\n tagsintext.append(tag)\n tagtextlist = get_tagtext(textstring, tag)\n for taggedtext in tagtextlist:\n tagstring = opentag + taggedtext + closetag\n taggedtextlist.append(tagstring)\n for tag in tagsintext:\n tagplace = tagsintext.index(tag)\n replacetext = taggedtextlist[tagplace]\n for footnote in footnotes:\n if footnote[:2] == tag + \" \":\n if \"[/LRep]\" in footnote:\n replacementlist = get_tagtext(footnote, \"LRep\")\n repstring = \"[LRep]\" + replacementlist[0] + \"[/LRep]\"\n textstringlist = textstring.split(replacetext)\n textstring = repstring.join(textstringlist)\n return textstring", "def __chunk_lemmatize(self, chunk):\n\n word = self.lemmatizer.lemmatize(chunk.text, pos='n')\n\n return word", "def love(text):\n a = text.split( )\n l = (len(a)-2)\n del a[l]\n a.insert(l,\"love\")\n return ' '.join(a)", "def lemmatize_text_rus(text):\n text_lemm, text_sent = lemmatize_texts_rus([text])\n text_lemm, text_sent = text_lemm[0], text_sent[0]\n return text_lemm, text_sent", "def lemmatize_verbs(self):\n lemmas = []\n # lemmas = \"\"\n for word in self.words:\n lemma = wn.lemmatize(word, pos='v')\n lemmas.append(lemma)\n # lemmas += f\"{lemma} \"\n self.words = lemmas\n return self", "def remplace_lettre(mot, lettre, lpos):\n copie=\"\"\n for i in 
range(len(mot)):\n if i in lpos:\n copie += lettre\n else:\n copie += mot[i]\n return copie", "def lemmatize_words(text: str, lemmatizer=WordNetLemmatizer()) -> str:\n return ' '.join(lemmatizer.lemmatize(word) for word in text.split())", "def apply(self, text):", "def lemmatize(wn, word, pos='N'):\n pos = get_wordnet_pos(wn, pos)\n lemmas = wn._morphy(word, pos)\n return min(lemmas, key=len) if lemmas else word" ]
[ "0.5577453", "0.5502467", "0.54995257", "0.5455092", "0.5454772", "0.5342663", "0.53197235", "0.5301585", "0.5287481", "0.52212524", "0.52083796", "0.52012616", "0.51859516", "0.51522714", "0.5134461", "0.5132348", "0.5117089", "0.51013833", "0.5083767", "0.5057464", "0.5054277", "0.50461245", "0.50409806", "0.50339156", "0.50154597", "0.50148946", "0.5012146", "0.5011152", "0.4987812", "0.4959818" ]
0.65273523
0
This snippet will round up any given integer or float to at least 10, or to the next higher multiple of 10. You can pass an unsigned/positive integer as the "end" argument if you want the number to be rounded up to at least a multiple of "end" instead. The returned value will always be an integer.
def roundAlwaysUp( toRound, end = 10 ): end = abs( int(end) ) if end == 0: end = 10 times = toRound/end if times >= 0: times = times + 1 else: times = times - 1 return ( int( times ) )*end;
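A minimal usage sketch for the snippet above, assuming Python 3 division semantics; the inputs and the results noted in the comments are illustrative, not taken from the source:

```python
# Illustrative calls to roundAlwaysUp as defined above; expected results are noted in comments.
print(roundAlwaysUp(3))      # 10  -> small positive values are pushed up to at least 10
print(roundAlwaysUp(15))     # 20  -> next higher multiple of 10
print(roundAlwaysUp(20))     # 30  -> exact multiples are still pushed one step further
print(roundAlwaysUp(7, 25))  # 25  -> the "end" argument changes the step size
print(roundAlwaysUp(-5))     # -10 -> negative input is pushed away from zero
```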
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upround(x, base):\n return base * math.ceil(float(x) / base)", "def roundUP(x):\n\treturn int(ceil(x / 10.0)) * 10", "def roundTo(numberValue, baseValue):\n # Round a numberValue, to multiples of baseValue\n return( numberValue + (baseValue - numberValue) % baseValue ) # roundTo", "def round_up(number: int, multiple: int) -> int:\n assert multiple != 0\n\n return int((number + multiple - 1) / multiple) * multiple", "def RoundUp(value, boundary):\n return (value + boundary - 1) & ~(boundary - 1)", "def roundup_int(x, m):\n\treturn int(math.ceil(x / float(m))) * m", "def round_to_multiple_of(val, divisor, round_up_bias=0.9):\n assert 0.0 < round_up_bias < 1.0\n new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)\n return new_val if new_val >= round_up_bias * val else new_val + divisor", "def round_up(x, sig=2):\n dig = pow(10., sig - int(math.floor(math.log10(abs(x)))) - 1)\n return math.ceil(x * dig) / dig", "def downround(x, base):\n return base * math.floor(float(x) / base)", "def round_up(number, decimals=0):\n multiplier = 10 ** decimals\n return math.ceil(number * multiplier) / multiplier", "def round_up_div(value: int, divisor: int) -> int:\n return (value + divisor - 1) // divisor", "def ceil_10(i: int) -> int:\n return 10 ** (math.ceil(math.log10(i)))", "def my_round(x, base=10):\n return base * round(x / base)", "def round_down(x):\n return int(math.floor(x / 10.0)) * 10", "def _round_to_nearest_multiple_up(x, n=5):\n return n * math.ceil(float(x) / n)", "def _pow_10_round(n, up=True):\n if up:\n return 10 ** math.ceil(math.log(n, 10))\n else:\n return 10 ** math.floor(math.log(n, 10))", "def roundup100(x):\n\treturn int(math.ceil(x / 100.0)) * 100", "def ceil_int(i: int, base: int) -> int:\n return ((i - 1) // base) * base + base if i >= 0 else (i // base) * base", "def rint(flt: float) -> int | float:\n return int(rounded) if (rounded := round(flt, 2)).is_integer() else rounded", "def ceil(x):\n return 0.0", "def round_down(val):\n floor_val = val\n\n try:\n if not is_empty(val):\n float_val = float(val)\n floor_val = math.floor(float_val)\n except Exception as e:\n pass\n\n return floor_val", "def ceil(data):\n return _make.ceil(data)", "def round_to(value: float, target: float):\n rounded = int(round(value / target)) * target\n return rounded", "def roundto(x, to=10.0):\n if to and not math.isnan(x):\n return int(round(x / to)) * to\n else:\n return x", "def round(num, divisor, direction):\n\n if direction.lower() == 'down':\n return floor(num / divisor) * divisor\n elif direction.lower() == 'up':\n return ceil(num / divisor) * divisor", "def ceil_exact(val, flt_type):\n return -floor_exact(-val, flt_type)", "def divide_and_round_up(x, y):\n return ((x - 1) // y) + 1", "def ceil_to_value(number, round_to):\n number = float(number)\n round_to = float(round_to)\n return (np.ceil(number / round_to) * round_to)", "def __ceil__(self, ???):", "def scale_ceil(value, old_max, new_max):\n\tassert value >= 0\n\tassert value <= old_max\n\treturn div_ceil(new_max * value, old_max)" ]
[ "0.7045952", "0.66892636", "0.6606251", "0.6533036", "0.6437218", "0.63580716", "0.62848383", "0.62514913", "0.62276536", "0.6207034", "0.6131016", "0.6117208", "0.61139303", "0.6111873", "0.606857", "0.60549617", "0.6051162", "0.6042114", "0.6040084", "0.6027941", "0.602464", "0.5989654", "0.59762233", "0.5963634", "0.59195256", "0.5911035", "0.5905964", "0.588863", "0.5807748", "0.57650274" ]
0.7841786
0
Return an array containing the individual digits of the PIN. This assumes the PIN is made up of exactly 4 digits.
def get_pin_digits(pin): digits = [] for i in range(1, 5): digit = pin % 10 pin = int(pin / 10) digits = [digit] + digits return digits
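A minimal usage sketch for the function above; the sample PINs and the outputs in the comments are illustrative:

```python
# Illustrative calls to get_pin_digits as defined above; expected outputs are noted in comments.
print(get_pin_digits(4038))  # [4, 0, 3, 8]
print(get_pin_digits(7))     # [0, 0, 0, 7] -> shorter inputs are effectively left-padded with zeros
```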
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pin(length=6):\n pin = str(random.sample(range(10 ** (length - 1), 10 ** length), 1)[0])\n print(\"pin \"+pin)\n\n return pin", "def __generate_pin(cls) -> str:\n return str(randbelow(10 ** cls.PIN_DIGITS)).zfill(cls.PIN_DIGITS)", "def getNativeDigitsList(self):\r\n return self.phone.sx('(send config-manager get-setting \"./yapas/display/native-digits\")',convertToString=True, doNotReport=True)", "def pin_nums(self):\n return self._pin_nums", "def peel_digits(num):\n str_num = str(num) # Converts to string to utilize Python's strong string features\n digit_list = [] # Creates empty list\n for letter in str_num:\n digit_list.append(int(letter)) # Puts each digits into list\n # print(digit_list) # Testing\n return digit_list", "def digits(x):\n return [int(d) for d in str(x)]", "def return_digits(number):\n res = \"\"\n row = 0\n while row < 7:\n line = \"\"\n column = 0\n while column < len(number):\n symb = int(number[column])\n digit = Digits[symb]\n line += digit[row].replace(\"*\", str(symb))\n column += 1\n row += 1\n if row != 7:\n res += line + \"\\n\"\n else:\n res += line\n return res", "def numbers():\n return '<pre>' + '\\n'.join(phone_numbers) + '</pre>'", "def pin_code(self) -> List[PinCodeSummary]:\n return self._pin_code", "def validate_pin():\r\n print(\"Please enter your 4 digits pin\")\r\n pin_list = []\r\n pin_list2 = []\r\n for i in range(4):\r\n pin = int(input())\r\n pin_list.append(pin)\r\n print(\"Please re-enter your pin to confirm\")\r\n for i in range(4):\r\n pin = int(input())\r\n pin_list2.append(pin)\r\n set1 = set(pin_list)\r\n set2 = set(pin_list2)\r\n if set1 == set2:\r\n print(\"Validating pin completed\")\r\n return 1\r\n print(\"Validating pin failed\")\r\n return 0", "def get_pin(self) -> str:\n return self._pin", "def two_digits_into_list(nr: int) -> list:\n return [int(a) for a in list(str(nr))]\n pass", "def pin(self) -> int:", "def extract_digits(cls, phone_number):\n extracted_num = \"\"\n for ch in phone_number:\n if ch in cls.INTEGER_STRING:\n extracted_num += ch\n return extracted_num", "def _extract_digits(binary):\n digit_labels, labeled_image = _segment(binary)\n\n ans = \"\"\n for digit_label in digit_labels:\n try:\n num = _return_char(_get_digit(binary, labeled_image,\n digit_label))\n except ValueError:\n num = None\n \n if num is not None:\n ans += num\n \n return ans", "def _key_array(key):\n key = map(ord, key)[:16]\n initial = map(ord, \"6170383452343567\")\n while len(key) < len(initial):\n key.append(initial[len(key)])\n return key", "def plusOne(self, digits: List[int]) -> List[int]:\n n = len(digits)\n for i in reversed(range(n)):\n # set all the nines at the end of array to zeros\n if digits[i] == 9:\n digits[i] = 0\n\n # here we have the rightmost not-nine \n else:\n # increase this rightmost not-nine by 1\n digits[i] += 1\n #done\n return digits\n\n # we're here because all the digits are nines\n return [1] + digits", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def get_account_number(raw_data):\r\n return len(raw_data)//4", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def numberToArray(number):\n mantisse = getMantisse(number)\n exponent = getExponent(number)\n number = [mantisse, exponent]\n return(number)", "def toBitArray(number, size):\r\n temp_bitnum = bitarray(64)\r\n count = 0\r\n number = number & 
0xFFFFFFFFFFFFFFFF # enforces the number to be 64 bit.\r\n while count < size:\r\n temp_bitnum[63 - count] = (number % 2)\r\n # print \"digit \", count, \" : \", (number % 2)\r\n number = number >> 1\r\n count = count + 1\r\n return temp_bitnum[-size:]", "def int_repr(arr):\n return list(map(list2int, arr))", "def create_code():\n\n code = [0, 0, 0, 0]\n\n for i in range(4):\n value = random.randint(1, 8) # 8 possible digits\n while value in code:\n value = random.randint(1, 8) # 8 possible digits\n code[i] = value\n \n #print(code)\n return code", "def read_pin10(self):\n return self.PIN_10_SIGNAL_TEST", "def crack_pin(self):\n self.ti = time.time()\n n = self.startpin\n L = 10 ** (l) - 1\n c = True\n s = \"X%dd\" % l\n s = s.replace(\"X\", \"%0\")\n print(\n \"[+] startpin: %d, max: %d, wait: %s, reset: %s\"\n % (n, L, str(self.wait), str(self.reset))\n )\n\n while n <= L and self.Continue: # keep looping if the c is set\n self.nc = n\n COMM = self.encode_pin_cmd(n, s)\n r = self.xmit(COMM)\n if r != None:\n data, sw1, sw2 = r\n scommand = str(list(map(hex, COMM)))\n\n if not self.stoping:\n sys.stderr.write(\n \"Pin: %d, Command: %s, Status: %02x %02x\\r\"\n % (n, scommand, sw1, sw2)\n )\n self.Continue = (\n sw1 == 0x98 and sw2 == 0x08\n ) # if invalid pin then c=True\n self.Found = not self.Continue\n\n if sw2 == 0x40: # status for card blocked\n print(\"[!] Card blocked, check PUK!...\")\n # self.runtime()\n # sys.exit(-1)\n self.stoping = True\n else:\n # c = False\n self.stoping = True\n\n if self.Found: # Status for successful attack\n print(\"\\n\\n[*] The PIN is: [ %d ]!!!\" % n)\n self.stoping = True\n\n if self.wait != None:\n time.sleep(waittime)\n\n if self.reset: # reset the chip\n self.ResetChip()\n\n n += 1\n\n if self.stoping == True:\n self.runtime() # prints runtime information\n self.Continue = False", "def get_digits(num: int) -> str:\n return \"\".join(sorted(str(num**3)))", "def digits(x):\n \n if type(x) != int: \n print(\"ERROR <- x in factorial(x) is not type int\")\n return\n \n return [int(i) for i in list(str(x))]", "def val( digits ):\n v= 0\n p= 1\n for d in digits:\n v += d*p\n p *= 10\n return v", "def binarify(num):\n if num<=0: return '0'\n digits = []" ]
[ "0.65546817", "0.59906715", "0.5840187", "0.5697412", "0.5486693", "0.5443754", "0.54010296", "0.53635895", "0.5285393", "0.5280933", "0.52720773", "0.523553", "0.52265537", "0.5088144", "0.5034664", "0.5028315", "0.5027332", "0.49999142", "0.49928743", "0.49638876", "0.49599412", "0.4959841", "0.4929934", "0.49020585", "0.49010187", "0.4890594", "0.48850808", "0.48737374", "0.48631418", "0.48610917" ]
0.784853
0
Method to filter Polygons by user and order them from newest to oldest
def get_queryset(self): return ProviderPolygon.objects.filter( user=self.kwargs['pk']).order_by('-id')
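A hedged sketch of how the same query could be exercised outside the view, e.g. in a Django shell. The ProviderPolygon model and its "user" field come from the snippet above, but the import path and the sample primary key are illustrative assumptions; ordering by "-id" simply returns the most recently created rows first:

```python
# Hypothetical standalone equivalent of the view's get_queryset above.
# The app/module path and the user primary key value are illustrative assumptions.
from example_app.models import ProviderPolygon

def polygons_for_user(user_pk):
    # Same filter and ordering as the view: newest rows (highest id) come first.
    return ProviderPolygon.objects.filter(user=user_pk).order_by('-id')

latest_three = polygons_for_user(42)[:3]  # slicing limits the SQL query to three rows
```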
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queryset(self):\n return ProviderPolygon.objects.filter(user=self.kwargs['pk_user'])", "def distance_user_point_to_polygons(user_point, polygon_list):\n list_polygons_distances = []\n\n for polygon in polygon_list:\n dist = user_point.distance(polygon)\n list_polygons_distances.append(dist)\n\n #return sorted(list_polygons_distances, key=lambda x: x[1], reverse=True)\n return list_polygons_distances", "def _select_polygons(self):\r\n selected_polys_layer = \"SelectedPolys_\" + self.job_id\r\n if self.facility_id is None:\r\n facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + \" IS NULL\"\r\n else:\r\n facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + \" = \" + \\\r\n str(self.facility_id)\r\n query = facility_query + \" AND \" + \\\r\n arcpy.AddFieldDelimiters(self.time_lapse_polygons, FROM_BREAK_FIELD) + \" = \" + str(self.from_break) + \\\r\n \" AND \" + \\\r\n arcpy.AddFieldDelimiters(self.time_lapse_polygons, TO_BREAK_FIELD) + \" = \" + str(self.to_break)\r\n arcpy.management.MakeFeatureLayer(self.time_lapse_polygons, selected_polys_layer, where_clause=query)\r\n self.logger.info(\r\n f\"{int(arcpy.management.GetCount(selected_polys_layer).getOutput(0))} time lapse polygons selected.\")\r\n return selected_polys_layer", "def filter_by_ordering_availability_for_user(self, user):\n\n queryset = self.filter(\n state__in=[\n self.model.States.ACTIVE,\n self.model.States.PAUSED,\n ]\n )\n\n if user.is_anonymous:\n if not settings.WALDUR_MARKETPLACE['ANONYMOUS_USER_CAN_VIEW_OFFERINGS']:\n return self.none()\n else:\n return queryset.filter(shared=True)\n\n if user.is_staff or user.is_support:\n plans = models.Plan.objects.filter(archived=False)\n return queryset.filter(\n Q(shared=True) | Q(plans__in=plans) | Q(parent__plans__in=plans)\n ).distinct()\n\n # filtering by available plans\n divisions = user.divisions\n plans = models.Plan.objects.filter(\n Q(divisions__isnull=True) | Q(divisions__in=divisions)\n ).filter(archived=False)\n\n # filtering by customers and projects\n connected_customers = structure_models.Customer.objects.all().filter(\n permissions__user=user, permissions__is_active=True\n )\n connected_projects = structure_models.Project.available_objects.all().filter(\n permissions__user=user, permissions__is_active=True\n )\n\n return queryset.filter(\n Q(shared=True)\n | (\n (\n Q(customer__in=connected_customers)\n | Q(project__in=connected_projects)\n | Q(permissions__user=user, permissions__is_active=True)\n )\n & (Q(plans__in=plans) | Q(parent__plans__in=plans))\n )\n ).distinct()", "def remove_polygons(self, test):\n filtered_polys = []\n for element in self.polygons:\n pld = [(poly, l, dt) for poly, l, dt in zip(element.polygons, element.layers, element.datatypes)\n if not test(poly, l, dt)]\n if len(pld) == 0:\n pass # we don't need no empty polygons!\n else:\n polys, layers, dts = zip(*pld)\n element.polygons = polys\n element.layers = layers\n element.datatypes = dts\n filtered_polys.append(element)\n self.polygons = filtered_polys\n return self", "def filter_deliveries(restriction):\n data = build_data([\n d for d in current_user.deliveries if restriction(d)])\n # Sort by date\n return sorted(data, key=lambda x: x['toDate'])", "def adjust_filter(self, queryset):\n new_queryset = queryset.filter(points__gt=0.0).order_by('-points', 'rate')\n return self.validate_queryset(new_queryset)", "def get_geojson_potholes(active=True, date=None):\n potholes = VwPothole.objects.all() if date is 
None \\\n else VwPothole.objects.raw(vw_pothole_by_date, {'datetime': '{} 23:59:59'.format(date)})\n\n pothole_features = [Feature(\n geometry=Point((float(pothole.lon), float(pothole.lat)), precision=8),\n id=pothole.id,\n properties={\"pothole_reports\": int(pothole.pothole_reports),\n \"fixed_reports\": int(pothole.fixed_reports),\n \"create_date\": str(pothole.create_date),\n \"effective_date\": str(pothole.effective_date),\n \"active\": pothole.effective_date is not None\n and convert_timestamp(pothole.effective_date) < datetime.utcnow()\n and convert_timestamp(pothole.fixed_date) > datetime.utcnow(),\n \"fixed_date\": str(pothole.fixed_date),\n \"fixed\": pothole.fixed_date and convert_timestamp(pothole.fixed_date) < datetime.utcnow(),\n \"severity\": str(('%f' % round(pothole.avg_severity, 2)).rstrip('.0')),\n \"utcnow\": str(datetime.utcnow())\n })\n for pothole in potholes\n # filter according to whether active is true\n if (active\n and pothole.effective_date is not None\n and convert_timestamp(pothole.fixed_date) > datetime.utcnow()\n and convert_timestamp(pothole.effective_date) < datetime.utcnow())\n or not active]\n\n pothole_collection = FeatureCollection(pothole_features)\n return dumps(pothole_collection)", "def get_shape_filter(shapefile):\n with fiona.open(shapefile) as collection:\n shp = collection[0]['geometry']\n project = partial(\n pyproj.transform,\n pyproj.Proj(init=collection.crs['init']),\n pyproj.Proj(init='epsg:4326'))\n shp = transform(project, shape(shp))\n\n def filter_function(item):\n if item['properties'].get('available'):\n return True\n return shp.intersects(shape(item['geometry']))\n\n return filter_function", "def recently_active_tuples(self, user=None, days_cutoff=3):\n cutoff_date = datetime.utcnow() - timedelta(days=days_cutoff)\n readable_active_tuples = [(g, g.blog.num_active_items(days=days_cutoff, consider_comments=True)[0], g.wiki.num_active_pages(days=days_cutoff)[0]) for g in self.active_groups() if g.can_read(user) and g.watchable_last_change() > cutoff_date] \n sorted_group_tuples = qon.util.sort_list(readable_active_tuples, lambda x: x[0].watchable_last_change())\n return sorted_group_tuples", "def get_visible_field_polygons(self):\n pass", "def generatePolygons():", "def order_filter(self,elements):", "def _filter_events_for_client(self, user_id, events, is_peeking=False):\n types = (\n (EventTypes.RoomHistoryVisibility, \"\"),\n (EventTypes.Member, user_id),\n )\n event_id_to_state = yield self.store.get_state_for_events(\n frozenset(e.event_id for e in events),\n types=types\n )\n res = yield self.filter_events_for_clients(\n [(user_id, is_peeking)], events, event_id_to_state\n )\n defer.returnValue(res.get(user_id, []))", "def get_queryset(self):\n\t\tleague = League.objects.filter(participants=self.request.user)\n\t\treturn Team.objects.filter(league=league).order_by('-team_points').reverse()", "def create_filtered_point_ugrid(ugrid, nids, nids2):\n #unused_pointsu = ugrid.GetPoints()\n output_data = ugrid.GetPoints().GetData()\n points_array = vtk_to_numpy(output_data) # yeah!\n\n isort_nids = np.argsort(nids)\n nids = nids[isort_nids]\n inids = np.searchsorted(nids, nids2)\n\n points_array_sorted = points_array[isort_nids, :]\n point_array2 = points_array_sorted[inids, :]\n points2 = numpy_to_vtk_points(point_array2)\n\n npoints = len(nids2)\n ugrid = create_unstructured_point_grid(points2, npoints)\n return ugrid", "def sort(self):\n tmp = list(zip(self.user_points, self.user_ids));\n tmp = sorted(tmp, reverse=True);\n 
self.user_points, self.user_ids = list(zip(*tmp));\n \n self.user_points = list(self.user_points);\n self.user_ids = list(self.user_ids);", "def by_user(cls, user, start_time=None, end_time=None):\n query = cls.objects.filter(user_id=user.pk)\n if start_time:\n query = query.filter(timestamp__gte=start_time)\n if end_time:\n query = query.filter(timestamp__lt=end_time)\n return query.order_by('timestamp')", "def users_view():\n users = get_users()\n data = get_data()\n result = [{'user_id': i, 'name': users[i]}\n for i in users.keys() if int(i) in data.keys()]\n #import pdb; pdb.set_trace()\n result.sort(key=lambda item: item['name'], cmp=locale.strcoll)\n return result", "def _geofilter(frame):\r\n try:\r\n import geopandas as gpd\r\n\r\n # Remove rows with no latitude and longitude\r\n try:\r\n\r\n filresults = frame[(frame['ActionGeo_Lat'].notnull()\r\n ) | (frame['ActionGeo_Long'].notnull()\r\n )]\r\n except:\r\n\r\n filresults = frame[(frame['actiongeolat'].notnull()\r\n ) | (frame['actiongeolong'].notnull()\r\n )]\r\n gdf = gpd.GeoDataFrame(filresults.assign(geometry=_parallelize_dataframe(filresults)),\r\n crs={'init': 'epsg:4326'})\r\n gdf.columns = list(map(lambda x: (x.replace('_', \"\")).lower(), gdf.columns))\r\n\r\n final = gdf[gdf.geometry.notnull()]\r\n\r\n return final\r\n\r\n\r\n except BaseException as err: # pragma: no cover\r\n if str(err) == \"import of 'geopandas' halted; None in sys.modules\":\r\n raise ImportError(\"geopandas is not installed. gdeltPyR needs\"\r\n \" geopandas to export as shapefile. Visit http://\"\r\n \"geopandas.org/install.html for instructions.\")\r\n else:\r\n raise ValueError(\"geopandas is installed but the geospatial \"\r\n \"functions failed for some other reason. Review\"\r\n \" the stack trace to see where the failure \"\r\n \"occurred.\")", "def plygons(self, request, pk=None):\n shp = self.get_object()\n polygons = shp.multipolygonfeatures_set.all()\n '''\n pagination of the geojson to reduce loading time\n '''\n paginator = GeoJsonPagination()\n paginator.page_size = 100\n page = paginator.paginate_queryset(polygons, request)\n if page is not None:\n serializer = polygonSerializer(page, many=True)\n return paginator.get_paginated_response(serializer.data)\n serializer = polygonSerializer(data=polygons, many=True)\n serializer.is_valid()\n return Response(serializer.data)", "def example_staypoints():\n p1 = Point(8.5067847, 47.4)\n p2 = Point(8.5067847, 47.5)\n p3 = Point(8.5067847, 47.6)\n p4 = Point(8.5067847, 47.7)\n\n t1 = pd.Timestamp(\"1971-01-01 00:00:00\", tz=\"utc\")\n t2 = pd.Timestamp(\"1971-01-01 05:00:00\", tz=\"utc\")\n t3 = pd.Timestamp(\"1971-01-02 07:00:00\", tz=\"utc\")\n t4 = pd.Timestamp(\"1971-01-02 08:00:00\", tz=\"utc\")\n t5 = pd.Timestamp(\"1971-01-02 09:00:00\", tz=\"utc\")\n t6 = pd.Timestamp(\"1971-01-02 10:00:00\", tz=\"utc\")\n\n list_dict = [\n {\"id\": 1, \"user_id\": 0, \"started_at\": t1, \"finished_at\": t2, \"geom\": p1},\n {\"id\": 5, \"user_id\": 0, \"started_at\": t2, \"finished_at\": t3, \"geom\": p2},\n {\"id\": 2, \"user_id\": 0, \"started_at\": t3, \"finished_at\": t4, \"geom\": p3},\n {\"id\": 6, \"user_id\": 0, \"started_at\": t4, \"finished_at\": t5, \"geom\": p2},\n {\"id\": 15, \"user_id\": 0, \"started_at\": t5, \"finished_at\": t6, \"geom\": p1},\n {\"id\": 7, \"user_id\": 1, \"started_at\": t3, \"finished_at\": t4, \"geom\": p4},\n {\"id\": 80, \"user_id\": 1, \"started_at\": t4, \"finished_at\": t5, \"geom\": p2},\n {\"id\": 3, \"user_id\": 1, \"started_at\": t5, \"finished_at\": t6, 
\"geom\": p2},\n ]\n sp = gpd.GeoDataFrame(data=list_dict, geometry=\"geom\", crs=\"EPSG:4326\")\n sp = sp.set_index(\"id\")\n sp.as_staypoints\n return sp", "def filter_data_points(unfiltered_revision_data_points):\n allowed_revision_data_points = {} # {revision : [BenchDataPoints]}\n ignored_revision_data_points = {} # {revision : [BenchDataPoints]}\n revisions = unfiltered_revision_data_points.keys()\n revisions.sort()\n for revision in revisions:\n for point in unfiltered_revision_data_points[revision]:\n if point.time < MIN_REASONABLE_TIME or point.time > MAX_REASONABLE_TIME:\n add_to_revision_data_points(point, revision, ignored_revision_data_points)\n else:\n add_to_revision_data_points(point, revision, allowed_revision_data_points)\n return (allowed_revision_data_points, ignored_revision_data_points)", "def Filter(self, info):\n\n add_item = True\n \n if add_item and (self.use_only_ids != None) and (uid not in self.use_only_ids):\n add_item = False\n \n if add_item and (info['latlong'] == None):\n add_item = False\n \n if add_item and (self.size != None) and (info['size'] < self.size):\n add_item = False\n \n if add_item and (self.expert_rank != None) and (info['expert_rank'] < self.expert_rank):\n add_item = False\n \n if add_item and (self.kosher != None) and (self.kosher == True and info['kosher'] == False):\n add_item = False\n \n if add_item and (self.visiting_center != None) and (self.visiting_center == True and info['visiting_center'] == False):\n add_item = False\n \n if add_item and (self.visiting_center_free_admission != None) and (self.visiting_center_free_admission == True and info['visiting_center_free_admission'] == False):\n add_item = False\n \n if add_item and self.visit_time != None:\n day_of_visit = time.strftime(\"%A\", time.localtime(self.visit_time)).lower()\n if info['hours'][day_of_visit] != None:\n closing_at_that_day = time.mktime(time.strptime(time.strftime(\"%A, %d %b %Y\", time.localtime(self.visit_time)) + \" %d:00:00\" % (info['hours'][day_of_visit]), \"%A, %d %b %Y %H:%M:%S\"))\n if self.visit_time > (closing_at_that_day - self._delta_time_before_close):\n add_item = False\n if day_of_visit == 'saturday' and self.kosher == True:\n add_item = False\n \n if add_item and self.use_weather: \n if not self.weather_client.GoodForWinery(self.weather_client.GetCondition(info['latlong'])):\n add_item = False\n \n return add_item", "def filter_peaks(xCar, xDate, xDir, xFilename, outFolder, buffer='30', whichpass=0):\n ## NECESSARY MODULES\n import pandas as pd #\n import geopandas as gpd\n import shutil\n from datetime import datetime\n from shapely.geometry import Point\n buffer = float(buffer)\n\n # MOVING THE FILES NECESSARY & CREATING NEW FILES\n file_loc = xDir + xFilename\n new_loc = outFolder + \"Filtered\" + xFilename\n new_loc_json = new_loc[:-3] + 'geojson'\n\n oldInfo = xDir + 'Peaks_' + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n newInfo = outFolder + 'FilteredPeaks_' + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n\n shutil.copy(oldInfo, newInfo)\n datFram = pd.read_csv(file_loc) # READING IN THE FILE\n #datFram = datFram_original.drop_duplicates('OP_NUM')\n\n if datFram.shape[0] == 0: # IF THE OBSERVED PEAK FILE WAS EMPTY, MOVE ON\n print(\"Not filtering this file, no peak in it!\")\n return True\n elif datFram.shape[0] == 1: ## IF ONLY HAD ONE OBSERVED PEAK\n datFram_cent = datFram.copy()\n #datFram_cent['OB_CH4_AB'] = datFram.loc[:, 'OB_CH4'].sub(datFram.loc[:, 'OB_CH4_BASELINE'], axis=0)\n maxch4 = 
datFram_cent.groupby('OP_NUM', as_index=False).OB_CH4_AB.max().rename(\n columns={'OB_CH4_AB': 'pk_maxCH4_AB'})\n\n maxc2h6 = datFram_cent.groupby('OP_NUM', as_index=False).OB_C2H6_AB.max().rename(\n columns={'OB_C2H6_AB': 'pk_maxC2H6_AB'})\n\n datFram_wtLoc = weighted_loc(datFram_cent, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB').loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'})\n\n\n datFram_wtLocMax1 = pd.merge(datFram_wtLoc, maxch4, on=['OP_NUM'])\n datFram_wtLocMax = pd.merge(datFram_wtLocMax1, maxc2h6, on=['OP_NUM'])\n\n pass_info = datFram.copy()\n geometry_temp = [Point(lon, lat) for lon, lat in zip(datFram_wtLocMax['pk_LON'], datFram_wtLocMax['pk_LAT'])]\n #crs = {'init': 'epsg:4326'}\n crs = 'EPSG:4326'\n\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n gdf_buff = gdf_buff.to_crs(epsg=32610)\n # gdf_buff['geometry'] = gdf_buff.loc[:,'geometry'].buffer(30)\n gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(buffer)\n gdf_tog = pd.merge(gdf_buff, datFram, on=['OP_NUM'])\n gdf_bind_pks = gdf_buff.copy()\n gdf_pass_pks = gdf_bind_pks.copy()\n gdf_pass_pks['min_read'] = gdf_pass_pks.loc[:, 'OP_NUM']\n gdf_pass_pks['numtimes'] = 1\n gdf_pass_pks['numdays'] = 1\n gdf_pass_pks['newgeo'] = gdf_pass_pks.loc[:, 'geometry']\n gdf_pass_pks['recombine'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['OP_NUM']].to_numpy())].copy()\n gdf_pass_pks['dates'] = gdf_pass_pks.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime('%Y-%m-%d'),\n axis=1)\n gdf_pass_pks['pk_Dates'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['dates']].to_numpy())]\n gdf_pass_pks['min_Date'] = gdf_pass_pks.loc[:, 'dates']\n gdf_pass_pks = gdf_pass_pks.drop(columns=['dates'])\n\n gdf_pass_pks['verified'] = False\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n together = pd.merge(gdf_pass_pks, gdf_tog, on=['OP_NUM', 'pk_LON', 'pk_LAT',\n 'pk_maxCH4_AB','pk_maxC2H6_AB', 'geometry'])\n together['pass'] = whichpass\n gdf_pass_pks = together.copy()\n\n gdf_pass_pks['pkGEO'] = gdf_pass_pks.loc[:, \"geometry\"]\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n del (gdf_pass_pks['newgeo'])\n gdf_pass_pks['pass'] = whichpass\n\n gdf_op_unique = gdf_pass_pks.loc[:,\n ['numtimes', 'min_read', 'numdays', 'min_Date', 'verified', 'pass', 'OB_LON',\n 'OB_LAT']].drop_duplicates()\n gdfcop = gdf_pass_pks.loc[:,\n ['OP_NUM', 'min_read', 'min_Date', 'numtimes', 'verified', 'pass', 'pk_LAT', 'pk_LON',\n 'pk_maxCH4_AB','pk_maxC2H6_AB']].drop_duplicates()\n combinedOP = weighted_loc(gdfcop, 'pk_LAT', 'pk_LON', 'min_read', 'pk_maxCH4_AB').loc[:, :].rename(\n columns={'pk_LAT': 'Overall_LAT', 'pk_LON': 'Overall_LON'}).reset_index(drop=True)\n combinedOP1 = pd.merge(combinedOP, gdfcop, on=['min_read'])\n geometry_temp = [Point(lon, lat) for lon, lat in zip(combinedOP1['Overall_LON'], combinedOP1['Overall_LAT'])]\n crs = 'EPSG:4326'\n gdf_OP = gpd.GeoDataFrame(combinedOP1, crs=crs, geometry=geometry_temp)\n gdf_OP = gdf_OP.to_crs(epsg=32610).copy()\n gdf_OP_reduced = gdf_OP.loc[:, ['min_read', 'geometry',\n 'numtimes', 'Overall_LON',\n 'Overall_LAT', 'min_Date',\n 'pk_maxCH4_AB','pk_maxC2H6_AB',\n 'verified']].drop_duplicates().reset_index(drop=True)\n\n\n gdf_OP_reduced.to_file(new_loc_json, driver=\"GeoJSON\")\n #gdf_OP_reduced.to_file('op.geojson', driver=\"GeoJSON\")\n\n gdf_OP_wrecombine = pd.merge(gdf_OP.drop(columns=['geometry']),\n gdf_pass_pks.drop(columns=['geometry']),\n on=['min_read', 'min_Date', 
'numtimes',\n 'pass', 'verified', 'pk_LAT',\n 'pk_LON','OP_NUM', 'pk_maxCH4_AB',\n 'pk_maxC2H6_AB'])\n gdf_OP_wrecombine.to_csv(new_loc, index=False)\n\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n unique_peaks = gdf_pass_pks.loc[:, ['OP_NUM', 'pk_LAT',\n 'pk_LON', 'min_read', 'min_Date']].drop_duplicates()\n unique_peaks['save'] = True\n good_pks = list(unique_peaks.index)\n\n def get_thing(index):\n if index in good_pks:\n return True\n else:\n return False\n\n gdf_pass_pks['wooind'] = gdf_pass_pks.index\n gdf_pass_pks['save'] = gdf_pass_pks.apply(lambda x: get_thing(x.wooind), axis=1)\n unique_pks_tog = gdf_pass_pks.loc[gdf_pass_pks.save == True, :].reset_index(drop=True)\n unique_pks_tog['Latitude'] = unique_pks_tog.loc[:, 'pk_LAT']\n unique_pks_tog['Longitude'] = unique_pks_tog.loc[:, 'pk_LON']\n\n unique_pks_tog = gdf_pass_pks.loc[gdf_pass_pks.save == True, :].reset_index(drop=True)\n unique_pks_tog['Latitude'] = unique_pks_tog.loc[:, 'pk_LAT']\n unique_pks_tog['Longitude'] = unique_pks_tog.loc[:, 'pk_LON']\n\n ## adding option to add in the overall lat and lon if there is shape 1\n\n unique_pks_tog['Overall_LON'] = unique_pks_tog.loc[:,'pk_LON']\n unique_pks_tog['Overall_LAT'] = unique_pks_tog.loc[:,'pk_LAT']\n\n unique_pks_tog_stripped = unique_pks_tog.loc[:,\n ['OP_NUM', 'pk_LAT','pk_LON', 'pkGEO','pk_maxCH4_AB', 'pk_maxC2H6_AB', 'geometry',\n 'min_read', 'numtimes', 'numdays', 'recombine', 'pk_Dates', 'min_Date',\n 'verified','Latitude','Longitude','Overall_LON','Overall_LAT','wooind','save','pass',\n ]]\n unique_pk_names = unique_pks_tog.OP_NUM.drop_duplicates().tolist()\n unique_all = datFram.loc[datFram['OP_NUM'].isin(unique_pk_names), :]\n finaldf = pd.merge(unique_pks_tog_stripped, unique_all, on='OP_NUM')\n #unique_pks_tog.to_csv(new_loc, index=False)\n finaldf.to_csv(new_loc, index=False)\n\n #unique_pks_tog.to_csv(new_loc, index=False)\n\n # return(gdf_OP_wrecombine)\n\n elif datFram.shape[0] != 1:\n datFram_cent = datFram.copy()\n ### MAXCH4 is a df with the max methane (above baseline) in the given observed peak\n maxch4 = datFram_cent.groupby('OP_NUM', as_index=False).OB_CH4_AB.max().rename(\n columns={'OB_CH4_AB': 'pk_maxCH4_AB'})\n maxc2h6 = datFram_cent.groupby('OP_NUM', as_index=False).OB_C2H6_AB.max().rename(\n columns={'OB_C2H6_AB': 'pk_maxC2H6_AB'})\n ### FINDING WEIGHTED LOCATION OF THE OP, BY THE ABOVE BASELINE CH4 LEVEL\n datFram_wtLoc = weighted_loc(datFram_cent, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB').loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'})\n # datFram_wtLoc = weighted_loc(datFram_cent,'LAT','LON','PEAK_NUM','CH4_AB').rename(columns = {'LAT':'pk_LAT','LON':'pk_LON'}).copy()\n datFram_wtLocMax1 = pd.merge(datFram_wtLoc, maxch4, on=['OP_NUM'])\n datFram_wtLocMax = pd.merge(datFram_wtLocMax1, maxc2h6, on=['OP_NUM'])\n pass_info = datFram.copy()\n geometry_temp = [Point(lon, lat) for lon, lat in zip(datFram_wtLocMax['pk_LON'], datFram_wtLocMax['pk_LAT'])]\n crs = 'EPSG:4326'\n\n # geometry is the point of the lat/lon\n # gdf_buff = gpd.GeoDataFrame(datFram, crs=crs, geometry=geometry_temp)\n\n ## BUFFER AROUND EACH 'OP_NUM' OF 30 M\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n # gdf_buff = makeGPD(datFram,'LON','LAT')\n gdf_buff = gdf_buff.to_crs(epsg=32610)\n # gdf_buff['geometry'] = gdf_buff.loc[:,'geometry'].buffer(30)\n gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(buffer)\n gdf_tog = pd.merge(gdf_buff, datFram, on=['OP_NUM'])\n 
gdf_bind_pks = gdf_buff.copy()\n\n if gdf_bind_pks.shape[0] > 1:\n data_overlap = gpd.GeoDataFrame(crs=gdf_bind_pks.crs)\n data_temp = gdf_bind_pks.copy()\n data_temp = data_temp.to_crs(epsg=32610)\n\n for index, row in data_temp.iterrows():\n data_temp1 = data_temp.loc[data_temp.OP_NUM != row.OP_NUM, :]\n data_temp1 = data_temp1.to_crs(epsg=32610)\n\n # check if intersection occured\n overlaps = data_temp1[data_temp1.geometry.overlaps(row.geometry)]['OP_NUM'].tolist()\n if len(overlaps) > 0:\n\n # compare the area with threshold\n for y in overlaps:\n temp_area = gpd.overlay(data_temp.loc[data_temp.OP_NUM == y,],\n data_temp.loc[data_temp.OP_NUM == row.OP_NUM,], how='intersection')\n temp_area = temp_area.loc[temp_area.geometry.area >= 0.001]\n if temp_area.shape[0] > 0:\n temp_union = gpd.overlay(data_temp.loc[data_temp.OP_NUM == y,],\n data_temp.loc[data_temp.OP_NUM == row.OP_NUM,], how='union')\n data_overlap = gpd.GeoDataFrame(pd.concat([temp_union, data_overlap], ignore_index=True),\n crs=data_temp.crs)\n if data_overlap.size > 0:\n firstnull2 = data_overlap.loc[data_overlap.OP_NUM_1.isnull(), :]\n firstnull = firstnull2.copy()\n firstnull.loc[:, 'OP_NUM_1'] = firstnull2.loc[:, 'OP_NUM_2']\n\n secnull2 = data_overlap.loc[data_overlap.OP_NUM_2.isnull(), :]\n\n secnull = secnull2.copy()\n secnull.loc[:, 'OP_NUM_2'] = secnull2.loc[:, 'OP_NUM_1']\n\n withoutNA = data_overlap.copy().dropna()\n allTog2 = pd.concat([firstnull, secnull, withoutNA]).reset_index().copy()\n\n allTog2['notsame'] = allTog2.apply(lambda x: x.OP_NUM_1 == x.OP_NUM_2, axis=1)\n allTog = allTog2.loc[allTog2.notsame == False, :].drop(columns=['notsame'])\n\n over = allTog.copy()\n over['sorted'] = over.apply(lambda y: sorted([y['OP_NUM_1'], y['OP_NUM_2']]), axis=1)\n over['sorted'] = over.sorted.apply(lambda y: ''.join(y))\n over = over.drop_duplicates('sorted')\n over['combined'] = [list(x) for x in list(over.loc[:, ['OP_NUM_1', 'OP_NUM_2']].to_numpy())]\n # over['date1'] = over.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM_1'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n # over['date2'] = over.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM_2'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n over['date1'] = over.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM_1[len(xCar) + 1:x.OP_NUM_1.find('.')])).strftime(\n '%Y-%m-%d'),\n axis=1)\n over['date2'] = over.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM_2[len(xCar) + 1:x.OP_NUM_2.find('.')])).strftime(\n '%Y-%m-%d'),\n axis=1)\n\n def unique(list1):\n # intilize a null list\n unique_list = []\n # traverse for all elements\n for x in list1:\n # check if exists in unique_list or not\n if x not in unique_list:\n unique_list.append(x)\n return (unique_list)\n\n over['dates'] = [list(x) for x in list(over.loc[:, ['date1', 'date2']].to_numpy())]\n over['pk_Dates'] = over.apply(lambda x: unique(x.dates), axis=1)\n over = over.drop(columns=['dates'])\n\n over['VER_NUM'] = over.apply(lambda y: y.combined, axis=1)\n over['min_val'] = over.apply(lambda y: min(y.combined), axis=1)\n over2 = over.reset_index().loc[:,\n ['OP_NUM_1', 'OP_NUM_2', 'geometry', 'combined', 'min_val', 'pk_Dates']]\n\n overcop = over2.copy().rename(columns={'combined': 'recombine'})\n # overcop.loc[:,'recombine'] = overcop.loc[:,'combined']\n\n for index, row in overcop.iterrows():\n united = row.recombine\n undate = row.pk_Dates\n for index2, row2 in overcop.iterrows():\n united_temp = unIfInt(united, row2.recombine)\n undate_temp = unIfInt(undate, row2.pk_Dates)\n if united_temp != None:\n 
united = united_temp\n if undate_temp != None:\n undate = undate_temp\n overcop.at[index, 'recombine'] = united.copy()\n overcop.at[index, 'pk_Dates'] = undate.copy()\n\n del (united)\n del (undate)\n\n overcop['recombine'] = overcop.apply(lambda y: sorted(y.recombine), axis=1).copy()\n overcop['pk_Dates'] = overcop.apply(lambda y: sorted(y.pk_Dates), axis=1).copy()\n overcop['min_read'] = overcop.apply(lambda y: min(y.recombine), axis=1).copy()\n overcop['min_Date'] = overcop.apply(lambda y: min(y.pk_Dates), axis=1).copy()\n\n newOverlap = overcop.dissolve(by='min_read', as_index=False).loc[:,\n ['min_read', 'geometry', 'recombine', 'min_Date', 'pk_Dates']].copy()\n\n combined = gdf_bind_pks.copy()\n combined['recombine'] = [list(x) for x in list(combined.loc[:, ['OP_NUM']].to_numpy())]\n # combined['dates'] = combined.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n combined['dates'] = combined.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime(\n '%Y-%m-%d'), axis=1)\n\n combined['pk_Dates'] = [list(x) for x in list(combined.loc[:, ['dates']].to_numpy())]\n combined['min_Date'] = combined.loc[:, 'dates']\n combined['numtimes'] = 1\n combined['newgeo'] = combined.loc[:, 'geometry']\n combined['min_read'] = combined.loc[:, \"OP_NUM\"]\n\n for index, row in combined.iterrows():\n for index2, row2 in newOverlap.iterrows():\n if row.OP_NUM in row2.recombine:\n combined.at[index, 'recombine'] = row2.recombine.copy()\n # combined.at[index, 'newgeo'] = row2.copy().geometry\n combined.at[index, 'min_read'] = row2.copy().min_read\n combined.at[index, 'pk_Dates'] = row2.pk_Dates\n combined.at[index, 'min_Date'] = row2.min_Date\n\n # combined['numtimes'] = combined.apply(lambda y: len(y.recombine),axis = 1).copy()\n combined['numtimes'] = combined.apply(lambda x: count_times(x.recombine, xCar), axis=1)\n\n combined['numdays'] = combined.apply(lambda y: len(y.pk_Dates), axis=1).copy()\n combined_reduced = combined.loc[:,\n ['OP_NUM', 'newgeo', 'recombine', 'numtimes', 'min_read', 'numdays', 'pk_Dates',\n 'min_Date']]\n gdf_pass_pks = pd.merge(gdf_tog, combined_reduced, on=['OP_NUM']).copy()\n gdf_pass_pks['verified'] = gdf_pass_pks.apply(lambda y: (True if y.numtimes > 1 else False),\n axis=1).copy()\n if data_overlap.size == 0:\n gdf_pass_pks = gdf_bind_pks.copy()\n gdf_pass_pks['min_read'] = gdf_pass_pks.loc[:, 'OP_NUM']\n gdf_pass_pks['numtimes'] = 1\n gdf_pass_pks['numdays'] = 1\n\n gdf_pass_pks['newgeo'] = gdf_pass_pks.loc[:, 'geometry']\n gdf_pass_pks['recombine'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['OP_NUM']].to_numpy())].copy()\n # gdf_pass_pks['dates'] = gdf_pass_pks.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n gdf_pass_pks['dates'] = gdf_pass_pks.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime(\n '%Y-%m-%d'), axis=1)\n\n gdf_pass_pks['pk_Dates'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['dates']].to_numpy())]\n gdf_pass_pks['min_Date'] = gdf_pass_pks.loc[:, 'dates']\n gdf_pass_pks = gdf_pass_pks.drop(columns=['dates'])\n\n gdf_pass_pks['verified'] = False\n # gdf_pass_pks['oldgeo'] = gdf_pass_pks.loc[:,'geometry']\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n together = pd.merge(gdf_pass_pks, gdf_tog,\n on=['OP_NUM', 'pk_LON', 'pk_LAT', 'pk_maxCH4_AB','pk_maxC2H6_AB', 'geometry'])\n together['pass'] = whichpass\n gdf_pass_pks = together.copy()\n\n if 
gdf_bind_pks.shape[0] == 1:\n gdf_pass_pks = gdf_bind_pks.copy()\n gdf_pass_pks['min_read'] = gdf_pass_pks.loc[:, 'OP_NUM']\n gdf_pass_pks['numtimes'] = 1\n gdf_pass_pks['newgeo'] = gdf_pass_pks.loc[:, 'geometry']\n\n gdf_pass_pks['recombine'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['OP_NUM']].to_numpy())].copy()\n # gdf_pass_pks['dates'] = gdf_pass_pks.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n gdf_pass_pks['dates'] = gdf_pass_pks.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime('%Y-%m-%d'),\n axis=1)\n\n gdf_pass_pks['pk_Dates'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['dates']].to_numpy())]\n gdf_pass_pks['min_Date'] = gdf_pass_pks.loc[:, 'dates']\n gdf_pass_pks['numdays'] = 1\n gdf_pass_pks = gdf_pass_pks.drop(columns=['dates'])\n\n gdf_pass_pks['verified'] = False\n epdat = pass_info.loc[:, ['OP_NUM', 'OP_EPOCHSTART']]\n gdf_pass_pks = pd.merge(gdf_pass_pks, epdat, on=['OP_NUM']).copy()\n data_overlap = pd.DataFrame(columns=['what', 'oh'])\n\n#####sot\n gdf_pass_pks['pkGEO'] = gdf_pass_pks.loc[:, \"geometry\"]\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n del (gdf_pass_pks['newgeo'])\n gdf_pass_pks['pass'] = whichpass\n gdf_pass_pks['Overall_LON'] = gdf_pass_pks['pk_LON']\n gdf_pass_pks['Overall_LAT'] = gdf_pass_pks['pk_LAT']\n combinedOP1 = gdf_pass_pks.drop(columns=['recombine', 'pk_Dates']).drop_duplicates()\n\n if data_overlap.size != 0:\n gdf_op_unique = gdf_pass_pks.loc[:,\n ['numtimes', 'min_read', 'numdays', 'min_Date', 'verified', 'pass', 'OB_LON',\n 'OB_LAT']].drop_duplicates()\n gdfcop = gdf_pass_pks.loc[:,\n ['OP_NUM', 'min_read', 'min_Date', 'numtimes', 'verified', 'pass', 'pk_LAT', 'pk_LON',\n 'pk_maxCH4_AB']].drop_duplicates()\n combinedOP = weighted_loc(gdfcop, 'pk_LAT', 'pk_LON', 'min_read', 'pk_maxCH4_AB').loc[:, :].rename(\n columns={'pk_LAT': 'Overall_LAT', 'pk_LON': 'Overall_LON'}).reset_index(drop=True)\n combinedOP1 = pd.merge(combinedOP, gdfcop, on=['min_read'])\n\n if data_overlap.size == 0 and gdf_bind_pks.shape[0] != 1:\n gdf_op_unique = gdf_pass_pks.loc[:,\n ['numtimes', 'min_read', 'numdays', 'min_Date', 'verified', 'pass', 'OB_LON',\n 'OB_LAT']].drop_duplicates()\n gdfcop = gdf_pass_pks.loc[:,\n ['OP_NUM', 'min_read', 'min_Date', 'numtimes', 'verified', 'pass', 'pk_LAT', 'pk_LON',\n 'pk_maxCH4_AB','pk_maxC2H6_AB']].drop_duplicates()\n combinedOP = weighted_loc(gdfcop, 'pk_LAT', 'pk_LON', 'min_read', 'pk_maxCH4_AB').loc[:, :].rename(\n columns={'pk_LAT': 'Overall_LAT', 'pk_LON': 'Overall_LON'}).reset_index(drop=True)\n combinedOP1 = pd.merge(combinedOP, gdfcop, on=['min_read'])\n\n geometry_temp = [Point(lon, lat) for lon, lat in zip(combinedOP1['Overall_LON'], combinedOP1['Overall_LAT'])]\n\n crs = 'EPSG:4326'\n gdf_OP = gpd.GeoDataFrame(combinedOP1, crs=crs, geometry=geometry_temp)\n #gdf_OP = gdf_OP.to_crs(epsg=32610).copy()\n\n gdf_OP_reduced = gdf_OP.loc[:, ['min_read', 'geometry', 'numtimes', 'Overall_LON', 'Overall_LAT', 'min_Date',\n 'verified']].drop_duplicates().reset_index(drop=True)\n gdf_OP_reduced.to_file(new_loc_json, driver=\"GeoJSON\")\n gdf_OP_wrecombine = pd.merge(gdf_OP.drop(columns=['geometry']), gdf_pass_pks.drop(columns=['geometry']),\n on=['min_read', 'min_Date', 'numtimes', 'pass', 'verified', 'pk_LAT', 'pk_LON',\n 'OP_NUM', 'pk_maxCH4_AB'])\n\n gdf_OP_wrecombine.to_csv(new_loc, index=False)\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n unique_peaks = 
gdf_pass_pks.loc[:, ['OP_NUM', 'pk_LAT', 'pk_LON', 'min_read', 'min_Date']].drop_duplicates()\n unique_peaks['save'] = True\n good_pks = list(unique_peaks.index)\n\n### back2 here\n\n def get_thing(index):\n if index in good_pks:\n return True\n else:\n return False\n\n gdf_pass_pks['wooind'] = gdf_pass_pks.index\n gdf_pass_pks['save'] = gdf_pass_pks.apply(lambda x: get_thing(x.wooind), axis=1)\n\n unique_pks_tog = gdf_pass_pks.loc[gdf_pass_pks.save == True, :].reset_index(drop=True)\n unique_pks_tog['Latitude'] = unique_pks_tog.loc[:, 'pk_LAT']\n unique_pks_tog['minElevated'] = datFram.minElevated[0]\n unique_pks_tog['Longitude'] = unique_pks_tog.loc[:, 'pk_LON']\n unique_pks_tog_stripped = unique_pks_tog.loc[:,\n ['OP_NUM', 'pk_LAT','pk_LON', 'pkGEO','pk_maxCH4_AB', 'pk_maxC2H6_AB', 'geometry',\n 'min_read', 'numtimes', 'numdays', 'recombine', 'pk_Dates', 'min_Date',\n 'verified','Latitude','Longitude','Overall_LON','Overall_LAT','wooind','save','pass'\n ]]\n unique_pk_names = unique_pks_tog.OP_NUM.drop_duplicates().tolist()\n unique_all = datFram.loc[datFram['OP_NUM'].isin(unique_pk_names), :]\n\n finaldf = pd.merge(unique_pks_tog_stripped, unique_all, on='OP_NUM')\n #unique_pks_tog.to_csv(new_loc, index=False)\n finaldf.to_csv(new_loc, index=False)\n return", "def filter_peaks_old(xCar, xDate, xDir, xFilename, outFolder, buffer='30', whichpass=0):\n ## NECESSARY MODULES\n import pandas as pd #\n import geopandas as gpd\n import shutil\n from datetime import datetime\n from shapely.geometry import Point\n buffer = float(buffer)\n\n # MOVING THE FILES NECESSARY & CREATING NEW FILES\n file_loc = xDir + xFilename\n new_loc = outFolder + \"Filtered\" + xFilename\n new_loc_json = new_loc[:-3] + 'geojson'\n\n oldInfo = xDir + 'Peaks_' + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n newInfo = outFolder + 'FilteredPeaks_' + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n\n shutil.copy(oldInfo, newInfo)\n datFram = pd.read_csv(file_loc) # READING IN THE FILE\n #datFram = datFram_original.drop_duplicates('OP_NUM')\n\n if datFram.shape[0] == 0: # IF THE OBSERVED PEAK FILE WAS EMPTY, MOVE ON\n print(\"Not filtering this file, no peak in it!\")\n elif datFram.shape[0] == 1: ## IF ONLY HAD ONE OBSERVED PEAK\n datFram_cent = datFram.copy()\n #datFram_cent['OB_CH4_AB'] = datFram.loc[:, 'OB_CH4'].sub(datFram.loc[:, 'OB_CH4_BASELINE'], axis=0)\n maxch4 = datFram_cent.groupby('OP_NUM', as_index=False).OB_CH4_AB.max().rename(\n columns={'OB_CH4_AB': 'pk_maxCH4_AB'})\n\n maxc2h6 = datFram_cent.groupby('OP_NUM', as_index=False).OB_C2H6_AB.max().rename(\n columns={'OB_C2H6_AB': 'pk_maxC2H6_AB'})\n\n datFram_wtLoc = weighted_loc(datFram_cent, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB').loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'})\n\n\n datFram_wtLocMax1 = pd.merge(datFram_wtLoc, maxch4, on=['OP_NUM'])\n datFram_wtLocMax = pd.merge(datFram_wtLocMax1, maxc2h6, on=['OP_NUM'])\n\n pass_info = datFram.copy()\n geometry_temp = [Point(lon, lat) for lon, lat in zip(datFram_wtLocMax['pk_LON'], datFram_wtLocMax['pk_LAT'])]\n #crs = {'init': 'epsg:4326'}\n crs = 'EPSG:4326'\n\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n gdf_buff = gdf_buff.to_crs(epsg=32610)\n # gdf_buff['geometry'] = gdf_buff.loc[:,'geometry'].buffer(30)\n gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(buffer)\n gdf_tog = pd.merge(gdf_buff, datFram, on=['OP_NUM'])\n gdf_bind_pks = gdf_buff.copy()\n gdf_pass_pks = gdf_bind_pks.copy()\n 
gdf_pass_pks['min_read'] = gdf_pass_pks.loc[:, 'OP_NUM']\n gdf_pass_pks['numtimes'] = 1\n gdf_pass_pks['numdays'] = 1\n gdf_pass_pks['newgeo'] = gdf_pass_pks.loc[:, 'geometry']\n gdf_pass_pks['recombine'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['OP_NUM']].to_numpy())].copy()\n gdf_pass_pks['dates'] = gdf_pass_pks.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime('%Y-%m-%d'),\n axis=1)\n gdf_pass_pks['pk_Dates'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['dates']].to_numpy())]\n gdf_pass_pks['min_Date'] = gdf_pass_pks.loc[:, 'dates']\n gdf_pass_pks = gdf_pass_pks.drop(columns=['dates'])\n\n gdf_pass_pks['verified'] = False\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n together = pd.merge(gdf_pass_pks, gdf_tog, on=['OP_NUM', 'pk_LON', 'pk_LAT',\n 'pk_maxCH4_AB','pk_maxC2H6_AB', 'geometry'])\n together['pass'] = whichpass\n gdf_pass_pks = together.copy()\n\n gdf_pass_pks['pkGEO'] = gdf_pass_pks.loc[:, \"geometry\"]\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n del (gdf_pass_pks['newgeo'])\n gdf_pass_pks['pass'] = whichpass\n\n gdf_op_unique = gdf_pass_pks.loc[:,\n ['numtimes', 'min_read', 'numdays', 'min_Date', 'verified', 'pass', 'OB_LON',\n 'OB_LAT']].drop_duplicates()\n gdfcop = gdf_pass_pks.loc[:,\n ['OP_NUM', 'min_read', 'min_Date', 'numtimes', 'verified', 'pass', 'pk_LAT', 'pk_LON',\n 'pk_maxCH4_AB','pk_maxC2H6_AB']].drop_duplicates()\n combinedOP = weighted_loc(gdfcop, 'pk_LAT', 'pk_LON', 'min_read', 'pk_maxCH4_AB').loc[:, :].rename(\n columns={'pk_LAT': 'Overall_LAT', 'pk_LON': 'Overall_LON'}).reset_index(drop=True)\n combinedOP1 = pd.merge(combinedOP, gdfcop, on=['min_read'])\n geometry_temp = [Point(lon, lat) for lon, lat in zip(combinedOP1['Overall_LON'], combinedOP1['Overall_LAT'])]\n crs = 'EPSG:4326'\n gdf_OP = gpd.GeoDataFrame(combinedOP1, crs=crs, geometry=geometry_temp)\n gdf_OP = gdf_OP.to_crs(epsg=32610).copy()\n gdf_OP_reduced = gdf_OP.loc[:, ['min_read', 'geometry',\n 'numtimes', 'Overall_LON',\n 'Overall_LAT', 'min_Date',\n 'pk_maxCH4_AB','pk_maxC2H6_AB',\n 'verified']].drop_duplicates().reset_index(drop=True)\n\n\n gdf_OP_reduced.to_file(new_loc_json, driver=\"GeoJSON\")\n #gdf_OP_reduced.to_file('op.geojson', driver=\"GeoJSON\")\n\n gdf_OP_wrecombine = pd.merge(gdf_OP.drop(columns=['geometry']),\n gdf_pass_pks.drop(columns=['geometry']),\n on=['min_read', 'min_Date', 'numtimes',\n 'pass', 'verified', 'pk_LAT',\n 'pk_LON','OP_NUM', 'pk_maxCH4_AB',\n 'pk_maxC2H6_AB'])\n gdf_OP_wrecombine.to_csv(new_loc, index=False)\n\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n unique_peaks = gdf_pass_pks.loc[:, ['OP_NUM', 'pk_LAT',\n 'pk_LON', 'min_read', 'min_Date']].drop_duplicates()\n unique_peaks['save'] = True\n good_pks = list(unique_peaks.index)\n\n def get_thing(index):\n if index in good_pks:\n return True\n else:\n return False\n\n gdf_pass_pks['wooind'] = gdf_pass_pks.index\n gdf_pass_pks['save'] = gdf_pass_pks.apply(lambda x: get_thing(x.wooind), axis=1)\n unique_pks_tog = gdf_pass_pks.loc[gdf_pass_pks.save == True, :].reset_index(drop=True)\n unique_pks_tog['Latitude'] = unique_pks_tog.loc[:, 'pk_LAT']\n unique_pks_tog['Longitude'] = unique_pks_tog.loc[:, 'pk_LON']\n\n unique_pks_tog = gdf_pass_pks.loc[gdf_pass_pks.save == True, :].reset_index(drop=True)\n unique_pks_tog['Latitude'] = unique_pks_tog.loc[:, 'pk_LAT']\n unique_pks_tog['Longitude'] = unique_pks_tog.loc[:, 'pk_LON']\n unique_pks_tog_stripped = unique_pks_tog.loc[:,\n ['OP_NUM', 
'pk_LAT','pk_LON', 'pkGEO','pk_maxCH4_AB', 'pk_maxC2H6_AB', 'geometry',\n 'min_read', 'numtimes', 'numdays', 'recombine', 'pk_Dates', 'min_Date',\n 'verified','Latitude','Longitude','Overall_LON','Overall_LAT','wooind','save','pass',\n ]]\n unique_pk_names = unique_pks_tog.OP_NUM.drop_duplicates().tolist()\n unique_all = datFram.loc[datFram['OP_NUM'].isin(unique_pk_names), :]\n finaldf = pd.merge(unique_pks_tog_stripped, unique_all, on='OP_NUM')\n #unique_pks_tog.to_csv(new_loc, index=False)\n finaldf.to_csv(new_loc, index=False)\n\n #unique_pks_tog.to_csv(new_loc, index=False)\n\n # return(gdf_OP_wrecombine)\n\n elif datFram.shape[0] != 1:\n datFram_cent = datFram.copy()\n ### MAXCH4 is a df with the max methane (above baseline) in the given observed peak\n maxch4 = datFram_cent.groupby('OP_NUM', as_index=False).OB_CH4_AB.max().rename(\n columns={'OB_CH4_AB': 'pk_maxCH4_AB'})\n maxc2h6 = datFram_cent.groupby('OP_NUM', as_index=False).OB_C2H6_AB.max().rename(\n columns={'OB_C2H6_AB': 'pk_maxC2H6_AB'})\n ### FINDING WEIGHTED LOCATION OF THE OP, BY THE ABOVE BASELINE CH4 LEVEL\n datFram_wtLoc = weighted_loc(datFram_cent, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB').loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'})\n # datFram_wtLoc = weighted_loc(datFram_cent,'LAT','LON','PEAK_NUM','CH4_AB').rename(columns = {'LAT':'pk_LAT','LON':'pk_LON'}).copy()\n datFram_wtLocMax1 = pd.merge(datFram_wtLoc, maxch4, on=['OP_NUM'])\n datFram_wtLocMax = pd.merge(datFram_wtLocMax1, maxc2h6, on=['OP_NUM'])\n pass_info = datFram.copy()\n geometry_temp = [Point(lon, lat) for lon, lat in zip(datFram_wtLocMax['pk_LON'], datFram_wtLocMax['pk_LAT'])]\n crs = 'EPSG:4326'\n\n # geometry is the point of the lat/lon\n # gdf_buff = gpd.GeoDataFrame(datFram, crs=crs, geometry=geometry_temp)\n\n ## BUFFER AROUND EACH 'OP_NUM' OF 30 M\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n # gdf_buff = makeGPD(datFram,'LON','LAT')\n gdf_buff = gdf_buff.to_crs(epsg=32610)\n # gdf_buff['geometry'] = gdf_buff.loc[:,'geometry'].buffer(30)\n gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(buffer)\n gdf_tog = pd.merge(gdf_buff, datFram, on=['OP_NUM'])\n gdf_bind_pks = gdf_buff.copy()\n\n if gdf_bind_pks.shape[0] > 1:\n data_overlap = gpd.GeoDataFrame(crs=gdf_bind_pks.crs)\n data_temp = gdf_bind_pks.copy()\n data_temp = data_temp.to_crs(epsg=32610)\n\n for index, row in data_temp.iterrows():\n data_temp1 = data_temp.loc[data_temp.OP_NUM != row.OP_NUM, :]\n data_temp1 = data_temp1.to_crs(epsg=32610)\n\n # check if intersection occured\n overlaps = data_temp1[data_temp1.geometry.overlaps(row.geometry)]['OP_NUM'].tolist()\n if len(overlaps) > 0:\n\n # compare the area with threshold\n for y in overlaps:\n temp_area = gpd.overlay(data_temp.loc[data_temp.OP_NUM == y,],\n data_temp.loc[data_temp.OP_NUM == row.OP_NUM,], how='intersection')\n temp_area = temp_area.loc[temp_area.geometry.area >= 0.001]\n if temp_area.shape[0] > 0:\n temp_union = gpd.overlay(data_temp.loc[data_temp.OP_NUM == y,],\n data_temp.loc[data_temp.OP_NUM == row.OP_NUM,], how='union')\n data_overlap = gpd.GeoDataFrame(pd.concat([temp_union, data_overlap], ignore_index=True),\n crs=data_temp.crs)\n if data_overlap.size > 0:\n firstnull2 = data_overlap.loc[data_overlap.OP_NUM_1.isnull(), :]\n firstnull = firstnull2.copy()\n firstnull.loc[:, 'OP_NUM_1'] = firstnull2.loc[:, 'OP_NUM_2']\n\n secnull2 = data_overlap.loc[data_overlap.OP_NUM_2.isnull(), :]\n\n secnull = secnull2.copy()\n secnull.loc[:, 'OP_NUM_2'] = 
secnull2.loc[:, 'OP_NUM_1']\n\n withoutNA = data_overlap.copy().dropna()\n allTog2 = pd.concat([firstnull, secnull, withoutNA]).reset_index().copy()\n\n allTog2['notsame'] = allTog2.apply(lambda x: x.OP_NUM_1 == x.OP_NUM_2, axis=1)\n allTog = allTog2.loc[allTog2.notsame == False, :].drop(columns=['notsame'])\n\n over = allTog.copy()\n over['sorted'] = over.apply(lambda y: sorted([y['OP_NUM_1'], y['OP_NUM_2']]), axis=1)\n over['sorted'] = over.sorted.apply(lambda y: ''.join(y))\n over = over.drop_duplicates('sorted')\n over['combined'] = [list(x) for x in list(over.loc[:, ['OP_NUM_1', 'OP_NUM_2']].to_numpy())]\n # over['date1'] = over.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM_1'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n # over['date2'] = over.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM_2'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n over['date1'] = over.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM_1[len(xCar) + 1:x.OP_NUM_1.find('.')])).strftime(\n '%Y-%m-%d'),\n axis=1)\n over['date2'] = over.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM_2[len(xCar) + 1:x.OP_NUM_2.find('.')])).strftime(\n '%Y-%m-%d'),\n axis=1)\n\n def unique(list1):\n # intilize a null list\n unique_list = []\n # traverse for all elements\n for x in list1:\n # check if exists in unique_list or not\n if x not in unique_list:\n unique_list.append(x)\n return (unique_list)\n\n over['dates'] = [list(x) for x in list(over.loc[:, ['date1', 'date2']].to_numpy())]\n over['pk_Dates'] = over.apply(lambda x: unique(x.dates), axis=1)\n over = over.drop(columns=['dates'])\n\n over['VER_NUM'] = over.apply(lambda y: y.combined, axis=1)\n over['min_val'] = over.apply(lambda y: min(y.combined), axis=1)\n over2 = over.reset_index().loc[:,\n ['OP_NUM_1', 'OP_NUM_2', 'geometry', 'combined', 'min_val', 'pk_Dates']]\n\n overcop = over2.copy().rename(columns={'combined': 'recombine'})\n # overcop.loc[:,'recombine'] = overcop.loc[:,'combined']\n\n for index, row in overcop.iterrows():\n united = row.recombine\n undate = row.pk_Dates\n for index2, row2 in overcop.iterrows():\n united_temp = unIfInt(united, row2.recombine)\n undate_temp = unIfInt(undate, row2.pk_Dates)\n if united_temp != None:\n united = united_temp\n if undate_temp != None:\n undate = undate_temp\n overcop.at[index, 'recombine'] = united.copy()\n overcop.at[index, 'pk_Dates'] = undate.copy()\n\n del (united)\n del (undate)\n\n overcop['recombine'] = overcop.apply(lambda y: sorted(y.recombine), axis=1).copy()\n overcop['pk_Dates'] = overcop.apply(lambda y: sorted(y.pk_Dates), axis=1).copy()\n overcop['min_read'] = overcop.apply(lambda y: min(y.recombine), axis=1).copy()\n overcop['min_Date'] = overcop.apply(lambda y: min(y.pk_Dates), axis=1).copy()\n\n newOverlap = overcop.dissolve(by='min_read', as_index=False).loc[:,\n ['min_read', 'geometry', 'recombine', 'min_Date', 'pk_Dates']].copy()\n\n combined = gdf_bind_pks.copy()\n combined['recombine'] = [list(x) for x in list(combined.loc[:, ['OP_NUM']].to_numpy())]\n # combined['dates'] = combined.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n combined['dates'] = combined.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime(\n '%Y-%m-%d'), axis=1)\n\n combined['pk_Dates'] = [list(x) for x in list(combined.loc[:, ['dates']].to_numpy())]\n combined['min_Date'] = combined.loc[:, 'dates']\n combined['numtimes'] = 1\n combined['newgeo'] = combined.loc[:, 'geometry']\n combined['min_read'] = combined.loc[:, 
\"OP_NUM\"]\n\n for index, row in combined.iterrows():\n for index2, row2 in newOverlap.iterrows():\n if row.OP_NUM in row2.recombine:\n combined.at[index, 'recombine'] = row2.recombine.copy()\n # combined.at[index, 'newgeo'] = row2.copy().geometry\n combined.at[index, 'min_read'] = row2.copy().min_read\n combined.at[index, 'pk_Dates'] = row2.pk_Dates\n combined.at[index, 'min_Date'] = row2.min_Date\n\n # combined['numtimes'] = combined.apply(lambda y: len(y.recombine),axis = 1).copy()\n combined['numtimes'] = combined.apply(lambda x: count_times(x.recombine, xCar), axis=1)\n\n combined['numdays'] = combined.apply(lambda y: len(y.pk_Dates), axis=1).copy()\n combined_reduced = combined.loc[:,\n ['OP_NUM', 'newgeo', 'recombine', 'numtimes', 'min_read', 'numdays', 'pk_Dates',\n 'min_Date']]\n gdf_pass_pks = pd.merge(gdf_tog, combined_reduced, on=['OP_NUM']).copy()\n gdf_pass_pks['verified'] = gdf_pass_pks.apply(lambda y: (True if y.numtimes > 1 else False),\n axis=1).copy()\n if data_overlap.size == 0:\n gdf_pass_pks = gdf_bind_pks.copy()\n gdf_pass_pks['min_read'] = gdf_pass_pks.loc[:, 'OP_NUM']\n gdf_pass_pks['numtimes'] = 1\n gdf_pass_pks['numdays'] = 1\n\n gdf_pass_pks['newgeo'] = gdf_pass_pks.loc[:, 'geometry']\n gdf_pass_pks['recombine'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['OP_NUM']].to_numpy())].copy()\n # gdf_pass_pks['dates'] = gdf_pass_pks.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n gdf_pass_pks['dates'] = gdf_pass_pks.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime(\n '%Y-%m-%d'), axis=1)\n\n gdf_pass_pks['pk_Dates'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['dates']].to_numpy())]\n gdf_pass_pks['min_Date'] = gdf_pass_pks.loc[:, 'dates']\n gdf_pass_pks = gdf_pass_pks.drop(columns=['dates'])\n\n gdf_pass_pks['verified'] = False\n # gdf_pass_pks['oldgeo'] = gdf_pass_pks.loc[:,'geometry']\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n together = pd.merge(gdf_pass_pks, gdf_tog,\n on=['OP_NUM', 'pk_LON', 'pk_LAT', 'pk_maxCH4_AB','pk_maxC2H6_AB', 'geometry'])\n together['pass'] = whichpass\n gdf_pass_pks = together.copy()\n\n if gdf_bind_pks.shape[0] == 1:\n gdf_pass_pks = gdf_bind_pks.copy()\n gdf_pass_pks['min_read'] = gdf_pass_pks.loc[:, 'OP_NUM']\n gdf_pass_pks['numtimes'] = 1\n gdf_pass_pks['newgeo'] = gdf_pass_pks.loc[:, 'geometry']\n\n gdf_pass_pks['recombine'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['OP_NUM']].to_numpy())].copy()\n # gdf_pass_pks['dates'] = gdf_pass_pks.apply(lambda x: datetime.fromtimestamp(int(x['OP_NUM'][6:-2])).strftime('%Y-%m-%d'),axis=1)\n gdf_pass_pks['dates'] = gdf_pass_pks.apply(\n lambda x: datetime.fromtimestamp(int(x.OP_NUM[len(xCar) + 1:x.OP_NUM.find('.')])).strftime('%Y-%m-%d'),\n axis=1)\n\n gdf_pass_pks['pk_Dates'] = [list(x) for x in list(gdf_pass_pks.loc[:, ['dates']].to_numpy())]\n gdf_pass_pks['min_Date'] = gdf_pass_pks.loc[:, 'dates']\n gdf_pass_pks['numdays'] = 1\n gdf_pass_pks = gdf_pass_pks.drop(columns=['dates'])\n\n gdf_pass_pks['verified'] = False\n epdat = pass_info.loc[:, ['OP_NUM', 'OP_EPOCHSTART']]\n gdf_pass_pks = pd.merge(gdf_pass_pks, epdat, on=['OP_NUM']).copy()\n data_overlap = pd.DataFrame(columns=['what', 'oh'])\n\n#####sot\n gdf_pass_pks['pkGEO'] = gdf_pass_pks.loc[:, \"geometry\"]\n gdf_pass_pks['geometry'] = gdf_pass_pks.loc[:, \"newgeo\"]\n del (gdf_pass_pks['newgeo'])\n gdf_pass_pks['pass'] = whichpass\n gdf_pass_pks['Overall_LON'] = gdf_pass_pks['pk_LON']\n gdf_pass_pks['Overall_LAT'] 
= gdf_pass_pks['pk_LAT']\n combinedOP1 = gdf_pass_pks.drop(columns=['recombine', 'pk_Dates']).drop_duplicates()\n\n if data_overlap.size != 0:\n gdf_op_unique = gdf_pass_pks.loc[:,\n ['numtimes', 'min_read', 'numdays', 'min_Date', 'verified', 'pass', 'OB_LON',\n 'OB_LAT']].drop_duplicates()\n gdfcop = gdf_pass_pks.loc[:,\n ['OP_NUM', 'min_read', 'min_Date', 'numtimes', 'verified', 'pass', 'pk_LAT', 'pk_LON',\n 'pk_maxCH4_AB']].drop_duplicates()\n combinedOP = weighted_loc(gdfcop, 'pk_LAT', 'pk_LON', 'min_read', 'pk_maxCH4_AB').loc[:, :].rename(\n columns={'pk_LAT': 'Overall_LAT', 'pk_LON': 'Overall_LON'}).reset_index(drop=True)\n combinedOP1 = pd.merge(combinedOP, gdfcop, on=['min_read'])\n\n if data_overlap.size == 0 and gdf_bind_pks.shape[0] != 1:\n gdf_op_unique = gdf_pass_pks.loc[:,\n ['numtimes', 'min_read', 'numdays', 'min_Date', 'verified', 'pass', 'OB_LON',\n 'OB_LAT']].drop_duplicates()\n gdfcop = gdf_pass_pks.loc[:,\n ['OP_NUM', 'min_read', 'min_Date', 'numtimes', 'verified', 'pass', 'pk_LAT', 'pk_LON',\n 'pk_maxCH4_AB','pk_maxC2H6_AB']].drop_duplicates()\n combinedOP = weighted_loc(gdfcop, 'pk_LAT', 'pk_LON', 'min_read', 'pk_maxCH4_AB').loc[:, :].rename(\n columns={'pk_LAT': 'Overall_LAT', 'pk_LON': 'Overall_LON'}).reset_index(drop=True)\n combinedOP1 = pd.merge(combinedOP, gdfcop, on=['min_read'])\n\n geometry_temp = [Point(lon, lat) for lon, lat in zip(combinedOP1['Overall_LON'], combinedOP1['Overall_LAT'])]\n\n crs = 'EPSG:4326'\n gdf_OP = gpd.GeoDataFrame(combinedOP1, crs=crs, geometry=geometry_temp)\n #gdf_OP = gdf_OP.to_crs(epsg=32610).copy()\n\n gdf_OP_reduced = gdf_OP.loc[:, ['min_read', 'geometry', 'numtimes', 'Overall_LON', 'Overall_LAT', 'min_Date',\n 'verified']].drop_duplicates().reset_index(drop=True)\n gdf_OP_reduced.to_file(new_loc_json, driver=\"GeoJSON\")\n gdf_OP_wrecombine = pd.merge(gdf_OP.drop(columns=['geometry']), gdf_pass_pks.drop(columns=['geometry']),\n on=['min_read', 'min_Date', 'numtimes', 'pass', 'verified', 'pk_LAT', 'pk_LON',\n 'OP_NUM', 'pk_maxCH4_AB'])\n\n gdf_OP_wrecombine.to_csv(new_loc, index=False)\n gdf_buff = gpd.GeoDataFrame(datFram_wtLocMax, crs=crs, geometry=geometry_temp)\n unique_peaks = gdf_pass_pks.loc[:, ['OP_NUM', 'pk_LAT', 'pk_LON', 'min_read', 'min_Date']].drop_duplicates()\n unique_peaks['save'] = True\n good_pks = list(unique_peaks.index)\n\n### back2 here\n\n def get_thing(index):\n if index in good_pks:\n return True\n else:\n return False\n\n gdf_pass_pks['wooind'] = gdf_pass_pks.index\n gdf_pass_pks['save'] = gdf_pass_pks.apply(lambda x: get_thing(x.wooind), axis=1)\n\n unique_pks_tog = gdf_pass_pks.loc[gdf_pass_pks.save == True, :].reset_index(drop=True)\n unique_pks_tog['Latitude'] = unique_pks_tog.loc[:, 'pk_LAT']\n unique_pks_tog['minElevated'] = datFram.minElevated[0]\n unique_pks_tog['Longitude'] = unique_pks_tog.loc[:, 'pk_LON']\n unique_pks_tog_stripped = unique_pks_tog.loc[:,\n ['OP_NUM', 'pk_LAT','pk_LON', 'pkGEO','pk_maxCH4_AB', 'pk_maxC2H6_AB', 'geometry',\n 'min_read', 'numtimes', 'numdays', 'recombine', 'pk_Dates', 'min_Date',\n 'verified','Latitude','Longitude','Overall_LON','Overall_LAT','wooind','save','pass'\n ]]\n unique_pk_names = unique_pks_tog.OP_NUM.drop_duplicates().tolist()\n unique_all = datFram.loc[datFram['OP_NUM'].isin(unique_pk_names), :]\n\n finaldf = pd.merge(unique_pks_tog_stripped, unique_all, on='OP_NUM')\n #unique_pks_tog.to_csv(new_loc, index=False)\n finaldf.to_csv(new_loc, index=False)\n return", "def sort_false(user):\n false_list = user.opinion_set.filter(opinion=False)\n return 
false_list", "def browse(self, lat, lon):\n places = self.filter(active=True).order_by('-id')[:10]\n items = []\n for item in places:\n item.distance = item.compute_distance(lat, lon)\n item.orientation = self.orientation(int(item.compute_orientation(lat,lon)))\n items.append(item)\n return items", "def __push_polygons(self, graph, u, v, vert_dict):\n # Here we assume that the edge of the wall is not in the graph, otherwise there is no adjustment needed\n # Find the shortest path from start to end\n # Use this path to check which polygons are on the side of the path away from the origin\n path = bfs_path(graph, u, v)\n if len(path) <= 2:\n return\n # Vector representing the edge of the wall\n wall_vector = Point(v.get_x() - u.get_x(), v.get_y() - u.get_y())\n # Find the midpoint of the edge\n push_polygons = set()\n edge_start = path.pop()\n while len(path) > 0:\n edge_end = path.pop()\n # The midpoint will be treated as the endpoint of a vector from the origin and used to determine which\n # normal vector is pointing away from the origin\n midpoint = Point((edge_start.get_x() + edge_end.get_x()) / 2,\n (edge_start.get_y() + edge_end.get_y()) / 2)\n # Create <edge_start, edge_end> as an edge vector, normalize it\n edge_vector = Point(edge_end.get_x() - edge_start.get_x(),\n edge_end.get_y() - edge_start.get_y())\n norm = edge_vector.simple_distance(edge_start)\n edge_vector.set(edge_vector.get_x(), edge_vector.get_y())\n # Find the normalized normal vector in relation to edge_vector\n normal = Point(-edge_vector.get_y() / norm, edge_vector.get_x() / norm)\n del norm\n # Find the dot product between the midpoint vector and the normal vector\n dot_product = midpoint.get_x() * normal.get_x() + midpoint.get_y() * normal.get_y()\n if dot_product > 0:\n # This is the point going away from the origin, find the polygon containing this midpoint + normal\n point = Point(midpoint.get_x() + normal.get_x(), midpoint.get_y() + normal.get_y())\n elif dot_product < 0:\n # Flip the direction\n point = Point(midpoint.get_x() - normal.get_x(), midpoint.get_y() - normal.get_y())\n else:\n continue\n point_edge_vector = Point(edge_end.get_x() - u.get_x(), edge_end.get_y() - u.get_y())\n new_vertex = u + project_vector(point_edge_vector, wall_vector)\n for r in self.regions:\n if r.is_contained(point):\n push_polygons.add(r)\n move_vertex(r, edge_end, new_vertex, vert_dict)\n break\n # The end of this edge is by definition the start of the next edge\n edge_start = new_vertex", "def sort_true(user):\n true_list = user.opinion_set.filter(opinion=True)\n return true_list" ]
[ "0.6151991", "0.56732446", "0.54792386", "0.5420772", "0.53110796", "0.5207796", "0.51546836", "0.5097908", "0.50740474", "0.4913443", "0.4897196", "0.4888024", "0.4886229", "0.4841173", "0.4825977", "0.4820225", "0.47709885", "0.47376594", "0.4711727", "0.4710147", "0.4695224", "0.467446", "0.46743205", "0.46730092", "0.46696123", "0.46591988", "0.46495637", "0.46470392", "0.4640117", "0.46394232" ]
0.60019916
1
Method to filter Polygon by user
def get_queryset(self): return ProviderPolygon.objects.filter(user=self.kwargs['pk_user'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queryset(self):\n return ProviderPolygon.objects.filter(\n user=self.kwargs['pk']).order_by('-id')", "def get_shape_filter(shapefile):\n with fiona.open(shapefile) as collection:\n shp = collection[0]['geometry']\n project = partial(\n pyproj.transform,\n pyproj.Proj(init=collection.crs['init']),\n pyproj.Proj(init='epsg:4326'))\n shp = transform(project, shape(shp))\n\n def filter_function(item):\n if item['properties'].get('available'):\n return True\n return shp.intersects(shape(item['geometry']))\n\n return filter_function", "def filter_region(image, vertices):\n mask = np.zeros_like(image)\n if len(mask.shape)==2:\n cv2.fillPoly(mask, vertices, 255)\n else:\n # In case the input image has a channel dimension\n cv2.fillPoly(mask, vertices, (255,) * mask.shape[2]) \n return cv2.bitwise_and(image, mask)", "def get_visible_field_polygons(self):\n pass", "def filter_point(x, y, xlower, xupper, ylower, yupper):\n ignore = False\n if (x < xlower or x > xupper or y < ylower or y > yupper):\n ignore = True\n return ignore", "def filter_geom(geom, _type):\n return list(filter(lambda x: isinstance(x, _type), geom))", "def select_region(image):\n # Define the polygon by vertices\n rows, cols = image.shape[:2]\n bottom_left = [cols*0.05, rows*0.95]\n top_left = [cols*0.3, rows*0.55]\n bottom_right = [cols*0.95, rows*0.95]\n top_right = [cols*0.7, rows*0.55]\n # Vertices are an array of polygons (i.e array of arrays) and the data type must be integer.\n vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)\n return filter_region(image, vertices)", "def filter(self, drawable):\n pass", "def points_in_polygon(polygon, points, buffer=0.):\n mpath = Path( polygon )\n return mpath.contains_points(points, radius=-buffer)", "def spatialFilter(input_shp,aoi,output_shp):\n inDataSource = driver.Open(input_shp, 0)\n inlayer = inDataSource.GetLayer()\n\n # create the data source\n outdata_source = driver.CreateDataSource(output_shp)\n # create the spatial reference, WGS84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n # create the layer\n outlayer = outdata_source.CreateLayer(\"outlayer\", srs, ogr.wkbPolygon)\n\n # Add input Layer Fields to the output Layer if it is the one we want\n inLayerDefn = inlayer.GetLayerDefn()\n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n outlayer.CreateField(fieldDefn)\n\n #load spatialfilter\n inspatialfilter = driver.Open(aoi, 0)\n inspatialfilterlayer = inspatialfilter.GetLayer()\n #get geometry for spatialfilter\n for inFeature in inspatialfilterlayer:\n spatialfiltergeom = inFeature.GetGeometryRef()\n\n inlayer.SetSpatialFilter(spatialfiltergeom)\n # Get the output Layer's Feature Definition\n outLayerDefn = outlayer.GetLayerDefn()\n for inFeature in inlayer:\n # Create output Feature\n outFeature = ogr.Feature(outLayerDefn)\n try:\n # Add field values from input Layer\n for i in range(0, outLayerDefn.GetFieldCount()):\n fieldDefn = outLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(),inFeature.GetField(i))\n # Set geometry\n geom = inFeature.GetGeometryRef()\n outFeature.SetGeometry(geom.Clone())\n # Add new feature to output Layer\n outlayer.CreateFeature(outFeature)\n except Exception:\n sys.exc_clear()\n inlayer.SetSpatialFilter(None)", "def get_gaussian_low_pass_filter(self, shape, cutoff):\r\n P = shape[0]\r\n Q = shape[1]\r\n mask = np.zeros((P, Q))\r\n for u 
in range(P):\r\n for v in range(Q):\r\n dist = (((u - (P / 2)) ** 2) + ((v - (Q / 2)) ** 2)) ** (1 / 2)\r\n\r\n mask[u][v] = math.exp((-dist ** 2) / (2 * (cutoff ** 2)))\r\n\r\n return mask", "def filter_for_user(self, user):\n query = Q(visibility=Document.PUBLIC) | \\\n Q(visibility=Document.PRIVATE, created_by=user) | \\\n Q(visibility=Document.ORG_ONLY,\n organization__memberships__user=user)\n\n if not user.external:\n query = query | Q(visibility=Document.ORG_ONLY_NO_EXTERNAL,\n organization__memberships__user=user)\n\n return super(DocumentManager, self).get_query_set().filter(query) \\\n .distinct()", "def clip_polygon(subject, clipper, operation = 'difference'):\n Subject = Polygon()\n Clipper = Polygon()\n\n for s in subject:\n Subject.add(Vertex(s))\n\n for c in clipper:\n Clipper.add(Vertex(c))\n\n clipped = Clipper.difference(Subject)\\\n if operation == 'reversed-diff'\\\n else Subject.__getattribute__(operation)(Clipper)\n\n clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]\n return clipped", "def remove_polygons(self, test):\n filtered_polys = []\n for element in self.polygons:\n pld = [(poly, l, dt) for poly, l, dt in zip(element.polygons, element.layers, element.datatypes)\n if not test(poly, l, dt)]\n if len(pld) == 0:\n pass # we don't need no empty polygons!\n else:\n polys, layers, dts = zip(*pld)\n element.polygons = polys\n element.layers = layers\n element.datatypes = dts\n filtered_polys.append(element)\n self.polygons = filtered_polys\n return self", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def point_in_polygons(user_point, polygons_list):\n arr = np.array([polygon.contains(user_point) for polygon in polygons_list])\n try:\n poly_id = np.where(arr == True)[0][0] + 1\n return int(poly_id)\n except IndexError:\n print(\"not in the polygon\")", "def region_of_interest(self, img, vertices):\r\n mask = np.zeros_like(img)\r\n # if len(img.shape) > 2:\r\n # channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\r\n # ignore_mask_color = (255,) * channel_count\r\n # else:\r\n # ignore_mask_color = 255\r\n\r\n #filling pixels inside the polygon defined by \"vertices\" with the fill color\r\n cv2.fillPoly(mask, vertices, 255)\r\n\r\n #returning the image only where mask pixels are nonzero\r\n masked_image = cv2.bitwise_and(img, mask)\r\n\r\n return masked_image", "def _boundary_filter(self, south, west, north, east):\n return Q(latitude__gt=south, longitude__gt=west, \n latitude__lt=north, longitude__lt=east)", "def within_polygon(self, poly):\n if hasattr(self, \"quadtree\"):\n bbox = poly.get_bbox(crs=self.crs)\n candidate_indices = self.quadtree.search_within(*bbox)\n confirmed_indices = []\n for i in candidate_indices:\n if poly.contains(self[i]):\n confirmed_indices.append(i)\n confirmed_indices.sort()\n else:\n confirmed_indices = [i for (i, point) in enumerate(self)\n if poly.contains(point)]\n return self._subset(confirmed_indices)", "def search_by_coordinates():\n print('CRS used is EPSG:3857 \\n for reference check https://epsg.io/3857 ')\n x = float(input('Enter x coordinate\\n'))\n y = float(input('Enter y coordinate\\n'))\n point_in_bound(os.path.abspath(\"..\")+\"\\Shape\\prealpinebavaria.shp\", x, y, 'Alpenvorland')\n point_in_bound(os.path.abspath(\"..\")+\"\\Shape\\oberrheinmaintiefland.shp\", x, y, 'Oberrheinisches Tiefland')\n point_in_bound(os.path.abspath(\"..\")+\"\\Shape\\Tiefland.shp\", x, y, 'Niederrheinisches Tiefland')", "def filterf(self):\n from scipy.ndimage.filters import gaussian_filter as gf\n self._obj['u'] = xr.DataArray(gf(self._obj['u'],1),dims=('x','y'))\n self._obj['v'] = xr.DataArray(gf(self._obj['v'],1),dims=('x','y'))\n return self._obj", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(img, vertices, debug = False):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def bounding_box_filter(points, x_range, y_range, z_range):\n min_x, max_x = x_range\n min_y, max_y = y_range\n min_z, max_z = z_range\n\n bound_x = np.logical_and(points[:, 0] > min_x, points[:, 0] < max_x)\n bound_y = np.logical_and(points[:, 1] > min_y, points[:, 1] < max_y)\n bound_z = np.logical_and(points[:, 2] > min_z, points[:, 2] < max_z)\n\n bb_filter = np.logical_and(np.logical_and(bound_x, bound_y), bound_z)\n\n return points[bb_filter]" ]
[ "0.6240812", "0.6093261", "0.561257", "0.5468393", "0.54671586", "0.54410464", "0.5379727", "0.5269086", "0.52619064", "0.5252823", "0.5219768", "0.5207153", "0.5165389", "0.5165207", "0.5137693", "0.5133303", "0.51292515", "0.51259184", "0.51160175", "0.51157916", "0.5115787", "0.5111333", "0.5111333", "0.5111333", "0.5111333", "0.5111333", "0.5111333", "0.5111333", "0.5088017", "0.507631" ]
0.6862393
0
Labels positions Returns xticks, xticklabels
def get_xticks_labels(self): xticks = np.arange(-1, self.space_between_glyphs * len(self.positions)) xticklabels = [' '] for p in self.positions: xticklabels.append(str(p)) xticklabels += [' '] * self.space_between_glyphs return xticks, xticklabels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_xticks_labels(self):\n xticks = np.arange(-1, self.space_between_glyphs * len(self.coevolving_positions) * 3)\n xticklabels = [' ']\n for p1, p2 in self.coevolving_positions:\n xticklabels += [str(p1), ' ', str(p2)] + [' '] * self.space_between_glyphs\n return xticks, xticklabels", "def labelpos(self):\n return self._labelpos", "def labels(self):\n return self.label(self.p_y_given_x)", "def generateLabelsTicks(posns):\n if len(posns) <= 10:\n return labelsTicks(posns, 1)\n elif len(posns) <= 50:\n return labelsTicks(posns, 5)\n else:\n return labelsTicks(posns, 10)", "def labels(self, start, end, numlabels=None, char_width=None):\n ticks = self.ticks(start, end, numlabels)\n labels = self.formatter.format(ticks, numlabels, char_width)\n return zip(ticks, labels)", "def setLabelPosition(position='ticks', axes='XYZ'):\n pdict = {'ticks':'TICKS', 'center':'CENTER', 'shift':'SHIFT'}\n dislin.labpos(pdict[position], axes)", "def style_x_labels(fig):\r\n for label in fig.get_xticklabels():\r\n label.set_rotation(36)\r\n label.set_fontsize(9)", "def test_manual_ticklabels(self):\n fix_text_kerning_factor()\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n\n # Specify tick positions manually.\n ticks = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]\n labels = [\"0/5\", \"1/5\", \"2/5\", \"3/5\", \"4/5\", \"5/5\"]\n ax.taxis.set_ticks(ticks, labels=labels)\n ax.laxis.set_ticks(ticks, labels=labels)\n ax.raxis.set_ticks(ticks, labels=labels)", "def add_labels(self, labels):\n for i, axis in enumerate(self.bottom):\n self.grid[axis].set_xlabel(labels[i])\n\n for i, axis in enumerate(np.array(self.left)[-1::-1]):\n if axis == self.upperleft:\n continue\n\n self.grid[axis].set_ylabel(labels[i]) \n\n pl.draw()", "def addlabels(x, y):\n\n for i in range(len(x)):\n plt.text(i, y[i], y[i], ha='center')", "def _get_labels(x_label, y_label, title, xlabel_str):\n if x_label is None:\n x_label = xlabel_str\n\n if y_label is None:\n y_label = \"Degree of membership\"\n\n if title is None:\n title = \"Degrees of membership of the samples to each cluster\"\n\n return x_label, y_label, title", "def get_tpx_labels():\n\t\n\tlabels_abs = get_tpx_labels_abs()\n\tlabels_rel = get_tpx_labels_rel()\n\tlabels_prop = get_tpx_labels_prop()\n\tlabels_special = get_tpx_labels_special()\n\tlabels = copy.copy(labels_abs) + copy.copy(labels_rel) + copy.copy(labels_prop) + copy.copy(labels_special)\n\t\n\treturn labels", "def _get_labels(self, ind):\n pass", "def __add_labels(self, x_list, y_list, ax, x_label, y_label, **kwargs):\n # remove spines\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n\n # get labels for x and y axis\n x_labels = set_labels(ax=ax, label_value=x_list, label_axis='x')\n y_labels = set_labels(ax=ax, label_value=y_list, label_axis='y')\n\n # set ticklabels\n ax.set_xticklabels(x_labels, fontsize=self.ticklabel_size,\n rotation=self.rotate_xticks, **kwargs)\n ax.set_yticklabels(y_labels, fontsize=self.ticklabel_size,\n rotation=self.rotate_yticks, **kwargs)\n\n # set x and y axis labels\n ax.set_xlabel(\n x_label, fontsize=self.label_size, labelpad=self.labelpad, x=self.align_xval,\n **kwargs\n )\n ax.set_ylabel(\n y_label, fontsize=self.label_size, labelpad=self.labelpad, y=self.align_yval,\n **kwargs\n )\n ax.xaxis.label.set_color(self.label_color)\n ax.yaxis.label.set_color(self.label_color)\n\n # remove tick marks\n ax.tick_params(axis='both', which='both', length=0, 
colors=self.label_color)\n\n if self.show_right:\n ax.tick_params(\n direction='out', axis='y', which='both', labelleft=True, labelright=True,\n right=True, left=True\n )", "def get_ticks(self, sig=2, n_tick=10):\n fmt = '%.' + str(sig) + 'g'\n xlabels = np.zeros(self.x_axis.shape, dtype=object)\n xlabels[:] = \"\"\n each_n_pixels = math.floor(len(xlabels) / n_tick)\n for i in range(len(xlabels)):\n if i % each_n_pixels == 0:\n xlabels[i] = '%s' % float(fmt % self.x_axis[i])\n ylabels = np.zeros(self.y_axis.shape, dtype=object)\n ylabels[:] = \"\"\n each_n_pixels = math.floor(len(ylabels) / n_tick)\n for i in range(len(ylabels)):\n if i % each_n_pixels == 0:\n ylabels[i] = '%s' % float(fmt % self.y_axis[i])\n return xlabels, ylabels", "def setTickPosition(position='sameaslabels', axes='XYZ'):\n tickdict = {'sameaslabels':'LABELS', 'inside':'REVERS','center':'CENTER'}\n dislin.ticpos(tickdict[position],axes)", "def set_labels(ax, label_value, label_axis):\n if label_axis == 'x':\n ax.set_xticks(np.arange(len(label_value)))\n axis = ax.get_xticklabels()\n else:\n ax.set_yticks(np.arange(len(label_value)) + 1)\n axis = ax.get_yticklabels()\n\n # fetch labels\n labels = [items.get_text() for items in axis]\n\n # init a count variable\n if label_axis == 'x':\n count = 0\n else:\n count = len(label_value) - 1\n\n # iterate through all the labels and change the label name\n for i in range(len(labels)):\n labels[i] = label_value[count]\n\n if label_axis == 'x':\n count += 1\n else:\n count -= 1\n\n return labels", "def ticks(axisdim, psize, nticks=8):\n\n axisdim = int(axisdim)\n nticks = int(nticks)\n if not axisdim % 2: axisdim += 1\n if nticks % 2: nticks -= 1\n tickspacing = float((axisdim-1))/nticks\n ticklocs = np.arange(0, axisdim+1, tickspacing) - 0.5\n ticklabels= np.around(psize * np.arange((axisdim-1)/2.0, -(axisdim)/2.0, -tickspacing), decimals=1)\n\n return (ticklocs, ticklabels)", "def set_labels(x, y=''):\n plt.xlabel(x)\n plt.ylabel(y)", "def get_labelPositions(y_list, x_list):\n n_labels = len(y_list)\n\n # GET BORDER POINTS\n x_min, x_max = get_min_max(x_list)\n x_mid = (x_max - x_min) / 2\n\n y_min, y_max = get_min_max(y_list)\n y_mid = (y_max - y_min) / 2\n # Border points\n bp1 = np.array(list(product([x_min, x_max, x_mid], \n [y_min, y_max, y_mid])))[:-1]\n\n # Top right points\n # bp2 = np.array(list(product([0., 1.0, 0.75], \n # [0., 1.0, 0.75])))[:-1]\n\n # Bottom right points\n # bp3 = np.array(list(product([0., 1.0, 0.25], \n # [0., 1.0, 0.25])))[:-1] \n #border_points = np.vstack([bp1, bp2, bp3])\n border_points = np.vstack([bp1])\n n_border = border_points.shape[0]\n\n # Initialize placeholders\n ref_points = np.zeros((n_border + n_labels, 2))\n\n label_positions = np.zeros((n_labels, 2))\n label_indices = np.zeros(n_labels, int)\n\n \n \n ref_points[:n_border] = border_points\n\n for i in range(n_labels):\n # GET POSITIONS\n n_points = x_list[i].size\n xy_points = np.zeros((n_points, 2))\n\n xy_points[:, 0] = x_list[i]\n xy_points[:, 1] = y_list[i]\n \n # GET REF POINTS\n dist = get_pairwise_distances(xy_points, ref_points[:n_border + i])\n\n # GET MINIMUM DISTANCES\n min_dist = dist.min(axis=1)\n\n # GET MAXIMUM MINIMUM DISTANCE\n label_index = np.argmax(min_dist)\n label_pos = xy_points[label_index]\n\n ref_points[n_border + i] = label_pos\n label_positions[i] = label_pos\n label_indices[i] = label_index\n\n return label_positions, label_indices", "def universal_plot_labels(fig, xlabel, ylabel):\n fig.add_subplot(111, frameon=False)\n 
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()", "def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)", "def __place_statistics_labels(self):\n\n base_x = self.__statistics_coords[\"x\"]\n base_y = self.__statistics_coords[\"y\"]\n active_lines_label = Label(self.__main_window, textvariable=self.__active_lines_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_buses_label = Label(self.__main_window, textvariable=self.__active_buses_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_people_lable = Label(self.__main_window, textvariable=self.__number_of_people_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n session_time_lable = Label(self.__main_window, textvariable=self.__session_time_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 23))\n number_of_people_lable.place(x=base_x, y=base_y)\n active_lines_label.place(x=base_x-35, y=base_y + 35)\n number_of_buses_label.place(x=base_x+54, y=base_y + 69)\n session_time_lable.place(x=base_x-70, y=base_y + 116)", "def _get_window_labels(self, window):\n\n # Get summary stats, if they have not yet been triggered\n if not self.summary_info:\n self.get_summary_stats()\n\n # Get contig boundary positon\n c = 0\n xbars = []\n for contig, seq in self.contigs.items():\n contig_id = self._get_contig_id(contig)\n self.contig_boundaries[contig_id] = [c, c + len(seq)]\n c += len(seq)\n xbars.append(\n {\n \"contig\": contig_id,\n \"position\": c / window,\n \"absPosition\": c,\n \"window\": window\n }\n )\n\n # Get label contig for each window\n labels = []\n for i in range(0, self.summary_info[\"total_len\"], window):\n for contig, rg in self.contig_boundaries.items():\n if rg[0] <= i < rg[1]:\n labels.append(\"{}_{}\".format(contig, i))\n break\n\n return labels, xbars", "def _pos2label(self, p, labels):\n if labels is not None:\n if p in labels.keys():\n return labels[p]\n else:\n return ''\n # raise ValueError('Fatal ERROR: no label for this position in label dictionary!')\n else:\n if p == 1:\n return 'top'\n elif p == 2:\n return 'bottom'\n elif p == 3:\n return 'left'\n elif p == 4:\n return 'right'", "def get_axis_labels(\n self,\n x_label: float | str | Mobject = \"x\",\n y_label: float | str | Mobject = \"y\",\n ) -> VGroup:\n\n self.axis_labels = VGroup(\n self.get_x_axis_label(x_label),\n self.get_y_axis_label(y_label),\n )\n return self.axis_labels", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def autolabel(rects, xpos='center'):\n\n xpos = xpos.lower() # normalize the case of the parameter\n ha = {'center': 'center', 'right': 'left', 'left': 'right'}\n offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off\n\n for rect in rects:\n \n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,\n '{}'.format(height), ha=ha[xpos], va='bottom')", "def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]", "def auto_labels(self,top=True,bottom=True,top_label='',bottom_label='',\\\n 
col_index=0,row_index=0):\n param=self.x_param\n\n top_label=[top_label+\" \"+ x for x in param.labels]\n\n bottom_label=[bottom_label+\"{:02d} x {:02d}\".format(col_index,y) for y in range(row_index,row_index+len(param))]\n\n if top==True :\n\n self.labels_top=top_label\n\n else:\n\n self.labels_top=None\n\n if bottom==True :\n\n self.labels_bottom=bottom_label\n\n else:\n\n self.labels_bottom=None" ]
[ "0.79856944", "0.6753631", "0.6689842", "0.66790074", "0.64383024", "0.6406252", "0.63756293", "0.6367111", "0.63649714", "0.63599926", "0.634465", "0.6342175", "0.63284785", "0.6284176", "0.6272564", "0.6271432", "0.6250377", "0.618463", "0.61352825", "0.6110104", "0.60988957", "0.60971934", "0.6068962", "0.6029818", "0.6022334", "0.6004712", "0.5990371", "0.5990338", "0.5977793", "0.59602463" ]
0.8132816
0
Labels positions Returns xticks, xticklabels
def get_xticks_labels(self): xticks = np.arange(-1, self.space_between_glyphs * len(self.coevolving_positions) * 3) xticklabels = [' '] for p1, p2 in self.coevolving_positions: xticklabels += [str(p1), ' ', str(p2)] + [' '] * self.space_between_glyphs return xticks, xticklabels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_xticks_labels(self):\n xticks = np.arange(-1, self.space_between_glyphs * len(self.positions))\n xticklabels = [' ']\n for p in self.positions:\n xticklabels.append(str(p))\n xticklabels += [' '] * self.space_between_glyphs\n return xticks, xticklabels", "def labelpos(self):\n return self._labelpos", "def labels(self):\n return self.label(self.p_y_given_x)", "def generateLabelsTicks(posns):\n if len(posns) <= 10:\n return labelsTicks(posns, 1)\n elif len(posns) <= 50:\n return labelsTicks(posns, 5)\n else:\n return labelsTicks(posns, 10)", "def labels(self, start, end, numlabels=None, char_width=None):\n ticks = self.ticks(start, end, numlabels)\n labels = self.formatter.format(ticks, numlabels, char_width)\n return zip(ticks, labels)", "def setLabelPosition(position='ticks', axes='XYZ'):\n pdict = {'ticks':'TICKS', 'center':'CENTER', 'shift':'SHIFT'}\n dislin.labpos(pdict[position], axes)", "def style_x_labels(fig):\r\n for label in fig.get_xticklabels():\r\n label.set_rotation(36)\r\n label.set_fontsize(9)", "def test_manual_ticklabels(self):\n fix_text_kerning_factor()\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n\n # Specify tick positions manually.\n ticks = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]\n labels = [\"0/5\", \"1/5\", \"2/5\", \"3/5\", \"4/5\", \"5/5\"]\n ax.taxis.set_ticks(ticks, labels=labels)\n ax.laxis.set_ticks(ticks, labels=labels)\n ax.raxis.set_ticks(ticks, labels=labels)", "def add_labels(self, labels):\n for i, axis in enumerate(self.bottom):\n self.grid[axis].set_xlabel(labels[i])\n\n for i, axis in enumerate(np.array(self.left)[-1::-1]):\n if axis == self.upperleft:\n continue\n\n self.grid[axis].set_ylabel(labels[i]) \n\n pl.draw()", "def addlabels(x, y):\n\n for i in range(len(x)):\n plt.text(i, y[i], y[i], ha='center')", "def _get_labels(x_label, y_label, title, xlabel_str):\n if x_label is None:\n x_label = xlabel_str\n\n if y_label is None:\n y_label = \"Degree of membership\"\n\n if title is None:\n title = \"Degrees of membership of the samples to each cluster\"\n\n return x_label, y_label, title", "def get_tpx_labels():\n\t\n\tlabels_abs = get_tpx_labels_abs()\n\tlabels_rel = get_tpx_labels_rel()\n\tlabels_prop = get_tpx_labels_prop()\n\tlabels_special = get_tpx_labels_special()\n\tlabels = copy.copy(labels_abs) + copy.copy(labels_rel) + copy.copy(labels_prop) + copy.copy(labels_special)\n\t\n\treturn labels", "def _get_labels(self, ind):\n pass", "def __add_labels(self, x_list, y_list, ax, x_label, y_label, **kwargs):\n # remove spines\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n\n # get labels for x and y axis\n x_labels = set_labels(ax=ax, label_value=x_list, label_axis='x')\n y_labels = set_labels(ax=ax, label_value=y_list, label_axis='y')\n\n # set ticklabels\n ax.set_xticklabels(x_labels, fontsize=self.ticklabel_size,\n rotation=self.rotate_xticks, **kwargs)\n ax.set_yticklabels(y_labels, fontsize=self.ticklabel_size,\n rotation=self.rotate_yticks, **kwargs)\n\n # set x and y axis labels\n ax.set_xlabel(\n x_label, fontsize=self.label_size, labelpad=self.labelpad, x=self.align_xval,\n **kwargs\n )\n ax.set_ylabel(\n y_label, fontsize=self.label_size, labelpad=self.labelpad, y=self.align_yval,\n **kwargs\n )\n ax.xaxis.label.set_color(self.label_color)\n ax.yaxis.label.set_color(self.label_color)\n\n # remove tick marks\n ax.tick_params(axis='both', which='both', length=0, colors=self.label_color)\n\n if 
self.show_right:\n ax.tick_params(\n direction='out', axis='y', which='both', labelleft=True, labelright=True,\n right=True, left=True\n )", "def get_ticks(self, sig=2, n_tick=10):\n fmt = '%.' + str(sig) + 'g'\n xlabels = np.zeros(self.x_axis.shape, dtype=object)\n xlabels[:] = \"\"\n each_n_pixels = math.floor(len(xlabels) / n_tick)\n for i in range(len(xlabels)):\n if i % each_n_pixels == 0:\n xlabels[i] = '%s' % float(fmt % self.x_axis[i])\n ylabels = np.zeros(self.y_axis.shape, dtype=object)\n ylabels[:] = \"\"\n each_n_pixels = math.floor(len(ylabels) / n_tick)\n for i in range(len(ylabels)):\n if i % each_n_pixels == 0:\n ylabels[i] = '%s' % float(fmt % self.y_axis[i])\n return xlabels, ylabels", "def setTickPosition(position='sameaslabels', axes='XYZ'):\n tickdict = {'sameaslabels':'LABELS', 'inside':'REVERS','center':'CENTER'}\n dislin.ticpos(tickdict[position],axes)", "def set_labels(ax, label_value, label_axis):\n if label_axis == 'x':\n ax.set_xticks(np.arange(len(label_value)))\n axis = ax.get_xticklabels()\n else:\n ax.set_yticks(np.arange(len(label_value)) + 1)\n axis = ax.get_yticklabels()\n\n # fetch labels\n labels = [items.get_text() for items in axis]\n\n # init a count variable\n if label_axis == 'x':\n count = 0\n else:\n count = len(label_value) - 1\n\n # iterate through all the labels and change the label name\n for i in range(len(labels)):\n labels[i] = label_value[count]\n\n if label_axis == 'x':\n count += 1\n else:\n count -= 1\n\n return labels", "def ticks(axisdim, psize, nticks=8):\n\n axisdim = int(axisdim)\n nticks = int(nticks)\n if not axisdim % 2: axisdim += 1\n if nticks % 2: nticks -= 1\n tickspacing = float((axisdim-1))/nticks\n ticklocs = np.arange(0, axisdim+1, tickspacing) - 0.5\n ticklabels= np.around(psize * np.arange((axisdim-1)/2.0, -(axisdim)/2.0, -tickspacing), decimals=1)\n\n return (ticklocs, ticklabels)", "def set_labels(x, y=''):\n plt.xlabel(x)\n plt.ylabel(y)", "def get_labelPositions(y_list, x_list):\n n_labels = len(y_list)\n\n # GET BORDER POINTS\n x_min, x_max = get_min_max(x_list)\n x_mid = (x_max - x_min) / 2\n\n y_min, y_max = get_min_max(y_list)\n y_mid = (y_max - y_min) / 2\n # Border points\n bp1 = np.array(list(product([x_min, x_max, x_mid], \n [y_min, y_max, y_mid])))[:-1]\n\n # Top right points\n # bp2 = np.array(list(product([0., 1.0, 0.75], \n # [0., 1.0, 0.75])))[:-1]\n\n # Bottom right points\n # bp3 = np.array(list(product([0., 1.0, 0.25], \n # [0., 1.0, 0.25])))[:-1] \n #border_points = np.vstack([bp1, bp2, bp3])\n border_points = np.vstack([bp1])\n n_border = border_points.shape[0]\n\n # Initialize placeholders\n ref_points = np.zeros((n_border + n_labels, 2))\n\n label_positions = np.zeros((n_labels, 2))\n label_indices = np.zeros(n_labels, int)\n\n \n \n ref_points[:n_border] = border_points\n\n for i in range(n_labels):\n # GET POSITIONS\n n_points = x_list[i].size\n xy_points = np.zeros((n_points, 2))\n\n xy_points[:, 0] = x_list[i]\n xy_points[:, 1] = y_list[i]\n \n # GET REF POINTS\n dist = get_pairwise_distances(xy_points, ref_points[:n_border + i])\n\n # GET MINIMUM DISTANCES\n min_dist = dist.min(axis=1)\n\n # GET MAXIMUM MINIMUM DISTANCE\n label_index = np.argmax(min_dist)\n label_pos = xy_points[label_index]\n\n ref_points[n_border + i] = label_pos\n label_positions[i] = label_pos\n label_indices[i] = label_index\n\n return label_positions, label_indices", "def universal_plot_labels(fig, xlabel, ylabel):\n fig.add_subplot(111, frameon=False)\n plt.tick_params(labelcolor='none', top=False, bottom=False, 
left=False, right=False)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()", "def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)", "def __place_statistics_labels(self):\n\n base_x = self.__statistics_coords[\"x\"]\n base_y = self.__statistics_coords[\"y\"]\n active_lines_label = Label(self.__main_window, textvariable=self.__active_lines_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_buses_label = Label(self.__main_window, textvariable=self.__active_buses_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_people_lable = Label(self.__main_window, textvariable=self.__number_of_people_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n session_time_lable = Label(self.__main_window, textvariable=self.__session_time_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 23))\n number_of_people_lable.place(x=base_x, y=base_y)\n active_lines_label.place(x=base_x-35, y=base_y + 35)\n number_of_buses_label.place(x=base_x+54, y=base_y + 69)\n session_time_lable.place(x=base_x-70, y=base_y + 116)", "def _get_window_labels(self, window):\n\n # Get summary stats, if they have not yet been triggered\n if not self.summary_info:\n self.get_summary_stats()\n\n # Get contig boundary positon\n c = 0\n xbars = []\n for contig, seq in self.contigs.items():\n contig_id = self._get_contig_id(contig)\n self.contig_boundaries[contig_id] = [c, c + len(seq)]\n c += len(seq)\n xbars.append(\n {\n \"contig\": contig_id,\n \"position\": c / window,\n \"absPosition\": c,\n \"window\": window\n }\n )\n\n # Get label contig for each window\n labels = []\n for i in range(0, self.summary_info[\"total_len\"], window):\n for contig, rg in self.contig_boundaries.items():\n if rg[0] <= i < rg[1]:\n labels.append(\"{}_{}\".format(contig, i))\n break\n\n return labels, xbars", "def _pos2label(self, p, labels):\n if labels is not None:\n if p in labels.keys():\n return labels[p]\n else:\n return ''\n # raise ValueError('Fatal ERROR: no label for this position in label dictionary!')\n else:\n if p == 1:\n return 'top'\n elif p == 2:\n return 'bottom'\n elif p == 3:\n return 'left'\n elif p == 4:\n return 'right'", "def get_axis_labels(\n self,\n x_label: float | str | Mobject = \"x\",\n y_label: float | str | Mobject = \"y\",\n ) -> VGroup:\n\n self.axis_labels = VGroup(\n self.get_x_axis_label(x_label),\n self.get_y_axis_label(y_label),\n )\n return self.axis_labels", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def autolabel(rects, xpos='center'):\n\n xpos = xpos.lower() # normalize the case of the parameter\n ha = {'center': 'center', 'right': 'left', 'left': 'right'}\n offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off\n\n for rect in rects:\n \n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,\n '{}'.format(height), ha=ha[xpos], va='bottom')", "def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]", "def auto_labels(self,top=True,bottom=True,top_label='',bottom_label='',\\\n col_index=0,row_index=0):\n param=self.x_param\n\n top_label=[top_label+\" \"+ x 
for x in param.labels]\n\n bottom_label=[bottom_label+\"{:02d} x {:02d}\".format(col_index,y) for y in range(row_index,row_index+len(param))]\n\n if top==True :\n\n self.labels_top=top_label\n\n else:\n\n self.labels_top=None\n\n if bottom==True :\n\n self.labels_bottom=bottom_label\n\n else:\n\n self.labels_bottom=None" ]
[ "0.8132898", "0.67532235", "0.6690399", "0.6679199", "0.64378524", "0.64060897", "0.6375115", "0.63673866", "0.6364803", "0.63602555", "0.63443977", "0.63418126", "0.6328635", "0.6284126", "0.6273749", "0.6271968", "0.6250492", "0.61854696", "0.6135588", "0.6110145", "0.6098517", "0.60974467", "0.6069889", "0.602955", "0.60224044", "0.60048276", "0.5991316", "0.59906065", "0.5977156", "0.5960264" ]
0.79857206
1
Recovers existing persistent job store.
def recover(sched, job_state_file): logger.debug("jobs before shelve recovery: %d" % len(sched.get_jobs())) logger.debug("jobs: " + str(sched.get_jobs())) job_store = ShelveJobStore(job_state_file) sched.add_jobstore(job_store, _FHANDLE) logger.debug("jobs after shelve recovery: %d" % len(sched.get_jobs())) logger.debug("jobs: " + str(sched.get_jobs()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self): # noqa\n data = self.connection.hgetall(self.key)\n if not data:\n raise NoSuchJobError('No such job: {0}'.format(self.key))\n self.restore(data)", "def restore(self, checkpoint):\n raise NotImplementedError", "def recover(self):\n if self.get_info_from_db():\n logger.info(\"Recover by reading previous results\")\n self.check_items(self.get_user_results_from_db())\n else:\n self.create_info_in_db() # create record in axdb", "def restore(self):\n # For multi-worker training, it should not restore a model in certain\n # worker setting (e.g. non-chief worker in ParameterServerStrategy).\n # pylint: disable=protected-access\n if self._model._in_multi_worker_mode() and not multi_worker_util.should_load_checkpoint():\n return\n self.read_checkpoint_manager.restore_or_initialize()", "def handle_store(self, store):\n # Get storage UIDs\n uids = set(store.get_uids())\n\n # Remove known UIDs\n uids.difference_update(self._status.list())\n\n for uid in uids:\n # Reload all stored compositions\n stored = store.load(uid)\n\n name = stored['name']\n distribution = stored['distribution']\n\n try:\n _logger.debug(\"Reloading %s - %s...\", name, uid)\n self._composer.reload_distribution(name, distribution, uid)\n except KeyError:\n # Already known distribution\n pass", "def jobs_load(self):\n\n if os.path.exists('trunk/records/jobs_saved.pkl'):\n with open('trunk/records/jobs_saved.pkl', 'rb') as file_input:\n self.old_jobs = pickle.load(file_input)\n return True\n else:\n print(\"job_records cannot be found\")\n return False", "def EnableRestoring(self):\n\n self._doRestore = True", "def restore(self, checkpoint_path: str):\r\n raise NotImplementedError", "async def async_restore(self):\n await self._client.restore()\n self.async_write_ha_state()", "def restartCheckpoint(self, jobStore):\n assert self.checkpoint is not None\n successorsDeleted = []\n if self.stack or self.services or self.command != None:\n if self.command != None:\n assert self.command == self.checkpoint\n logger.debug(\"Checkpoint job already has command set to run\")\n else:\n self.command = self.checkpoint\n\n jobStore.update(self) # Update immediately to ensure that checkpoint\n # is made before deleting any remaining successors\n\n if self.stack or self.services:\n # If the subtree of successors is not complete restart everything\n logger.debug(\"Checkpoint job has unfinished successor jobs, deleting the jobs on the stack: %s, services: %s \" %\n (self.stack, self.services))\n # Delete everything on the stack, as these represent successors to clean\n # up as we restart the queue\n def recursiveDelete(jobGraph2):\n # Recursive walk the stack to delete all remaining jobs\n for jobs in jobGraph2.stack + jobGraph2.services:\n for jobNode in jobs:\n if jobStore.exists(jobNode.jobStoreID):\n recursiveDelete(jobStore.load(jobNode.jobStoreID))\n else:\n logger.debug(\"Job %s has already been deleted\", jobNode)\n if jobGraph2 != self:\n logger.debug(\"Checkpoint is deleting old successor job: %s\", jobGraph2.jobStoreID)\n jobStore.delete(jobGraph2.jobStoreID)\n successorsDeleted.append(jobGraph2.jobStoreID)\n recursiveDelete(self)\n\n self.stack = [ [], [] ] # Initialise the job to mimic the state of a job\n # that has been previously serialised but which as yet has no successors\n\n self.services = [] # Empty the services\n\n # Update the jobStore to avoid doing this twice on failure and make this clean.\n jobStore.update(self)\n return successorsDeleted", "def _recover(self,):\n modlogger.debug( \"starting 
recovery\")\n with self.id_lock: #Prevent new ops being created.\n logs = [ LogFile(x,readonly=True) for x in self._findlogs() ]\n logiter = [ iter(x) for x in logs ]\n ops = [ _getop(x) for x in logiter ]\n opids = [ _getid(x) for x in ops ]\n #order the log files by operation Id.\n data = sorted(zip(logs,logiter,ops,opids),key =lambda x:x[3])\n modlogger.debug( \"SR:%s\"%data)\n #And now got through all log files in Id order\n state = 'init'\n unrecoverable = []\n for log,it,op,opid in data:\n for cur_op in chain([op],it):\n #cur_op None indicated end of that logfile.\n if cur_op is None: break\n\n #We ignore any ops until we see a 'startTxn' marker, but we\n # keep a record of there ids to ensure we see a later checkpoint.\n # if we don't we can't replay partial Txn.\n modlogger.debug( \"R:%s,%s\",cur_op,state)\n if state=='init':\n #Record all operations we see before we see the first\n #start tx marker.\n if cur_op.optype == b'start_txn':\n state='txcomplete'\n elif cur_op.optype == b'abort_txn':\n #If the partial transaction we found was aborted\n # we don't need to worry about its operations. \n unrcoverable = [ ]\n elif cur_op.optype == b'Checkpoint':\n unrecoverable = _remove_commited(unrecoverable,cur_op.opid)\n else:\n unrecoverable += [ op.opid]\n \n\n #We are looking for a starttxn, marker to mark the operation\n #as valid. The only other meaningful transaction in the\n #journal in the state is a checkpoint making which ops have been\n #detected as committed to the main store by the FS.\n if state=='txcomplete':\n if cur_op.optype == b'start_txn':\n tx = cur_op.txn_id\n txops = [ ]\n state = 'txstarted'\n continue\n elif cur_op.optype == b'Checkpoint':\n unrecoverable = _remove_commited(unrecoverable,cur_op.opid)\n else: raise RecoveryError(\"Operation outside tx\")\n\n #In this state all operations are meaningful.\n # we store all operations (except checkpoint) until we see\n # a EndTxn op. At the end TxnOp we synchronously complete\n # all operations.\n if state =='txstarted':\n if cur_op.optype == b'end_txn': \n #The test below finds 'overlapped' tx, (or ones missing a commit record\n #for some reason. 
This forces us not to accept this log file.\n if cur_op.txn_id != tx: raise RecoveryError(\"Non matching Tx commit found\")\n else:\n for top in txops:\n top.do(sync = True)\n state = 'txcomplete'\n elif cur_op.optype == b'abort_txn':\n state = 'txcomplete'\n elif cur_op.optype == b'Checkpoint':\n unrecoverable = _remove_commited(unrecoverable,cur_op.opid)\n else:\n txops += [ cur_op ] \n #Log file has been processed successfully - remove it from the Fs.\n #we could call close() here and reused the allocated space on the\n #FS - but the logfile is readonly - and close() adds a terminator\n #to mark the file as empty.\n try:\n log.unlink()\n except OSError: pass\n\n #If there are any partial txn's left we have failed to recover.\n if unrecoverable: raise RecoveryError(\"Partial uncommitted txn found\")", "def reload_systemwide_ca_store(self):\n\n raise NotImplementedError()", "def _restore(self, restore_folder):\n tf.reset_default_graph()\n self.init_session()\n ckpt = tf.train.get_checkpoint_state(restore_folder)\n self.saver = tf.train.import_meta_graph('{}.meta'.format(ckpt.model_checkpoint_path))\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\n print(\"Model restored from {}\".format(restore_folder))", "def load_ckpt(self):\n status = self.ckpt_manager.restore_or_initialize()\n print(f\"ckpt_manager.restore_or_initialize(): {status}\")", "def reconstruct(self):\n if os.path.exists(self.dbname):\n with open(self.dbname, mode='rb') as db:\n self.cache = pickle.load(db)", "def retrieve(self):\n\t\timport shelve\n\t\timport sys\n\t\timport glob\n\n\t\td = shelve.open(\".storedmanager\")\n\t\tif not d.has_key(\"storedmanager\"):\n\t\t\t# Check if already is done the file\n\t\t\tif len(glob.glob(\"*.tar.gz\")) != 0:\n\t\t\t\tmessage = \"clustermanager.retrive: The job is already DONE!\"\n\t\t\telse:\n\t\t\t\tmessage = \"\\nclustermanager.retrieve: ERROR Not found the\" \\\n\t\t\t\t\t+\" class stored in .storedmanager file\"\n\t\t\tsys.exit(message)\n\n\t\tcopyself = d[\"storedmanager\"]\n\t\t\n\t\t# Putting the datamembers: FIXME: If you want all the datanames\n\t\t# do it with the __dict__ and __setattr__ methods\n\t\tself.nameID = copyself.nameID\n\t\tself.jobsid = copyself.jobsid\n\t\tself.jobidevt = copyself.jobidevt\n\t\tself.tasksID = copyself.tasksID\n\t\tself.outputfiles = copyself.outputfiles\n\t\tself.njobs = copyself.njobs\n\t\tself.basedir = copyself.basedir\n\t\tself.pkgpath = copyself.pkgpath\n\t\tself.libsdir = copyself.libsdir\n\t\tself.nevents = copyself.nevents\n\t\ttry:\n\t\t\tself.taskstatus = copyself.taskstatus\n\t\texcept AttributeError:\n\t\t\t# It means we have not yet done a previous harvest\n\t\t\tpass\n\t\t\n\t\td.close()", "def recover( self, job, job_wrapper ):\n job_state = self.__job_state( job, job_wrapper )\n job_wrapper.command_line = job.get_command_line()\n state = job.get_state()\n if state in [model.Job.states.RUNNING, model.Job.states.QUEUED]:\n log.debug( \"(LWR/%s) is still in running state, adding to the LWR queue\" % ( job.get_id()) )\n job_state.old_state = True\n job_state.running = state == model.Job.states.RUNNING\n self.monitor_queue.put( job_state )", "def autodiscover(self):\n # list sub folders\n p = Path(self.path)\n job_folder_lst = [str(x) for x in p.iterdir() if x.is_dir()]\n # create jobs\n jobs = {}\n for job_folder in job_folder_lst:\n if Job.verify_folder_name(job_folder):\n job = Job()\n job.create_from_path(job_folder)\n now = datetime.datetime.now()\n age = now - job.ctime\n if age.days > self.job_duration:\n 
job.destroy()\n else:\n jobs[job.ctime.strftime(\"%Y-%m-%d %H:%M:%S\")] = job\n # store jobs against creation time and status\n for creation_time in sorted(jobs, reverse=True):\n job = jobs[creation_time]\n if job.status == 'running':\n self.running.append(job)\n else:\n self.stopped.append(job)", "def reload(self):\n self.restore()", "def restore(self):\n\n self.brain.restore_checkpoint()", "def __restoreBackup(self):\n pass #FIXME!!!", "def Restore(self):\n\n return self._persistentHandler.Restore()", "def test_resume_restore(self):\n if not self.backupset.resume:\n self.fail(\"Resume must be True for this test\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.log.info(\"Start to flush bucket\")\n self._all_buckets_flush()\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version,\n force_updates=self.backupset.force_updates,\n no_resume=True)\n state = \"\"\n while state not in (\"FINISHED\", \"EXECUTING\"):\n state = restore_result.state\n self._kill_cbbackupmgr()\n self.assertFalse(self._check_output(\"success\", restore_result.result()))\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")", "def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)", "async def restore(cls, state, force=None):\n try:\n pk = state[f'{cls.__model__}_{cls.__pk__}']\n except KeyError:\n pk = state[cls.__pk__]\n\n # Check the default for force reloading\n if force is None:\n force = not cls.objects.table.bind.manager.cache\n\n # Check if this is in the cache\n cache = cls.objects.cache\n obj = cache.get(pk)\n if obj is None:\n # Create and cache it\n obj = cls.__new__(cls)\n cache[pk] = obj\n\n # This ideally should only be done if created\n await obj.__restorestate__(state)\n elif force or not obj.__restored__:\n await obj.__restorestate__(state)\n\n return obj", "def read_old_persistent_actions(self) -> None:\n global persistent_directory # pylint: disable=invalid-name\n path = os.path.join(persistent_directory.value, self.name + \".actions.yaml\")\n if not os.path.exists(path):\n Logger.why(f\"Must run actions because missing the persistent actions: {path}\")\n self.must_run_action = True\n return\n\n try:\n with open(path, \"r\") as file:\n data = yaml.full_load(file.read())\n self.old_persistent_actions = PersistentAction.from_data(data[\"actions\"])\n self.old_persistent_outputs = data[\"outputs\"]\n Logger.debug(f\"Read the persistent actions: {path}\")\n\n except BaseException: # pylint: disable=broad-except\n Logger.warning(f\"Must run actions \" f\"because read the invalid persistent actions: {path}\")\n self.must_run_action = True", "def _restore(self):\n\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n 
self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1]) # Robust enough?\n return restored_step\n logging.info('Starting training from scratch.')\n return 0", "def restore_shelves():\n shelf_dir = os.path.join(clib.get_env_dir(), 'prefs', 'shelves')\n for shelf in os.listdir(shelf_dir):\n if shelf.endswith('.deleted'):\n restored_shelf = os.path.join(shelf_dir, shelf.split('.deleted')[0])\n deleted_shelf = os.path.join(shelf_dir, shelf)\n # check if it has not been somehow restored\n if os.path.isfile(restored_shelf):\n os.remove(deleted_shelf)\n else:\n os.rename(deleted_shelf, restored_shelf)\n clib.dialog_restart()", "def restore_model(self, resume_iters):\n if self.resume_iters:\n checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)\n print('Loading the trained models from step {}...'.format(resume_iters))\n G_path = os.path.join(checkpoint_dir, '{}-G.ckpt'.format(resume_iters))\n self.G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))", "def test_restore_with_erlang_crash_and_restart(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.kill_erlang(self.os_name)\n conn.start_couchbase()\n conn.disconnect()\n timeout_now = 600\n output = restore_result.result(timeout=timeout_now)\n self.assertTrue(self._check_output(\"Restore completed successfully\", output),\n \"Restore failed with erlang crash and restart within 180 seconds\")\n self.log.info(\"Restore succeeded with erlang crash and restart within 180 seconds\")" ]
[ "0.60679954", "0.572598", "0.5668713", "0.56018865", "0.5573478", "0.5523758", "0.550701", "0.5487265", "0.5445101", "0.5402197", "0.53382146", "0.530911", "0.5260216", "0.52394015", "0.5200684", "0.51693606", "0.51428294", "0.5140267", "0.51309043", "0.5100432", "0.50839096", "0.50827074", "0.5068195", "0.50396365", "0.50371933", "0.50370896", "0.5023222", "0.5021396", "0.50157434", "0.50066715" ]
0.6704211
0
Links to output file(s) FITS %(fits_file)s %(xml_link)s
def files(self): if os.path.exists(self.xml_file): self.xml_link='<li>XML <a href="../../%(xml_file)s?download=true">%(xml_file)s</a></li>'.format(self.xml_file) else: self.xml_link='' # <a href="../../%(xml)s?download=true">%(xml)s</a></li> #self.xml = glob.glob('*.xml')[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _open_output_files(self):\n self.links_outfile = open(self.opts.links_outfile, 'wb')", "def write_fits(self, name=None, output_path=None):\n pass", "def save_as_fits(self, filename):", "def main(quiet=False):\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n newpath = r'%s/models' % os.getcwdu()\n if not os.path.exists(newpath): os.makedirs(newpath)\n newpath = r'%s/out' % os.getcwdu()\n if not os.path.exists(newpath): os.makedirs(newpath)\n existing = sorted(os.listdir('%s/%s' % (os.getcwdu(), 'models'))) \n\n urls = [\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/parameters.fits.gz',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2J.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2H.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/2K.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I1.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I2.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I3.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/I4.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M1.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M2.fits',\n 'http://www.mpia-hd.mpg.de/~robitaille/share/andreas/M3.fits',\n 'http://caravan.astro.wisc.edu/protostars/files/extinction_law.tar.gz'\n ]\n file_names = [\n 'models/parameters.fits.gz',\n 'models/2J.fits',\n 'models/2H.fits',\n 'models/2K.fits',\n 'models/I1.fits',\n 'models/I2.fits',\n 'models/I3.fits',\n 'models/I4.fits',\n 'models/M1.fits',\n 'models/M2.fits',\n 'models/M3.fits',\n 'models/extinction_law.tar.gz']\n\n for i in range(len(urls)):\n if not os.path.isfile(file_names[i]):\n f = open(file_names[i], 'wb')\n f.write(urllib2.urlopen(urls[i]).read())\n f.close()\n print('Downloaded %s from %s' % (file_names[i],urls[i]), file=output_stream)\n\n if not os.path.isfile('modesl/extinction_law.ascii'):\n f = tarfile.open('models/extinction_law.tar.gz', 'r:gz')\n try: f.extractall()\n finally: f.close()", "def write_fit_config( input_path, output_path, output_config_path, config_path =\"./\" ) :\n\n\n\tfit_template = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<config>\n\n\t<!-- Job to run -->\n\t<jobType>TofEffFitter</jobType>\n\t<Task name=\"TofEffFitter\" type=\"TofEffFitter\" config=\"\" nodePath=\"TofEffFitter\" />\n\n\t<TofEffFitter>\n\t\t<Logger color=\"true\" globalLogLevel=\"info\" logLevel=\"all\" />\n\t\t<Reporter> <output url=\"{report_file}\" width=\"700\" height=\"500\" /> </Reporter>\n\n\t\t<input url=\"{input_path}\"/> \n\n\t\t<output path=\"{output_path}\">\n\t\t\t<data>{product_file}</data>\n\t\t\t<params>{params_file}</params>\n\t\t</output>\n\n\t\t<!-- the bins into which the 9 centrality bins are mapped. 
-->\n\t\t<Include url=\"../common/centralityMap.xml\" />\n\n\t\t<Style>\n\t\t\t<TofEff lw=\"3\" ms=\"1\" mst=\"8\" />\n\t\t</Style>\n\n\t</TofEffFitter>\n\n</config>\"\"\"\n\n\n\treport = pjoin( output_path, \"rp_\" + t_product_file.format( ext=\"pdf\" ) )\n\tproduct = t_product_file.format( ext=\"root\" )\n\tparams = pjoin( output_config_path, t_product_file.format( ext=\"xml\" ) )\n\n\twith open( pjoin( config_path, 'fit.xml' ), 'w' ) as f :\n\t\tf.write( fit_template.format( input_path = output_path, output_path = output_path, params_file = params, product_file=product, report_file=report ) )", "def call_link_reports(args) ->None:\n\n if not args['no_cmd']:\n print_link_reports(args['report-id'])\n if args['yaml']:\n yaml_file(args['report-id'])\n if args['csv']:\n csv_file(args['report-id'])\n if args['json']:\n json_file(args['report-id']) \n\n config.logger.info(\"Link Report generated according to the format chosen by user\")", "def writeInfoOutput(self, output, prettyname):\n # html gubbins\n output.write(\"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.01 Transitional//EN\\\">\\n\")\n output.write(\"<html>\\n\")\n\n # bumf that says file was autogenerated\n self.writeHeaderOutput((\"<!\",\">\"), output, prettyname)\n\n output.write(\"<head>\\n\")\n output.write(\"<title>\" + self.title + \" documentation</title>\\n\")\n output.write(\"<h1>\" + self.title + \" documentation</h1>\\n\")\n output.write(\"</head>\\n\")\n output.write(\"<body>\\n\")\n output.write(\"<p>\" + self.bumfText + \"</p>\\n\")\n output.write(\"\\n<table border=1 cellpadding=5>\\n\")\n output.write(\" <tr>\\n\")\n output.write(\" <th>Symbol</th>\\n\")\n output.write(\" <th>Type</th>\\n\")\n output.write(\" <th>Description</th>\\n\")\n output.write(\" </tr>\\n\")\n\n for d in self.data:\n # now just print it out\n line = d.buildSimpleInfoDeclaration()\n if len(line) != 0:\n output.write(line+\"\\n\\n\")\n output.write(\"</table>\\n<hr>\\n\")\n\n for d in self.data:\n # now just print it out\n line = d.buildFullInfoDeclaration(1)\n if len(line) != 0:\n output.write(line+\"\\n\\n\")\n\n output.write(\"</body>\\n</html>\\n\")", "def download(self, outputfile: str, outputformat: str):\n pass", "def sitesXML(beachdata, outdir='.'):\n\n with open(outdir+'/surf_sites.xml','w') as outp:\n outp.write('<markers>\\r\\n')\n for isite in range(len(beachdata['name'])):\n outp.write('<marker lat=\"%6.3f' %beachdata['lat'][isite] + \\\n '\" lng=\"%6.3f' %beachdata['lon'][isite] + \\\n '\" name=\"' + beachdata['name'][isite].replace(' ','_').replace('/','-') + '\"/>\\r\\n')\n outp.write('</markers>\\r\\n')\n outp.close()", "def makexmlfunc(healpix,ra,dec,week1,week2,distance):\n\t\n\tif week1!=week2:\n\t\tidentity=\"%06d_%d_%d_w%03d_w%03d\" %(healpix,ra,dec,week1,week2)\n\t\tltcube=\"%s/lat_ltcube_weekly_w%03d_w%03d_p203_v001.fits\" %(cfg.home,week1,week2)\n\t\tspacecraft=\"%s/w%03d_w%03d_newspacecraft.fits\" %(cfg.ispace,week1,week2)\n\telse:\n\t\tidentity=\"%06d_%d_%d_w%03d\" %(healpix,ra,dec,week1)\n\t\tltcube=\"%s/lat_spacecraft_weekly_w%03d_p203_v001_ltcube.fits\" %(cfg.home,week1)\n\t\tspacecraft=\"%s/lat_spacecraft_weekly_w%03d_p202_v001.fits \" %(cfg.ispace,week1)\n\n\tregion_filtered=\"%s_region_filtered_gti.fits\" %(identity)\n\tfermisources=\"%s_fermisources_model.xml\" %(identity)\n\tinputmodel=\"%s_input_model.xml\" %(identity)\n\tfermis=\"%s_fermis.xml\" %identity\n\tresponse=\"P7REP_SOURCE_V15\"\n\tmakexmllog=\"%s_output_makexml.log\" %identity\n\tglobal extendedsource\n\tglobal 
numberofextendedsources\n\textendedlog=\"%s_number_of_extendedsources.log\" %identity\n\tExtendedList=\"ExtendedList.txt\"\n\tOthersList=\"OthersList.txt\"\n\n\t\n\twith open (makexmllog,'r') as outputFile: #opens the makexmllog file from makesyfunc. This document contains info about the extended sources.\n\t\t\n\t\tfor line in outputFile:\n\t\t\t\n\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\tif line.startswith('Added')==True:\n\t\t\t\t\ta,b=line.split('and ')\t\n\t\t\t\t\tb1,b2,b3=b.split(' ')\n\t\t\t\t\n\t\t\t\t\tnumberofextendedsources=int(b1) #b1 is the number of extended sources\n\toutputFile.close()\n\toutputFile=open(inputmodel, 'w')\n\tprint numberofextendedsources\n\n\tif numberofextendedsources==1: #if there is an extended source\n\t\twith open (makexmllog,'r') as outputFile:\n\t\t\n\t\t\tfor line in outputFile:\n\t\t\t\n\t\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\t\tif line.startswith('Extended')==True:\n\t\t\t\t\t\tprint line\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tc,d=line.split(' in')\n\t\t\t\t\t\n\t\t\t\t\t\tc1,c2,c3,c4=c.split(' ')\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\textendedsource=str(c3) #extracts the name of the extended source from makexmllog\n\t\n\n\t\t\n\n\n\t\toutputFile.close()\t\n\n\n\t\n\n\t\twith open(\"%s\" %fermisources) as thefile: #opens the xml file that was created from makesyfunc\n\t\t\tfor line in thefile:\n\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==True:\n\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tspecial=str.replace(line,'%s.fits'%extendedsource,'%s/%s.fits' %(cfg.homesy,extendedsource)) \n\t\t\t\t\tprint special #replace with the correct path to the extendedsource(Templates folder)\n\t\t\t\n\t\t\t\t\tspecial1=str.replace(special,'type=\"SpatialMap\"','type=\"SpatialMap\" map_based_integral=\"true\"')\n\t\t\t\t\tprint special1 #instruction from fermi tutorial, you must add map_based...\n\t\t\t\t\toutputFile=open(fermis, 'w') #write to fermis, the original xml with the right path to the extended source\n\t\t\t\t\twith open(\"%s\" %fermisources,'r') as infile:\n\t\t\t\t\t\tfor line in infile:\n\t\t\t\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==False:\n\t\t\t\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutputFile.write(special1)\n\t\t\t\t\toutputFile.close()\n\t\t\t\t\t\t\t\t\t\n\n\n\t\t\t\n\t\toutputFile=open(inputmodel, 'w') #final xml file. 
contains the right path and the source info of \"your\" source.\n\t\twith open(fermis,'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\t\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\t\t\t\n\n\t\toutputFile.close()\n\t\n\t\twith open(\"%s_diffrsp.log\" % (identity), 'w') as outsyputFile: #run diffrsp if you have an extended source.\n\t\t\tsubprocess.call(['%s' %(cfg.pythoncommand),'gtdiffrsp.py', '%s' %(region_filtered),'%s' %(spacecraft), '%s' %inputmodel, '%s' %(response),'%s' %identity ],stdout=outsyputFile)\n\t\t\t\n\t\twith open(ExtendedList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\t\t\t\t\n\tif numberofextendedsources==0: #if there is no extended source\n\t\toutputFile=open('%s' %(inputmodel), 'w') #write to inputmodel, \"your\" source\n\t\twith open('%s' %(fermisources),'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\n\t\t\t\n\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\toutputFile.close()\n\tif numberofextendedsources>1:\n\t\twith open(OthersList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\n\tif numberofextendedsources==1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\\n\\\n \t%s\"%(numberofextendedsources,extendedsource))\n\t\toutsyputFile.close()\n\n\tif numberofextendedsources !=1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\" %(numberofextendedsources))\n\t\toutsyputFile.close()", "def download_second_request(url=None, filename=None, **kwargs):\n with open(filename, 'w') as 
output:\n output.write('some successful second response XML')", "def write_downloaded_links():\n global downloaded_links_fn\n text_file = open(downloaded_links_fn,\"w\")\n for link in downloaded_links.items():\n text_file.write(link[0] + \"\\n\")\n text_file.close()", "def write_sitemap ( self ):\n try:\n self.output_fd = open ( file=dflt_cfg.DFLT_CFG[ OUTPUT_PATH ], mode='w' )\n self.print_url_links ( self.root )\n except (PermissionError, AttributeError) as err:\n self.logger.error ( \"Error {0} occurred. Output file {1} cannot be created\".format ( err, \\\n dflt_cfg.DFLT_CFG[\n OUTPUT_PATH ] ) )\n except Exception as err:\n self.logger.error ( \"Error {0} occurred while writing sitemap in output file: {1}\".format ( err, \\\n dflt_cfg.DFLT_CFG[ OUTPUT_PATH ] ) )\n self.output_fd.close ( )\n else:\n print(\"Sitemap for {} is written in {}.\".format(dflt_cfg.DFLT_CFG[DOMAIN], dflt_cfg.DFLT_CFG[ OUTPUT_PATH ]))\n print( \"Logs (Broken or dead URLs along with application logs) for domain {0} are available in {1} directory.\".format ( dflt_cfg.DFLT_CFG[DOMAIN], \"./logs\" ) )\n self.output_fd.close ( )", "def collectLinks(self, output):\n pass", "def _go_through_summary_reports(self):\n\n for result_file in self.result_files:\n self.cur_8digit_dir = os.path.split(result_file)[0]\n try:\n with open(result_file) as f_in:\n sum_rep = json.load(f_in)\n if sum_rep.has_key('Artifacts'):\n for linked_artifact in sum_rep['Artifacts']:\n artifact_path = linked_artifact['Path']\n # For now assume only files are linked (no folders)\n rel_path_from_results = os.path.join(self.cur_8digit_dir, artifact_path)\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results',\n rel_path_from_results))\n if artifact_path.endswith('.json'):\n function_tag = artifact_path.replace('.','_').replace('/','_')\n\n if hasattr(self, function_tag):\n getattr(self, function_tag)()\n except IOError:\n print '{0} does not exist on this filesystem. 
I cannot be check for references '\\\n 'to other files.'.format(result_file)", "def get_export(pages_param, out_filename):\r\n\r\n url = 'http://lesswrong.wikia.com/index.php?title=Special:Export&action=submit'\r\n data = urllib.urlencode({'catname': '', 'pages': pages_param, 'templates': '1'})\r\n feed = urllib2.urlopen(url, data)\r\n buf = feed.read()\r\n\r\n out = open(out_filename, 'w')\r\n out.write(buf)\r\n\r\n print 'Export saved %s' % out_filename", "def test_download_links():\n\n # dir to download data to\n out_dir = 'test/download_data'\n\n # remove out_dir if it already exists and make a new one\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n os.system('mkdir -p %s'%out_dir)\n\n # List of all available fits\n fit_names = surfinBH.fits_collection.keys()\n for name in fit_names:\n surfinBH.DownloadData(name=name, data_dir=out_dir)\n\n # allow for both naming formats surfinBH7dq2 and NRSur7dq4Remnant\n if 'surfinBH' in name:\n name_tag = name.split('surfinBH')[-1]\n else:\n name_tag = name.split('NRSur')[-1].split('Remnant')[0]\n\n # check that it has the right name\n assert(os.path.isfile('%s/fit_%s.h5'%(out_dir, name_tag)))\n # check that the fit_name matches with the name in the attributes\n # of h5 file.\n h5file = h5py.File('%s/fit_%s.h5'%(out_dir, name_tag), 'r')\n assert(name_tag == h5file.attrs['name'].decode('utf-8'))\n h5file.close()", "def output(\n self,\n fileformat,\n **keywords\n ):\n \n # add the default parameters, they will be checked against the keywords\n defaults = {\n 'ref':'cogid',\n 'entry':'concept',\n 'missing':0,\n 'filename':'lingpy-{0}'.format(str(date.today())),\n }\n \n # compare with keywords and add missing ones\n for key in defaults:\n if key not in keywords:\n keywords[key] = defaults[key]\n\n if fileformat == 'paps.nex':\n paps = self.get_paps(\n ref=keywords['ref'],\n entry=keywords['entry'],\n missing=keywords['missing']\n )\n pap2nex(\n self.cols,\n paps,\n missing=keywords['missing'],\n filename=keywords['filename']+'.paps'\n )\n\n if fileformat == 'taxa':\n out = ''\n for col in self.cols:\n out += col + '\\n'\n f = open(keywords['filename'] + '.taxa','w')\n f.write(out)\n f.close()", "def _OpenOutputFiles(self):\n self.gfile = open(self.geomout, \"w\")\n self.efile = open(self.energyout, \"w\")\n self.PrintEnergyHeader()", "def LinkAnat(self):\n\n if self.anatomical is None:\n return\n for entry in self.info.keys():\n info = self.info[entry]\n if info.has_key('anat_link'):\n self.LinkFiles(info['outdir'], self.anatomical)", "def export_xml(self, filename, full_export = False):\n \n # Private functions to write blocks of text\n # --------------------------\n def print_openrave(f, model):\n # print_openrave - OpenRAVE data\n # For compatibility only...\n f.write( ' <Openrave>\\n')\n f.write( ' <name>{0}</name>\\n'.format(model.name))\n f.write( ' <xml>{0}</xml>\\n'.format(model.or_xml))\n f.write( ' <transf>')\n for n in model.or_transf.flat[:]:\n f.write('{0:6f}'.format(n))\n f.write( '</transf>\\n')\n f.write( ' </Openrave>\\n')\n\n # --------------------------\n def print_Points(f, model, full_export):\n # print_Points - Print all Point3D entries\n\n f.write( ' <Points>\\n')\n for i in range(model.pts3D.shape[1]):\n print_Point(f, model, i)\n \n if full_export:\n for j in range(model.pt_info[i].desc.shape[0]):\n print_observ(f, model.pt_info[i], j, \\\n self.desc_name[self.desc_type[i]])\n f.write( '</Point>\\n');\n \n f.write( ' </Points>\\n');\n\n # --------------------------\n def print_observ(f, pt, idx_pt, desc_name):\n # 
<Observation camera_id=\"n\" desc_type=\"SIFT\" loc=\"x;y;scale;orientation\"\n # desc=\"a;b;c;...\">\n f.write( ' <Observation ');\n f.write( 'camera_id=\"{0}\" '.format(pt.cam_id[idx_pt]))\n f.write( 'desc_type=\"{0}\" '.format(desc_name))\n f.write( 'loc=\"')\n for l in pt.locs[idx_pt, :].ravel():\n f.write('{0:6f} '.format(l))\n f.write( '\" ')\n f.write( 'desc=\"')\n for d in pt.desc[idx_pt, :].ravel():\n f.write( '{0:6f} '.format(d))\n f.write( '\"/>\\n')\n\n # --------------------------\n def print_Point(f, model, idx_pt):\n # <Point p3d=\"x;y;z\" nviews=\"\" avg_err=\"\" color=\"R;G;B\" desc_type=\"SIFT\"\n # desc=\"a;b;c;...\">\n f.write( ' <Point ');\n f.write( 'p3d=\"{0:6f} {1:6f} {2:6f}\" '.format(model.pts3D[0, idx_pt], \\\n model.pts3D[1, idx_pt], \\\n model.pts3D[2, idx_pt]))\n f.write( 'nviews=\"{0:d}\" '.format(model.num_views[idx_pt]))\n f.write( 'avg_err=\"{0:6f}\" '.format(model.avg_err[idx_pt]))\n f.write( 'color=\"{0} {1} {2}\" '.format(model.color3D[0,idx_pt], \\\n model.color3D[1,idx_pt], \\\n model.color3D[2,idx_pt]))\n f.write( 'desc_type=\"{0}\" '\\\n .format(model.desc_name[ model.desc_type[idx_pt] ]))\n f.write( 'desc=\"')\n for d in model.desc[idx_pt].ravel():\n f.write( '{0:6f} '.format(d))\n f.write( '\">\\n')\n\n # --------------------------\n def print_Cameras(f, model):\n # print_Cameras - Print all Camera entries\n\n f.write( ' <Cameras>\\n')\n for idx, cam in enumerate(model.cam_poses.T):\n print_Camera(f, cam, idx)\n f.write( ' </Cameras>\\n')\n\n # --------------------------\n def print_Camera(f, cpose, idx_cam):\n # print_Camera - Camera entry\n\n f.write( ' <Camera ')\n f.write( 'id=\"{0}\" '.format(idx_cam))\n f.write( 'rot_type=\"quat\" ')\n q_t = tf_format.tf_format('quat', cpose)\n f.write( 'rot=\"')\n for val in q_t[:4].ravel():\n f.write( '{0:6f} '.format(val))\n f.write( '\" ')\n f.write( 'tx=\"')\n for val in q_t[4:].ravel():\n f.write( '{0:6f} '.format(val))\n f.write( '\"/>\\n')\n\n # --------------------------\n # Print data to file\n\n # First, update structures\n self.getNumViews()\n self.getNumPointsInCam()\n self.getAverageErr()\n\n with open(filename, 'w') as f:\n f.write('<Model name=\"{0}\" version=\"{1}\">\\n'.format(self.name, \\\n self.version) )\n # print_openrave(f, model)\n print_Points(f, self, full_export)\n if full_export:\n print_Cameras(f, self)\n f.write('</Model>\\n')", "def write_package_index(self, package, files):\n self.logger.info('writing index for %s', package)\n pkg_dir = self.output_path / 'simple' / package\n mkdir_override_symlink(pkg_dir)\n with tempfile.NamedTemporaryFile(mode='w', dir=str(pkg_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Links for {}'.format(package))\n ),\n tag.body(\n tag.h1('Links for {}'.format(package)),\n ((tag.a(\n f.filename,\n href='{f.filename}#sha256={f.filehash}'.format(f=f), # noqa: E501\n rel='internal'), tag.br())\n for f in files)\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name, str(pkg_dir / 'index.html'))\n try:\n # Workaround for #20: after constructing the index for a\n # package attempt to symlink the \"canonicalized\" package\n # name to the actual package directory. The reasons for\n # doing things this way are rather complex...\n #\n # The older package name must exist for the benefit of\n # older versions of pip. 
If the symlink already exists *or\n # is a directory* we ignore it. Yes, it's possible to have\n # two packages which both have the same canonicalized name,\n # and for each to have different contents. I don't quite\n # know how PyPI handle this but their XML and JSON APIs\n # already include such situations (in a small number of\n # cases). This setup is designed to create canonicalized\n # links where possible but not to clobber \"real\" packages\n # if they exist.\n #\n # What about new packages that want to take the place of a\n # canonicalized symlink? We (and TransferState.commit)\n # handle that by removing the symlink and making a\n # directory in its place.\n canon_dir = pkg_dir.with_name(canonicalize_name(pkg_dir.name)) # noqa: E501\n canon_dir.symlink_to(pkg_dir.name)\n except FileExistsError:\n pass", "def extract_urls_from_file(f, all_abns, links_existed):\n content = open(CURL_OUTPUT + f).read()\n soup = BeautifulSoup(content)\n\n fh = open(ALL_LINKS + 'all_links.txt', 'a')\n\n cnt = 0\n all_rows = soup.find_all('tr', {'class': 'rgRow'})\n for row in all_rows:\n all_cells = row.find_all('td')\n abn = all_cells[0].text\n if (abn in all_abns):\n link = all_cells[1].findChildren('a')[0]['href']\n if not link in links_existed:\n print(link)\n download_page(link, f, cnt)\n fh.write(link + '\\n')\n cnt = cnt + 1\n\n fh.close()", "def test_with_links(self):\n self.result.figure_link = 'some_link'\n self.result.start_figure_link = 'other_link'\n figure_link, start_link = fitting_report.get_figure_paths(self.result)\n self.assertEqual(figure_link, os.path.join('figures', 'some_link'))\n self.assertEqual(start_link, os.path.join('figures', 'other_link'))", "def url_to_file():\n urls = argToList(demisto.getArg('urls'))\n files = []\n for i in range(len(urls)):\n fileEntry = fileResult('url_' + str(i + 1), '[InternetShortcut]\\nURL=' + str(urls[i]))\n files.append(fileEntry)\n demisto.results(files)", "def create_xml_atlas(lfiles, foxml, oid=\"face\"):\n\n impl = xml.dom.minidom.getDOMImplementation()\n doc = impl.createDocument(None, \"some_tag\", None)\n top_element = doc.documentElement\n\n for i, fn in enumerate(lfiles):\n e = doc.createElement('subject')\n e.setAttribute('id', \"subj{}\".format(i))\n\n v = doc.createElement('visit')\n v.setAttribute('id', \"experiment\")\n\n f = doc.createElement('filename')\n f.setAttribute('object_id', oid)\n\n t = doc.createTextNode(os.path.abspath(fn))\n\n f.appendChild(t)\n v.appendChild(f)\n e.appendChild(v)\n\n top_element.appendChild(e)\n\n with open(foxml, \"w\") as fo:\n fo.write(doc.toprettyxml())", "def report(self, output_dir):", "def link(self, fname):\n return fname", "def link_function_calls(tu, all_nodes, annotation_set, src_to_output,\n anchored_nodes):\n fn_calls = find_cursor_kind(tu.cursor, cindex.CursorKind.CALL_EXPR)\n fn_calls = [fn for fn in fn_calls if fn.location.file.name == tu.spelling]\n\n for call in fn_calls:\n defn = find_reference_definition(call, all_nodes)\n if defn is None:\n continue\n\n file = defn.location.file.name\n if file not in src_to_output:\n continue\n\n extent = cindex.Cursor_spellingNameRange(call, 0, 0)\n\n target_file = src_to_output[file]\n target_hash = str(defn.hash)\n target_href = target_file + '#' + target_hash\n\n annotation_set.add_tag('a',\n [('href', target_href)],\n extent)\n\n anchored_nodes[defn.hash] = defn", "def main():\n # Construct the feed generator\n f = LogBufferFeed(FEED_DIR)\n f.MAX_AGE = 24 * 60 * 60 # 1 day\n f.FEED_META['feed.title'] = '%s Referrering Links' % SITE_NAME\n 
f.FEED_META['feed.tagline'] = \\\n 'New referring links from Apache access.log on %s' % SITE_NAME\n \n # Load up tail of access log, parse, and filter\n new_lines = bookmark_tailgrep(ACCESS_LOG, max_initial_lines=100000)\n all_events = parse_access_log(new_lines)\n events = [ x for x in all_events if event_filter(x) ]\n \n # Scan through latest events for new referrers\n referrers_seen = shelve.open(REFER_SEEN)\n new_referrers = []\n for evt in events:\n k = '%(referrer)s -> %(path)s' % evt\n if not referrers_seen.has_key(k):\n referrers_seen[k] = 1\n new_referrers.append( (evt['referrer'], evt['path']) )\n referrers_seen.close()\n \n # If there were new referrers found, insert a new entry.\n if len(new_referrers) > 0:\n \n # Build a list of hyperlinks for referrers\n links_out = [\n LINK_TMPL % {\n 'SITE_ROOT' : SITE_ROOT,\n 'referrer' : x[0],\n 'path' : x[1],\n }\n for x in new_referrers\n ]\n \n # Build a summary for this entry.\n summary = SUMMARY_TMPL % { \n 'count' : len(new_referrers), \n 'links' : \"\\n\".join(links_out)\n }\n \n # Construct and append a new entry\n entry = FeedEntryDict({\n 'title' : '%s new referrers' % len(new_referrers),\n 'link' : '',\n 'summary' : summary\n })\n f.append_entry(entry)\n\n # Output the current feed entries as both RSS and Atom\n open(FEED_NAME_FN % 'rss', 'w').write(f.scrape_rss())\n open(FEED_NAME_FN % 'atom', 'w').write(f.scrape_atom())" ]
[ "0.6116024", "0.53646415", "0.53519595", "0.52838266", "0.5276019", "0.5173674", "0.5160593", "0.515688", "0.5144383", "0.51263654", "0.51257384", "0.50988704", "0.5097096", "0.50741714", "0.50694925", "0.5046129", "0.5036165", "0.50250024", "0.50080687", "0.49870986", "0.4951259", "0.49510527", "0.49496675", "0.49381593", "0.4929215", "0.49264264", "0.49072802", "0.49010777", "0.48796502", "0.48655325" ]
0.60268843
1
FITS file summary Read back the FITS file, display numerical column information. %(fits_summary_table)s flux13 and unc_flux13 are not in the FITS file, but set to [Unc_]Flux_Density 1e13 for numerical display.
def fits_summary(self): t = pyfits.open(self.fits_file)[1].data # remove columns that have multiple dimensions for j in range(3):#??? why for i,col in enumerate(t.columns): if len(col.array.shape)>1: t.columns.del_col(i) tt=pyfits.BinTableHDU.from_columns(t.columns) df = pd.DataFrame(tt.data) df['flux13*'] = df['Flux_Density']*1e13 df['unc_flux13*'] = df['Unc_Flux_Density']*1e13 summary = html_table(df.describe().T, float_format=FloatFormat(3), heading='', href=False, maxlines=50) self.fits_summary_table = summary.replace('%', '%%') # creates error?? #print ('Check: %s' % df)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summary_info_aeff(filename):\n # filename = self.out_filename('aeff')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='AEFF_2D')\n\n data = dict()\n\n # Copy over header info to the summary table\n data['LO_THRES'] = table.meta['LO_THRES']\n data['HI_THRES'] = table.meta['HI_THRES']\n\n # Summary stats on IRF file content\n data['EFFAREA_MAX'] = table['EFFAREA'].max()\n data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max() \n return data", "def summary_info_aeff(filename):\n # filename = self.out_filename('aeff')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='AEFF_2D')\n\n data = dict()\n\n # Copy over header info to the summary table\n data['LO_THRES'] = table.meta['LO_THRES']\n data['HI_THRES'] = table.meta['HI_THRES']\n\n # Summary stats on IRF file content\n data['EFFAREA_MAX'] = table['EFFAREA'].max()\n data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max()\n\n return data", "def read_from_fits(self, filename, unit='f_lam'):\n\n # Open the fits file\n try:\n hdu = fits.open(filename)\n except:\n raise ValueError(\"Filename not found\", str(filename))\n\n # Read in header information\n crval = hdu[0].header['CRVAL1']\n try:\n cd = hdu[0].header['CD1_1']\n except:\n print(\"CD1_1 keyword not found, using CDELT1\")\n cd = hdu[0].header['CDELT1']\n\n crpix = hdu[0].header['CRPIX1']\n naxis = hdu[0].header['NAXIS1']\n\n\n self.unit = unit\n\n # Read in object flux\n if np.ndim(hdu[0].data) == 3:\n try:\n self.flux = np.array(hdu[0].data[0, 0, :])\n self.flux_err = np.array(hdu[0].data[3, 0, :])\n except:\n self.flux = np.array(hdu[0].data[0,0,:])\n self.flux_err = np.array(hdu[0].data[1,0,:])\n else:\n self.flux = np.array(hdu[0].data[:])\n self.flux_err = None\n\n\n # Calculate dispersion axis from header information\n crval = crval - crpix\n self.dispersion = crval + np.arange(naxis) * cd\n\n self.raw_flux = self.flux\n self.mask = np.ones(self.dispersion.shape, dtype=bool)\n self.raw_flux_err = self.flux_err\n self.raw_dispersion = self.dispersion\n\n self.header = hdu[0].header", "def print_summary(stim_table):\n print(\n '{:<20}{:>15}{:>15}\\n'.format('Colname', 'No. 
conditions', 'Mean N/cond')\n )\n for colname in stim_table.columns:\n conditions, occurrences = np.unique(\n np.nan_to_num(stim_table[colname]), return_counts = True\n )\n print(\n '{:<20}{:>15}{:>15.1f}'.format(\n colname, len(conditions), np.mean(occurrences)\n )\n )", "def summary_stats(tile_summary):\n return \"Original Dimensions: %dx%d\\n\" % (tile_summary.orig_w, tile_summary.orig_h) + \\\n \"Original Tile Size: %dx%d\\n\" % (tile_summary.orig_tile_w, tile_summary.orig_tile_h) + \\\n \"Scale Factor: 1/%dx\\n\" % tile_summary.scale_factor + \\\n \"Scaled Dimensions: %dx%d\\n\" % (tile_summary.scaled_w, tile_summary.scaled_h) + \\\n \"Scaled Tile Size: %dx%d\\n\" % (tile_summary.scaled_tile_w, tile_summary.scaled_tile_w) + \\\n \"Total Mask: %3.2f%%, Total Tissue: %3.2f%%\\n\" % (\n tile_summary.mask_percentage(), tile_summary.tissue_percentage) + \\\n \"Tiles: %dx%d = %d\\n\" % (tile_summary.num_col_tiles, tile_summary.num_row_tiles, tile_summary.count) + \\\n \" %5d (%5.2f%%) tiles >=%d%% tissue\\n\" % (\n tile_summary.high, tile_summary.high / tile_summary.count * 100, TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >=%d%% and <%d%% tissue\\n\" % (\n tile_summary.medium, tile_summary.medium / tile_summary.count * 100, TISSUE_LOW_THRESH,\n TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >0%% and <%d%% tissue\\n\" % (\n tile_summary.low, tile_summary.low / tile_summary.count * 100, TISSUE_LOW_THRESH) + \\\n \" %5d (%5.2f%%) tiles =0%% tissue\" % (tile_summary.none, tile_summary.none / tile_summary.count * 100)", "def show_feature_summary(df, colname, display_uniques=False):\n\tprint('Details of feature:',colname)\n\tprint(' - datatype:',df[colname].dtypes)\n\tprint(' - col.size:',df[colname].shape)\n\tprint(' - NaN.vals:',df[colname].isnull().sum())\n\tif (display_uniques): print(' - uniqvals:',get_unique_values(df, colname))\n\tif (display_uniques): print(' - cnt.vals:',get_unique_counts(df, colname))\n\tprint(\"\\n\")", "def display_summary_statistics(tx, column_names=None):\n \n N, D = tx.shape\n \n mean = tx.mean(axis=0)\n median = np.median(tx, axis=0)\n std = tx.std(axis=0)\n max_ = tx.max(axis=0)\n min_ = tx.min(axis=0)\n n_undef = (tx <= -999.0).sum(axis=0)\n pct_undef = (tx <= -999.0).mean(axis=0) * 100\n\n column_names = column_names if column_names is not None else range(D)\n \n print(\" Column | Mean | Median | Std dev | Max | Min | # Undefined | % Undef \")\n for i, (col, m, med, s, mx, mn, nu, pu) in enumerate(zip(column_names, mean, median, std, max_, min_, n_undef, pct_undef)):\n print(f\"{i:2}-{col:27} | {m:8.3f} {med:8.3f} {s:8.3f} {mx:8.3f} \" + \n f\"{mn:8.3f} {nu:10.3f} {pu:7.3f}\")", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n testfile=obs.out_filename(\"events\", format=informat, dir=indir)\n try:\n table = Table.read(str(testfile), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \"+str(filename)\n continue\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n #for filetype in ['events']:\n for filetype in ['events', 'aeff', 
'edisp', 'psf_3gauss']:\n filename = obs.out_filename(filetype, format=informat, dir=indir)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(outfile))\n table.write(str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def get_flt_info(files=[], columns=['FILE', 'FILTER', 'INSTRUME', 'DETECTOR', 'TARGNAME', 'DATE-OBS', 'TIME-OBS', 'EXPSTART', 'EXPTIME', 'PA_V3', 'RA_TARG', 'DEC_TARG', 'POSTARG1', 'POSTARG2']):\n import astropy.io.fits as pyfits\n from astropy.table import Table\n \n if not files:\n files=glob.glob('*flt.fits')\n \n N = len(files)\n \n data = []\n\n for i in range(N):\n line = [os.path.basename(files[i]).split('.gz')[0]]\n if files[i].endswith('.gz'):\n im = pyfits.open(files[i])\n h = im[0].header\n else:\n h = pyfits.Header().fromfile(files[i])\n \n filt = get_hst_filter(h)\n line.append(filt)\n has_columns = ['FILE', 'FILTER']\n for key in columns[2:]:\n if key in h:\n line.append(h[key])\n has_columns.append(key)\n else:\n continue\n \n data.append(line)\n \n tab = Table(rows=data, names=has_columns)\n return tab", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n events_filename = Path(indir) / obs.filename('events', format=informat)\n try:\n table = Table.read(str(events_filename), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \" + 
str(events_filename)\n continue\n if table.meta[\"OBS_ID\"]!=obs.obs_id:\n continue\n # for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n # for filetype in ['events']:\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_table']:\n filename = Path(indir) / obs.filename(filetype, format=informat)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n elif filetype in ('psf_table'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(\n os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(indir + \"/\" + str(outfile)))\n table.write(indir + \"/\" + str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(indir + \"/\" + str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def loadFromFITSFile(filename, maxRows=0):\n\tultracam = False\n\tultraspec = False\n\tinputFile = astropy.io.fits.open(filename)\n\tfileInfo = inputFile.info()\n\t\n\tprint fileInfo\n\tprint len(inputFile)\n\tif len(inputFile)==4:\n\t\tprint \"We have an ULTRACAM file...\"\n\t\tultracam = True\n\tif len(inputFile)==2:\n\t\tprint \"We have an ULTRASPEC file...\"\n\t\tultraspec = True\n\t\n\tif ultraspec:\n\t\tCCDs = ['CCD 1']\n\tif ultracam:\n\t\tCCDs = ['CCD 1', 'CCD 2', 'CCD 3']\n\t\n\t\n\theaderBlock = str(inputFile[0].header)\n\t\n\t# Get some header info\n\ttargetName = generalUtils.getKeyValueFromFITSHeader('target', headerBlock)\n\tfilterName = generalUtils.getKeyValueFromFITSHeader('filter', headerBlock)\n\trunName = generalUtils.getKeyValueFromFITSHeader('Data file name', headerBlock, terminator=' ')\n\t\n\tfor CCD in CCDs:\n\t\theaders = inputFile[CCD].header\n\t\tdata = inputFile[CCD].data\n\t\tcolumns = inputFile[CCD].columns\n\t\n\t\tallData = []\n\t\n\t\tfor index, item in enumerate(data):\n\t\t\tallData.append(item)\n\t\t\tif maxRows!=0 and index>=maxRows-1: break\n\t\n\t\trows = len(allData)\n\t\tsys.stdout.write(\"\\rRead %d lines with the following 
columns, %s\\n\"%(rows, str(columns.names)))\n\t\tsys.stdout.flush()\n\t\n\t\t# Count the number of apertures in this data (using this method, the max is 9!)\n\t\tmaxApertureIndex = 0\n\t\tfor column in columns.names:\n\t\t\ttry:\n\t\t\t\tapertureIndex = int(column[-1])\n\t\t\texcept ValueError:\n\t\t\t\tapertureIndex = 0\n\t\t\tif apertureIndex > maxApertureIndex:\n\t\t\t\tmaxApertureIndex = apertureIndex\n\t\tprint \"This data file has %d apertures.\"%(maxApertureIndex)\n\t\n\t\tMJDIndex = columns.names.index('MJD')\n\t\tfor aperture in range(1, maxApertureIndex+1):\n\t\t\tprint \"Loading data for aperture #\", aperture\n\t\t\n\t\t\tphotometry = {}\n\t\t\tphotometry['MJD'] = \t\tdata.field('MJD')\n\t\t\tphotometry['exposure'] = \tdata.field('Expose')\n\t\t\tphotometry['FWHM'] = \t\tdata.field('FWHM')\n\t\t\tphotometry['beta'] = \t\tdata.field('beta')\n\t\t\tphotometry['x'] = \t\t\tdata.field('X_' + str(aperture))\n\t\t\tphotometry['y'] = \t\t\tdata.field('Y_' + str(aperture))\n\t\t\tphotometry['counts'] = \t\tdata.field('Counts_' + str(aperture))\n\t\t\tphotometry['sigma'] = \t\tdata.field('Sigma_' + str(aperture))\n\t\t\tphotometry['sky'] = \t\tdata.field('Sky_' + str(aperture))\n\t\t\tphotometry['sigma'] = \t\tdata.field('Sigma_' + str(aperture))\n\t\t\tphotometry['error'] = \t\tdata.field('Eflag_' + str(aperture))\n\t\t\n\t\t\tid = slots.getNextSlotID()\n\t\t\tprint \"new ID:\", id\n\t\t\tslot = photometryClasses.slotObject(id)\n\t\t\tslot.channels = ['ULTRASPEC']\n\t\t\tslot.target = targetName\n\t\t\tslot.filter = filterName\n\t\t\tslot.aperture = aperture\n\t\t\tslot.headers = headerBlock\n\t\t\tslot.runName = runName\n\t\t\tslot.setPhotometry(photometry)\n\t\t\tslot.setTimeColumn('MJD')\n\t\t\tslot.setYColumn('counts')\n\t\t\tslot.setYError('sigma')\n\t\t\tslot.CCD = CCD\n\t\t\tnumSlots = slots.addSlot(slot)\n\t\t\t# print \"Added the data to a new slot. 
Total number of slots is now: %d\"%(numSlots)\n\t\t\tprint slot\n\t\n\tinputFile.close()\n\t\t\n\treturn", "def print_summary(column, data):\n print(data[column].describe())\n print()\n print('Количество уникальных значений:', data[column].nunique())\n print('Количество пустых значений:', data[column].isnull().sum())", "def read_pypeit_fits_new(self, filename, unit='f_lam', exten=1):\n\n # Open the fits file\n try:\n hdu = fits.open(filename)\n except:\n raise ValueError(\"Filename not found\", str(filename))\n\n self.header = hdu[0].header\n self.unit = unit\n\n # Check pypeit header keywords\n #\n # dispersion\n if 'OPT_WAVE' in hdu[exten].columns.names:\n self.dispersion = hdu[exten].data['OPT_WAVE']\n if 'wave' in hdu[exten].columns.names:\n self.dispersion = hdu[exten].data['wave']\n # flux density\n if 'OPT_FLAM' in hdu[exten].columns.names:\n self.flux = hdu[exten].data['OPT_FLAM']* 1e-17\n if 'flux' in hdu[exten].columns.names:\n self.flux = hdu[exten].data['flux']* 1e-17\n\n # mask\n if 'OPT_MASK' in hdu[exten].columns.names:\n self.mask = np.array(hdu[exten].data['OPT_MASK'], dtype=bool)\n if 'mask' in hdu[exten].columns.names:\n self.mask = np.array(hdu[exten].data['mask'], dtype=bool)\n\n # ivar\n if 'OPT_FLAM_IVAR' in hdu[exten].columns.names:\n self.flux_ivar = hdu[exten].data['OPT_FLAM_IVAR']\n if 'ivar' in hdu[exten].columns.names:\n self.flux_ivar = hdu[exten].data['ivar']\n if 'sigma' not in hdu[exten].columns.names:\n # No flux density 1 sigma error stored in this format\n # Calculate the 1 sigma error.\n self.get_fluxden_error_from_ivar()\n # 1 sigma flux density error\n if 'OPT_FLAM_SIG' in hdu[exten].columns.names:\n self.flux_err = hdu[exten].data['OPT_FLAM_SIG'] * 1e-17\n\n\n\n # Mask all pixels where the flux error is 0\n new_mask = np.ones_like(self.mask, dtype=bool)\n new_mask[self.flux_err == 0] = 0\n self.mask = new_mask\n\n # self.dispersion_unit = 1. 
* u.AA\n # self.fluxden_unit = 1e-17*u.erg/u.s/u.cm**2/u.AA\n\n if 'TELLURIC' in hdu[exten].columns.names:\n self.telluric = hdu[exten].data['TELLURIC']\n if 'OBJ_MODEL' in hdu[exten].columns.names:\n self.obj_model = hdu[exten].data['OBJ_MODEL']* 1e-17", "def read_iact_fits_file(iact_file, sed_name, flux_unit='TeV-1 s-1 m-2'):\n\n data = Table.read(iact_file)\n m = data[\"SOURCE_FULL\"] == sed_name\n if not np.sum(m):\n raise ValueError(\"{0:s} not in list: {1}\".format(sed_name, data[\"SOURCE_FULL\"]))\n x = data[\"E_REF\"][m].data\n y = data[\"NORM\"][m].data * u.Unit(flux_unit).to(\"TeV-1 s-1 cm-2\")\n dy = 0.5 * (data[\"NORM_ERRN\"][m] + data[\"NORM_ERRP\"][m]).data * u.Unit(flux_unit).to(\"TeV-1 s-1 cm-2\")\n\n mask = np.isfinite(x)\n log_xerr = np.insert(np.diff(np.log10(x[mask])), 0, np.diff(np.log10(x[mask]))[0])\n log_x_edges = np.append(np.log10(x[mask]) - log_xerr / 2.,\n np.log10(x[mask][-1]) + log_xerr[-1] / 2.)\n x_edges = np.power(10., log_x_edges)\n x_min = x_edges[:-1]\n x_max = x_edges[1:]\n x_cen = np.sqrt(x_edges[1:] * x_edges[:-1])\n\n return FitIACTFermi(x_cen, y[mask], dy[mask],\n data[\"REDSHIFT\"][m].data[0],\n x_min=x_min, x_max=x_max)", "def read_magic_fits_file(magic_file, redshift, flux_unit='TeV-1 s-1 cm-2', hdu=2, energy_unit='TeV'):\n sed = Table.read(magic_file, hdu=hdu)\n x_cen = sed['energy'].to(energy_unit)\n dx = sed['Denergy'].to(energy_unit)\n\n sed['flux'].unit = u.Unit(sed['flux'].unit.to_string().replace(\"ph\", \"\"))\n sed['Dflux'].unit = sed['flux'].unit\n\n y = (sed['flux'] / x_cen.to('TeV') ** 2.).to(flux_unit)\n dy = (sed['Dflux'] / x_cen.to('TeV') ** 2.).to(flux_unit)\n x_edges = np.append(x_cen - dx / 2., x_cen[-1] + dx[-1] / 2.)\n x_min = x_edges[:-1]\n x_max = x_edges[1:]\n\n return FitIACTFermi(x_cen.to(\"TeV\").value, y.value, dy.value,\n redshift,\n x_min=x_min.to(\"TeV\").value, x_max=x_max.to(\"TeV\").value)", "def read_pypeit_fits(self, filename, unit='f_lam', exten=1):\n\n # Open the fits file\n try:\n hdu = fits.open(filename)\n except:\n raise ValueError(\"Filename not found\", str(filename))\n\n self.header = hdu[0].header\n self.unit = unit\n self.dispersion = hdu[exten].data['OPT_WAVE']\n self.raw_dispersion = hdu[exten].data['OPT_WAVE']\n self.flux = hdu[exten].data['OPT_FLAM'] * 1e-17\n self.raw_flux = hdu[exten].data['OPT_FLAM'] * 1e-17\n self.mask = np.array(hdu[exten].data['OPT_MASK'], dtype=bool)\n self.flux_ivar = hdu[exten].data['OPT_FLAM_IVAR']\n self.flux_err = hdu[exten].data['OPT_FLAM_SIG'] * 1e-17\n self.raw_flux_err = self.flux_err\n\n # Mask all pixels where the flux error is 0\n new_mask = np.ones_like(self.mask, dtype=bool)\n new_mask[self.flux_err == 0] = 0\n self.mask = new_mask\n\n\n if 'TELLURIC' in hdu[exten].columns.names:\n self.telluric = hdu[exten].data['TELLURIC']\n if 'OBJ_MODEL' in hdu[exten].columns.names:\n self.obj_model = hdu[exten].data['OBJ_MODEL']*1e-17", "def generate_summary(final_dictionary):\n otpt = open('multifind_summary.txt', 'w')\n for cat in final_dictionary:\n category_name = cat[0] + ': ' + str(len(cat[1])) + '\\n'\n otpt.write(category_name)\n for entry in cat[1]:\n otpt.write('\\t' + str(entry[0]) + '\\n')\n otpt.write('\\t\\tTotal Entries: %s\\n' % str(entry[1]))\n otpt.write('\\t\\tUnique Species: %s\\n' % str(entry[2]))\n count = 0\n for sp in entry[3]:\n if count < entry[2]-1:\n if count == 0:\n otpt.write('\\t\\tSpecies: ' + sp + ', ')\n else:\n otpt.write(sp + ', ')\n else:\n otpt.write(sp + '\\n')\n count += 1\n otpt.close()", "def read_fits_spec(fname):\n\timport 
pyfits as pf\n\n\thdu = pf.open(fname)\n\twave \t = hdu[1].data.wavelength\n\tflux \t = hdu[1].data.flux\n\tdf \t\t = hdu[1].data.df\n\tdf_plus = hdu[1].data.df_plus\n\tdf_minus = hdu[1].data.df_minus\n\treturn wave,flux,df,df_plus,df_minus", "def test_fitsheader():\n extensions = ('fts', 'fits')\n for ext in extensions:\n for ffile in Path(testpath).glob(f\"*.{ext}*\"):\n fits_file = fits.open(ffile)\n fits_file.verify(\"fix\")\n data, header = fits_file[0].data, fits_file[0].header\n meta_header = MetaDict(OrderedDict(header))\n sunpy.io.fits.header_to_fits(meta_header)", "def print_summary(self):\n\t\t\n\t\tif not self.objects:\n\t\t\tsys.stderr.write(\"No objects.\\n\")\n\t\t\treturn\n\t\t\n\t\t# Summary header data\n\t\theader = (\"ok\", \"error\", \"zdata\", \"xdata\", \"odata\", \"ratio\")\n\t\t\n\t\t# Summary header format\n\t\tfield = \" %11s\"\n\t\tfmt = field * len(header)\n\t\twidth = len(field % \"\") * len(header)\n\t\ts_line = \"-\" * width\n\t\td_line = \"=\" * width\n\t\t\n\t\t# Verbose header data\n\t\tvheader = (\"ok?\", \"type\", \"id\", \"zdata\", \"xdata\", \"odata\", \"ratio\")\n\t\t\n\t\t# Verbose header format\n\t\tvfmt = \" %3s %7s\" + field * 5\n\t\t\n\t\t# Summary data\n\t\tc_ratio = None\n\t\to_ok = o_error = 0\n\t\tz_data_size = x_data_size = o_data_size = 0\n\t\t\n\t\tif self.verbose:\n\t\t\tprint vfmt % vheader\n\t\t\tprint s_line\n\t\t\n\t\t# Gather data from objects\n\t\tfor obj in self.objects:\n\t\t\tif obj.v_all:\n\t\t\t\to_ok += 1\n\t\t\t\tif obj.z_data_size: z_data_size += obj.z_data_size\n\t\t\t\tif obj.x_data_size: x_data_size += obj.x_data_size\n\t\t\t\tif obj.o_data_size: o_data_size += obj.o_data_size\n\t\t\telse:\n\t\t\t\to_error += 1\n\t\t\t\n\t\t\tif self.verbose:\n\t\t\t\tv_c_ratio = None\n\t\t\t\t\n\t\t\t\t# Calculate compression if possible\n\t\t\t\tif obj.z_data_size and obj.x_data_size:\n\t\t\t\t\tv_c_ratio = str(100 * obj.z_data_size / obj.x_data_size) + \"%\"\n\t\t\t\t\n\t\t\t\t# Build verbose data\n\t\t\t\tv_data = (\n\t\t\t\t\t\"[Y]\" if obj.v_all else \"[N]\",\n\t\t\t\t\tobj.o_data_type or \"N/A\",\n\t\t\t\t\tobj.id[:10],\n\t\t\t\t\tobj.z_data_size or \"N/A\",\n\t\t\t\t\tobj.x_data_size or \"N/A\",\n\t\t\t\t\tobj.o_data_size or \"N/A\",\n\t\t\t\t\tv_c_ratio or \"N/A\"\n\t\t\t\t)\n\t\t\t\t\n\t\t\t\t# Print verbose data\n\t\t\t\tprint vfmt % v_data\n\t\t\n\t\tif self.verbose:\n\t\t\tprint d_line\n\t\t\n\t\t# Calculate compression ratio\n\t\tif z_data_size and x_data_size:\n\t\t\tc_ratio = str(100 * z_data_size / x_data_size) + \"%\"\n\t\t\n\t\t# Print summary\n\t\tprint fmt % header\n\t\tprint s_line\n\t\tprint fmt % (o_ok, o_error, z_data_size, x_data_size, o_data_size, c_ratio)", "def _parse_summary(self):\r\n if self._is_at_section():\r\n return\r\n\r\n summary = self._doc.read_to_next_empty_line()\r\n summary_str = \" \".join([s.strip() for s in summary]).strip()\r\n if re.compile('^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$').match(summary_str):\r\n self['Signature'] = summary_str\r\n if not self._is_at_section():\r\n self['Summary'] = self._doc.read_to_next_empty_line()\r\n else:\r\n self['Summary'] = summary\r\n\r\n if not self._is_at_section():\r\n self['Extended Summary'] = self._read_to_next_section()", "def _parse_summary(self):\r\n if self._is_at_section():\r\n return\r\n\r\n summary = self._doc.read_to_next_empty_line()\r\n summary_str = \" \".join([s.strip() for s in summary]).strip()\r\n if re.compile('^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$').match(summary_str):\r\n self['Signature'] = summary_str\r\n if not 
self._is_at_section():\r\n self['Summary'] = self._doc.read_to_next_empty_line()\r\n else:\r\n self['Summary'] = summary\r\n\r\n if not self._is_at_section():\r\n self['Extended Summary'] = self._read_to_next_section()", "def _print_summary(case, summary):\n for dof, data in summary.items():\n b4b = data[\"Bit for Bit\"]\n conf = data[\"Configurations\"]\n stdout = data[\"Std. Out Files\"]\n print(\" \" + case + \" \" + str(dof))\n print(\" --------------------\")\n print(\" Bit for bit matches : \" + str(b4b[0]) + \" of \" + str(b4b[1]))\n print(\" Configuration matches : \" + str(conf[0]) + \" of \" + str(conf[1]))\n print(\" Std. Out files parsed : \" + str(stdout))\n print(\"\")", "def summaryText(self):\n\n print('\\nReport Summary:\\n')\n for author in self.lowQuality.keys():\n if len(self.lowQuality[author]) > 0:\n print('Author: ' + author)\n print('---------------------')\n # do some sorting for readability\n files = []\n file2rating = {}\n for fileRating in self.lowQuality[author]:\n files.append(fileRating[1])\n file2rating[fileRating[1]] = fileRating[0]\n files.sort()\n for fileRating in files:\n print(file2rating[fileRating] + ' :: ' + fileRating)\n print('\\n\\n')", "def summary(self):\n self.tiles.refreshnames()\n self.glues.refreshnames()\n # self.check_consistent()\n info = {\n \"ntiles\": len(self.tiles),\n \"nrt\": len([x for x in self.tiles if not x.is_fake]),\n \"nft\": len([x for x in self.tiles if x.is_fake]),\n \"nends\": len(self.glues),\n \"ntends\": len(self.tiles.glues_from_tiles()),\n \"tns\": \" \".join(x.name for x in self.tiles if x.name),\n \"ens\": \" \".join(x.name for x in self.glues if x.name)\n # if (\"info\" in self.keys() and \"name\" in self[\"info\"].keys())\n # else \"\",\n }\n tun = sum(1 for x in self.tiles if x.name is None)\n if tun > 0:\n info[\"tns\"] += \" ({} unnamed)\".format(tun)\n eun = sum(1 for x in self.glues if x.name is None)\n if eun > 0:\n info[\"ens\"] += \" ({} unnamed)\".format(eun)\n if info[\"nft\"] > 0:\n info[\"nft\"] = \" (+ {} fake)\".format(info[\"nft\"])\n else:\n info[\"nft\"] = \"\"\n return \"TileSet: {nrt} tiles{nft}, {nends} ends, {ntends} ends in tiles.\\nTiles: {tns}\\nEnds: {ens}\".format(\n **info\n )", "def output_summary_stats(self, filename):\r\n\r\n total_return = self.equity_curve['equity_curve'][-1]\r\n returns = self.equity_curve['returns']\r\n pnl = self.equity_curve['equity_curve']\r\n\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\r\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\r\n self.equity_curve['drawdown'] = drawdown\r\n\r\n stats = [(\"Total Return\", \"%0.2f%%\" % \\\r\n ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f%%\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Drawdown Duration\", \"%f\" % dd_duration)]\r\n self.equity_curve.to_csv(filename)\r\n return stats", "def summary(self):\n return self.pfm", "def printSummary(self):\n pass", "def _read_fits(filename):\n if any(fn in os.path.basename(filename) for fn in COMPOSITE_MATCHES):\n with fits.open(filename) as hdu:\n data, header = hdu[1].data, hdu[1].header\n dqf = None\n elif any(fn in os.path.basename(filename) for fn in L1B_MATCHES):\n with fits.open(filename) as hdu:\n data, header, dqf = hdu[0].data, _fix_l1b_header(filename), hdu[1].data\n else:\n raise ValueError(\n f\"File {filename} does not look like a SUVI L1b FITS file or L2 HDR composite.\"\n )\n return header, data, dqf", "def summary(self):\n if hasattr(self,\"_summary\"):\n return self._summary\n 
else:\n return {}" ]
[ "0.58864206", "0.5854727", "0.58462995", "0.5822299", "0.57605237", "0.5732756", "0.57194835", "0.56438655", "0.55709666", "0.5556235", "0.55442023", "0.55412084", "0.5536046", "0.55326474", "0.55268484", "0.55177194", "0.55165523", "0.5450772", "0.5431529", "0.5425705", "0.5422506", "0.5422506", "0.54206884", "0.53835475", "0.5376119", "0.5330665", "0.53124976", "0.53017306", "0.52692735", "0.5255274" ]
0.8071825
0
Prompts the user for integer inputs and then prints out the largest two integers entered.
def largest_two(): # Add your code below!
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n s = set()\n\n while True:\n n = input('Enter a number: ')\n if n == -99:\n break\n\n s.add(n)\n\n l = list(s)\n\n if len(l) < 2:\n print 'sorry but the list is too small'\n exit(1)\n\n l.sort()\n print 'The second largest number is', l[-2]", "def getMaxNumber():\n maxNumber = int(input(\"what is the maximum number that you want:\"))\n return maxNumber", "def largest_int(numbers):\n\n if numbers == []:\n return \n max_int = numbers[0]\n for number in numbers:\n if number > max_int:\n max_int = number\n \n return max_int", "def find_max_numb(x,y):\n if x > y:\n print(x, \" - is max number.\")\n return x \n else:\n print(y, \" - is max number.\")\n return y", "def get_max_number():\n max_number = float(input(\"What is the max number you want?\"))\n return max_number", "def main(num, li1, list2):\n li1 = [[float(input()), float(input())] for i in range(num)]\n list2 = [li1[i][1]/li1[i][0] for i in range(num)]\n li1.sort(key=lambda x: x[0])\n for i in range(num):\n if li1[i][1]/li1[i][0] == max(list2):\n return print(\"%.2f %.2f\"%(li1[i][0], li1[i][1]))", "def max3(stdin):\n # return max(map(float, stdin.split()))\n return float(run(\"./max3\", [], stdin)[1])", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]", "def get_max(num_one, num_two):\n temp_a = int(str(num_one) + str(num_two))\n temp_b = int(str(num_two) + str(num_one))\n if temp_a >= temp_b:\n return num_one\n else:\n return num_two", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n most: int = input[0]\n n: int = 1\n while n < len(input):\n if input[n] > most:\n most = input[n]\n n += 1 \n return most", "def largest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a > b, a, b)\r\n else:\r\n return max(stack(*args), axis=0)", "def maxi(a,b):\n\tif a > b: \n\t\treturn a\n\treturn b", "def max(input: list[int]) -> int:\n i = 0\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n\n else:\n while i < len(input):\n j = i + 1\n while j < len(input):\n if input[i] > input[j]:\n if j == len(input) - 1:\n if input[i] >= input[len(input) - 1]:\n return input[i]\n j += 1\n else:\n j += len(input)\n i += 1\n return input[len(input) - 1]", "def maxi(a, b):\n return max(a, b)", "def two_largest(inlist):\n largest = second_largest = 0\n it1 = it2 = 0\n\n for i,item in enumerate(inlist):\n if item > largest:\n largest = item\n it1 = i\n elif largest > item > second_largest:\n second_largest = item\n it2 = i\n # Return the results as a tuple\n return largest, it1, second_largest, it2", "def exercise3(self, param1, param2, param3):\n if param1 > param2:\n if param1 > param3:\n maximum = param1\n maximum = param2\n if param2 > param3:\n maximum = param2\n maximum = param3\n print \"max(\", param1, \", \", param2, \", \", param3, \") = \", maximum", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def my_max(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return 
a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]\n\n if not args:\n raise ValueError(\"Can't find max, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find max, wrong data format\")\n return sorter(args)", "def main():\n numbers = prompt_for_numbers()\n sort(numbers)\n display(numbers)", "def finger(numbers):\n \n greater = 0 #storing largest number\n \n for i in numbers:\n if i%2 != 0 and i > greater: #check if odd and if larger than greater\n greater = i\n \n if greater == 0: # True if none are odd, greater var not changed\n return 'None of the numbers entered are odd.'\n \n return 'The largest odd number is: ' + str(greater)", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def user_prompt():\n print(\"We're going to find the greatest common divisor for two numbers.\\n\")\n a = int(input(\"Enter an integer for a: \"))\n b = int(input(\"Enter an integer for b: \"))\n print(gcd(a, b))", "def max(self, num_list):\n try:\n max = int(num_list[0])\n\n for number in num_list:\n try:\n if number > max:\n max = number\n except Exception as e:\n print(\"Error\", e)\n\n except Exception as e:\n print(\"Error:\", e)\n\n return max", "def max(a,b):\n if a > b:\n print(a,'is maximum')\n return a\n elif a==b:\n return('The numbers are equal')\n else:\n print(b,'is maximum')\n return b", "def largest_number_at_least_twice_of_others2(nums: [int]) -> int:\n if len(nums) == 1:\n return 0\n\n max_index = nums.index(max(nums))\n max_val = nums.pop(max_index)\n next_max = max(nums)\n\n if next_max * 2 <= max_val:\n return max_index\n return -1", "def main():\n user_input = int(input(\"Enter an integer which is greater than 1, we will\"\n + \" print out all of prime numbers: \"))\n\n while user_input <= 1:\n print(\"Invalid input, please enter again: \")\n user_input = int(input(\"\"))\n else:\n prime_num = PrimeGenerator()\n prime_list = prime_num.primes_to_max(user_input)\n\n print(prime_list)", "def medium_num():\n a = int(input('Enter num: '))\n b = int(input('Enter num: '))\n c = int(input('Enter num: '))\n if b < a < c or c < a < b:\n print(f'{a}')\n elif a < b < c or c < b < a:\n print(f'{b}')\n else:\n print(f'{c}')", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)" ]
[ "0.6894364", "0.66742486", "0.6549433", "0.6529827", "0.6480421", "0.63954073", "0.637488", "0.6278114", "0.62751466", "0.61635184", "0.6126541", "0.6090925", "0.6066813", "0.605354", "0.6043354", "0.6009417", "0.60047644", "0.6004455", "0.6001876", "0.5999565", "0.59955436", "0.5995319", "0.59735185", "0.596945", "0.5966486", "0.59603786", "0.59296405", "0.5927576", "0.59211546", "0.5911704" ]
0.692001
0
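The positive document stored for the query above is only a stub (its body is just "# Add your code below!"). A minimal sketch of one possible completion, in the spirit of the negative examples in this record; the prompt wording and the blank-line sentinel are assumptions, not part of the original record:

def largest_two():
    # Collect integers until the user submits a blank line (sentinel choice is an assumption).
    numbers = []
    while True:
        entry = input("Enter an integer (blank line to finish): ")
        if entry.strip() == "":
            break
        numbers.append(int(entry))
    if len(numbers) < 2:
        print("Please enter at least two integers.")
        return
    # Sort descending and take the top two values.
    first, second = sorted(numbers, reverse=True)[:2]
    print("The largest two integers entered are", first, "and", second)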
Extract the poet from the URL link.
def process_poem(url): response = get(url) html_soup = BeautifulSoup(response.text, 'html.parser') beyts = html_soup.find_all('span', class_ = 'verse') beyts = [beyt.text for beyt in beyts] info_dict = process_key_items(html_soup) info_dict['beyts'] = beyts return info_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_link(url):\n\theaders = {\"Host\": \"www.zomato.com\",\n\t \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0\",\n\t \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n\t \"Accept-Language\": \"en-US,en;q=0.5\",\n\t \"Accept-Encoding\": \"gzip, deflate, br\",\n\t \"Referer\": \"https://www.zomato.com/\",\n\t \"Connection\": \"keep-alive\"}\n\n\tif url.startswith('file'):\n\t\twith open(url.replace('file:\\\\\\\\', ''), encoding='utf-8') as fp:\n\t \t\tpage_source = fp.read()\n\n\telse:\n\t\tr = requests.get(url, headers=headers)\n\t\tif r.status_code == 404:\n\t\t\treturn None\n\t\tpage_source = r.text\n\n\tpage_source = re.sub('<br>', '', page_source)\n\tpage_source = re.sub('<br />', '', page_source)\n\tpage_source = re.sub('<br/>', '', page_source)\n\tsoup = BeautifulSoup(page_source, 'html.parser')\n\n\treturn soup", "def _real_extract(self, url):\n pass", "def extract_url(td):\n url = td.find('a',href=True)['href']\n return url", "def to_p(url):\n if type(url) != str:\n return\n match = config.p_re.match(url.strip())\n if match:\n return match.group(config.p_re_group_id)", "def parse(self, url):\n pass", "def extract_position_from_link(link):\n response = requests.get(link)\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n mydivs = soup.find_all(\"div\", {\"id\": \"meta\"})\n if len(mydivs) != 1:\n return \"unknown\"\n\n pars = mydivs[0].find_all(\"p\")\n if len(mydivs) < 1:\n return \"unknown\"\n\n for par in pars:\n if \"Position:\" in par.text:\n start_idx = par.text.find(\"Position:\")\n strings = par.text[start_idx:17].split()\n if len(strings) < 2:\n return \"unknown\"\n position = strings[1]\n return position\n return \"unknown\"", "def get_info_of_url(url):\n pass", "def _parse_url(url: str) -> Optional[str]:\n match = re.search(r\"pastecord.com(?:/raw|/documents)?/(\\w+)(?:\\.\\w+)?\", url)\n if match is None:\n return None\n return match.group(1)", "def get_pid_from_url(url):\n return re.findall(r'store.lining.com/shop/goods-(\\w+).html\\w*', url)[0]", "def extract(self, url):\n if not self.is_tiny(url):\n return url\n # The actual extraction is done in utility method so that\n # result is cached only if is_tiny() check has passed.\n return self._do_extract(url)", "def find_link_title(link_para):\n urls = []\n source_code = requests.get(link_para)\n plain_text = source_code.text\n parsed_html = BeautifulSoup(plain_text)\n for sub_link in parsed_html.find_all('a'):\n urls.append(sub_link.string)\n print urls", "def extract_text(td):\n text = td.find('a',href=True).text\n return text", "def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poems associated with each poet\n nextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table_poems = sresponse.xpath('//tbody/tr')\n\n #poetry.org does not provide text for all of the poems available, some links are for audio versions only,\n #therefore need to avoid storing poemitems that are not text\n regex = re.compile(r'audio')\n\n for row in table_poems:\n if len(row.xpath('td/a/@href').extract()[0]) > 0 :\n poemlink = u''.join(row.xpath('td/a/@href').extract()[0])\n linktext = str(poemlink)\n if regex.search(linktext) is None:\n if 
len(row.xpath('td//text()').extract())>0:\n poemitem = PoemItem()\n poemitem['poet_poems_url'] = poet_poems_url\n poemitem['poem_yrpub'] = row.xpath('td//text()').extract()[1]\n poemitem['poem_title'] = row.xpath('td//text()').extract()[4]\n poemitem['poem_link'] = urlparse.urljoin(\"http://www.poets.org\",poemlink)\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poemlink),\n callback=self.parse_poet_poem, meta={'poemitem': poemitem})\n\n #if more poems on next page, use this method again\n if len(nextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",nextpagelink),\n callback=self.parse_poet_poems, meta= {'poet_poems_url': poet_poems_url})", "def extract(self, url):\n self.initialize()\n return self._real_extract(url)", "def extractParticular(link):\n webpage = openWebsite(link).read()\n nameIndexStart = webpage.index('<title>') + 7\n nameIndexStop = webpage[nameIndexStart:].index('</title>') + nameIndexStart - 1\n name = webpage[nameIndexStart : nameIndexStop].split('-')[0]\n name = \" \".join(name.split())\n name = re.sub('/', '', name)\n\n avatarName = RESTAURANTPATH + '{}.png'.format(\"\".join(name.split()).lower())\n captureImage(link, avatarName)\n\n return name, avatarName", "def extract_text(url, sem):\n with (yield from sem):\n page = yield from get(url)\n\n tree = etree.HTML(page)\n paragraphs = tree.findall('.//*/div[@class=\"entry-content\"]/p')[1:-1]\n return url, b'\\n'.join(map(etree.tostring, paragraphs))", "def getVotacion(self, url):", "def extract_real_link(self, text):\n if text.startswith('https://www.google.com/url?'):\n return parse_qs(urlparse(text).query)['url'][0]\n\n return text", "def extract_url_from_result(soup, url, listing_id):\r\n for link in soup.find_all(name='a', class_='prod_container'):\r\n url.append(link.get('href'))\r\n link_text = link.get('href')\r\n\r\n # From URL - extract listing ID\r\n try:\r\n id_regex = re.compile(r'\\d{7}')\r\n id_ = id_regex.search(link_text).group()\r\n listing_id.append(id_)\r\n except:\r\n listing_id.append('Not found')\r\n # print(url)\r\n # print(listing_id)\r\n return url, listing_id", "def extract_info_from_url(url):\n search = re.search(r\"^https://huggingface\\.co/(.*)/resolve/([^/]*)/(.*)$\", url)\n if search is None:\n return None\n repo, revision, filename = search.groups()\n cache_repo = \"--\".join([\"models\"] + repo.split(\"/\"))\n return {\"repo\": cache_repo, \"revision\": revision, \"filename\": filename}", "def extract_url(episode: dict) -> str:\n url = episode['embed_info']\n is_playable = episode['playable']\n episode_num = episode['number']\n #episode might not be playable or url might be empty\n if not is_playable or not url:\n raise Exception(f\"L'episodio {episode_num} non e' scaricabile.\")\n\n url = real_url(episode['embed_info'])\n url = format_url(url, episode['video_type'])\n if 'youtube.com' in url:\n raise Exception(f\"L'episodio {episode_num} e' un video di YouTube che puoi guardare a questo url: \\n{url}\")\n\n return url", "def webpage_miner(url):\n try:\n URL = str(url)\n extractor = Extractor(extractor=\"ArticleExtractor\", url=URL)\n out = extractor.getText()\n return [str(out), url]\n except Exception as e:\n pass", "def parse_poet(self, response):\n item = response.meta['item']\n\n sresponse = scrapy.Selector(response)\n poetdata = sresponse.xpath('//div[@class=\"view-content\"]')\n\n #TODO: Clear empty strings from poet item fields\n\n item['poet_basicbio'] = poetdata[0].xpath('div/span//text()').extract()\n 
item['poet_positions'] = poetdata[0].xpath('div//div/text()').extract()\n item['poet_posyears'] = poetdata[0].xpath('div//div/span/text()').extract()\n item['poet_bio'] = sresponse.xpath('//div[@class=\"field-items\"]//p//text()').extract()\n\n #this important link goes to the page of poems for each poet\n poetpoemlink = u''.join(sresponse.xpath('//div[@class=\"view-footer\"]/a/@href').extract())\n poet_poems_url = urlparse.urljoin(\"http://www.poets.org\",poetpoemlink)\n\n item['poet_poems_url'] = poet_poems_url\n\n #PoetItem finishes here\n yield item\n\n #goes to method that parses poems found in the poet_poems_url\n yield scrapy.Request(url=poet_poems_url, callback=self.parse_poet_poems, meta={'poet_poems_url': poet_poems_url })", "def scrape_promed_url(url):\n article_id_regex = re.compile('(id=|\\/post\\/)(?P<id>\\d+\\.?\\d*)')\n parse = article_id_regex.search(url)\n if parse:\n return scrape_promed_id(parse.groupdict().get('id'))\n else:\n raise Exception(\"Couldn't scrape url: \" + url)", "def DealUrlSecond14(self, match, content):\n __clear__0 = '<a href=\"//www.taobao.com/market/.*?</a>'\n __clear__1 = '<dd class=\"cat-title\" data-tag=\"\"><a href=.*?</a></dd>'\n model_break = '<a href=\"#nogo\"></a>'\n try:\n for line in content.split('\\n'):\n if re.search(model_break, line):\n break\n if re.search(__clear__1, line):\n each_part = '<dd class=\"cat-title\" data-tag=\"(.*?)</dd>'\n links = re.findall(each_part, line)\n for link in links:\n url_name = '<a href=\"(.*)\">(.*)</a>'\n result = re.findall(url_name, link)\n print >> match, result[0][0]+' '+result[0][1].decode('gbk').encode('utf-8')\n elif re.search(__clear__0, line):\n url_title = '<a href=\"(.*.php)\">(.*)</a>'\n result = re.findall(url_title, line)\n if result:\n if len(result[0][1]) < 10:\n print >> match, '\\n'\n print >> match, result[0][0]+' '+result[0][1].decode('gbk').encode('utf-8')\n except:\n print 'Something wrong is happening in the GetUrlSecond!'", "def core_url(url):\n m = re.search('(http(s)?://(((www|.+\\.blogs)\\.nytimes\\.com.+)(roomfordebate/.+|/$|.html)))', url)\n if m:\n return(m.group(3))\n else:\n return('********url %s not pulled' % url)", "def extract_track_url(search):\n\n if 'tracks' in search:\n tracks = search['tracks']\n if 'items' in tracks:\n items = tracks['items']\n # take the first url we can find\n for item in items:\n if 'external_urls' in item:\n external_urls = item['external_urls']\n if 'spotify' in external_urls:\n url = external_urls['spotify']\n return url", "def link_extract(link_text, content):\n h = html5lib.parse(content, namespaceHTMLElements=False)\n candidates = h.findall(\".//a[.='%s']\" % link_text)\n if not candidates:\n return 'NOT MATCHED'\n try:\n return candidates[0].attrib['href']\n except:\n return 'NOT MATCHED'", "def _extract_page_info(article: dict, url: str) -> dict:\n\n if not article:\n return {}\n language = detect(article.get('content_text'))\n if len(language) > 2 and len(language[2]) > 1:\n language_code = language[2][0][1]\n else:\n language_code = None\n return {'url': url, 'language': language_code}", "def get_url(url):\n article = Article(url, language='en')\n article.download()\n article.parse()\n return {\"title\": article.title, \"text\": article.text}" ]
[ "0.63206905", "0.6201261", "0.6153495", "0.6138546", "0.6091984", "0.60546577", "0.59694636", "0.5872868", "0.5828914", "0.58068365", "0.5788576", "0.5785462", "0.5776612", "0.5776533", "0.57658374", "0.5763261", "0.5761654", "0.57473385", "0.5730871", "0.5727238", "0.5724248", "0.5717864", "0.56609356", "0.56552714", "0.5647507", "0.5608627", "0.5586401", "0.557751", "0.5575303", "0.5574788" ]
0.6467172
0
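For context, a short usage sketch of the process_poem document above; the URL is a made-up placeholder, the output filename is arbitrary, and process_key_items is assumed to be defined alongside process_poem:

import json

poem_url = "https://example.com/poem/2130"  # hypothetical poem page containing <span class="verse"> elements
poem_info = process_poem(poem_url)
print(len(poem_info['beyts']), "verses scraped")
with open("poem_2130.json", "w", encoding="utf-8") as handle:
    json.dump(poem_info, handle, ensure_ascii=False, indent=2)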
Example of using the read_poems function for Rumi.
def test_RUMI(): start = 11051 end = 11902 read_poems(start, end)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_poem(self, p):\n poem = []\n number = self.int2roman[p] + \".\"\n next_number = self.int2roman[p + 1] + \".\"\n print(self.index[number])\n index1 = self.index[number][0]\n index2 = self.index[next_number][0]\n for i in range(index1 + 1, index2):\n if self.msgs[i] != \" \" and self.get_msg(i) != \"\":\n poem.append(self.get_msg(i))\n return poem", "def read():\n # TODO", "def readOdom(msg):\n global pose\n global xPosition\n global yPosition\n global theta\n global odom_list\n global odom_tf\n try:\n pose = msg.pose\n geo_quat = pose.pose.orientation\n q = [geo_quat.x, geo_quat.y, geo_quat.z, geo_quat.w]\n odom_tf.sendTransform((pose.pose.position.x, pose.pose.position.y, 0), \n (pose.pose.orientation.x, pose.pose.orientation.y,pose.pose.orientation.z,pose.pose.orientation.w),rospy.Time.now(),\"base_footprint\",\"odom\")\n #Convert transform to global usable coordinates (x, y, theta)\n (trans, rot) = odom_list.lookupTransform('map', 'base_footprint', rospy.Time(0))\n roll, pitch, yaw = euler_from_quaternion(rot)\n theta = yaw * (180.0/math.pi)\n xPosition = trans[0]\n yPosition = trans[1]\n except:\n print \"waiting\"", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, 
arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations", "def hmm_poem():\n return render_template(\"hmm_poem.html\")", "def read_all_pram(self):\n return self.PRAM", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def test_eval_omim(self):\n evaluator = self.engine\n eos = evaluator.eval(\"omim\")\n with open(PREDICTIONS_OMIM_OUT, \"w\") as f:\n yaml.dump(eos.dict(), f)", "def read_particle( filename, species, quantity ) :\n # Translate the quantity to the OpenPMD format\n dict_quantity = { 'x' : 'position/x',\n 'y' : 'position/y',\n 'z' : 'position/z',\n 'ux' : 'momentum/x',\n 'uy' : 'momentum/y',\n 'uz' : 'momentum/z',\n 'w' : 'weighting'}\n if quantity in dict_quantity:\n opmd_quantity = dict_quantity[quantity]\n else:\n opmd_quantity = quantity\n\n # Open the HDF5 file\n dfile = h5py.File( filename, 'r' )\n base_path = get_bpath( dfile )\n particles_path = dfile.attrs['particlesPath'].decode()\n\n # Find the right dataset\n species_grp = dfile[ os.path.join( base_path, particles_path, species ) ]\n data = get_data( species_grp[ opmd_quantity ] )\n\n # - Return positions in microns, with an offset\n if quantity in ['x', 'y', 'z']:\n offset = get_data( species_grp[ 'positionOffset/%s' %quantity ] )\n data = 1.e6 * (data + offset)\n # - Return momentum in normalized units\n elif quantity in ['ux', 'uy', 'uz' ]: \n norm_factor = 1./( get_data( species_grp['mass'] ) * constants.c )\n data = data * norm_factor\n\n # Close the HDF5 file and return the data\n dfile.close()\n return( data )", "def read_gmm(read):\n\tmodel = mixture.GMM(n_components=8)\n\tread_main = read.get_read()\n\talignments = read.get_alignments()\n\n\t# Generates observations\n\t# bases are converted to their ascii character values\n\tread_list = [ord(c) for c in read_main]\n\tobservations = [ ]\n\tfor alignment in alignments:\n\t\talignment_list = [ord(c) for c in alignment] \n\t\tobservations.append( alignment_list )\n\t# for base_index, base in enumerate(read_main):\n\t# \tbase_observations = [ord(base)]\n\t# \tfor alignment in alignments:\n\t# \t\tbase_observations.append(ord(alignment[base_index]))\n\n\t# \tobservations.append(base_observations)\n\n\tprint model.fit(observations)\n\tprint np.round(model.means_, 2)\n\t\n\tprint model.predict([read_list])", "def parse_poet_poem(self, response):\n poemitem = response.meta['poemitem']\n sresponse = scrapy.Selector(response)\n poemitem['poem_text'] = sresponse.xpath('//div[@property = \"content:encoded\"]//text()').extract()\n poemitem['poem_copyright'] = sresponse.xpath('//div[@class = \"poem-credit\"]//p//text()').extract()\n\n yield poemitem", "def test_PhonopyYaml_read(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n cell = _get_unitcell(filename)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def new_hmm_poem():\n #Return a json dictionary, containing the next seed and sentence\n return re.sub(\"\\n\", \"<br>\", hmm.generate_poem())", "def readPAR(self,phys,parname):\r\n PARReader.PARReader(self.checkPath(parname),0).read(phys.myPAR)\r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def read_pe(self, image_base, size=0):\n # Calculate the size of the in-memory PE\n self.ret = b\"\"\n\n size = self._calc_pe_size(image_base)\n\n if size == 0:\n return self.ret\n\n # Read PE data from IDA database\n self.ret = self.get_bytes(image_base, size)\n return self.ret", "def readPwmFile(pwmFileName, outputLocation, 
pseudocounts=0.0):\n\n # Adding pseudocounts\n pwmFile = open(pwmFileName,\"r\");\n tempFileName = outputLocation+pwmFileName.split(\"/\")[-1]+\"temp\"\n pwmFileT = open(tempFileName,\"w\")\n for line in pwmFile: pwmFileT.write(\" \".join([str(float(e)+pseudocounts) for e in line.strip().split(\" \")])+\"\\n\") \n pwmFile.close()\n pwmFileT.close()\n\n # Creating PWM from pseudocounted input\n pwmFile = open(tempFileName,\"r\")\n pwm = Motif.read(pwmFile,\"jaspar-pfm\")\n pwmFile.close()\n os.system(\"rm \"+tempFileName)\n return pwm", "def read_motivation(user):\r\n motiv_file = user + ' Motivation.txt'\r\n file_path = os.path.join(root_dir, 'data', motiv_file)\r\n\r\n with open(file_path, 'r') as content_file:\r\n Motivation = content_file.read()\r\n\r\n Motivation = Motivation.decode(\"utf-8\")\r\n return Motivation", "def readLBOMDIN(self):\n logger = logging.getLogger(__name__)\n\n if os.path.exists(\"lbomd.IN\"):\n f = open(\"lbomd.IN\")\n\n try:\n f.readline()\n f.readline()\n f.readline()\n f.readline()\n\n line = f.readline().strip()\n array = line.split()\n try:\n PBC = [0] * 3\n PBC[0] = int(array[0])\n PBC[1] = int(array[1])\n PBC[2] = int(array[2])\n\n except IndexError:\n logger.warning(\"Index error 2 (check lbomd.IN format)\")\n\n except Exception:\n err = \"Read lbomd.IN failed with error:\\n\\n%s\" % \"\".join(traceback.format_exception(*sys.exc_info()))\n self.displayError(err)\n\n finally:\n f.close()", "def read(self):", "def parse_pob(self):\n \n index = self.index\n start = self.index \n \n if self.words[index]['word'] == 'pob' or self.words[index]['word'] == 'pmb':\n index += 1\n if index == self.length:\n return None, 0\n if self.words[index]['word'] == '.':\n return self.words[index+1]['word'], 3\n else:\n return self.words[index]['word'], 2\n elif self.words[index]['word'] == 'p':\n index += 1\n if index == self.length:\n return None, 0\n if self.words[index]['word'] == '.':\n index += 1\n if index == self.length:\n return None, 0\n if self.words[index]['word'] not in ['o', 'm']:\n return None, 0\n index += 1\n if index == self.length:\n return None, 0\n if self.words[index]['word'] == '.':\n index += 1\n if index == self.length:\n return None, 0\n if self.words[index]['word'] in ['b', 'box']:\n index += 1\n if index == self.length:\n return None, 0\n elif not self.words[index]['word'].isdigit():\n return None,0\n if self.words[index]['word'] == '.':\n index += 1\n if index == self.length:\n return None, 0\n return self.words[index]['word'], index - start + 1\n \n if self.words[index]['word'] == 'po':\n index += 1\n if index == self.length:\n return None, 0\n if self.words[index]['word'] == 'box':\n index += 1\n if index == self.length:\n return None, 0\n return self.words[index]['word'], index - start + 1\n \n return None, 0", "def test_ipam_rirs_read(self):\n pass", "def read_qe(qefile, task):\n fileobj = open(qefile)\n lines = fileobj.readlines()\n fileobj.close()\n if task == \"PW_INP\": # Reading a pw.x input file\n for i, line in enumerate(lines):\n if \"nat\" in line:\n # Reading the number of atoms in the cell\n if \",\" in line.split()[2]:\n nat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n nat = int(line.split()[2])\n elif \"ntyp\" in line:\n if \",\" in line.split()[2]:\n ntypat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n ntypat = int(line.split()[2])\n elif \"CELL_PARAMETERS\" in line:\n # Reading the cell vectors\n cell = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in 
cell])\n elif \"ATOMIC_POSITIONS\" in line:\n if \"crystal\" in line:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]]), cell)\n else:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n # Returning the input structure\n rstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n return rstrc\n elif task == \"PW_OUT_RELAX\": # Reading a pw.x output file for a calculation = \"relax\"\n status = \"NONE\"\n rstrcs = []\n rtotEs = []\n rtotFs = []\n rforces = []\n rstress = []\n for i, line in enumerate(lines):\n # Initial information related to the input cell\n if \"number of atoms/cell\" in line:\n # Reading the number of atoms in the cell\n nat = int(line.split()[4])\n elif \"number of atomic types\" in line:\n ntypat = int(line.split()[5])\n elif \"crystal axes: (cart. coord. in units of alat)\" in line:\n # Reading the cell vectors\n cell = [x.split()[3:6] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"Crystallographic axes\" in line:\n # Reading the input coordinates and creating a collection of ase.Atoms objects\n geom_start = i + 3\n geom_stop = geom_start + nat\n species = [line.split()[1] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[6:9]]\n for line in lines[geom_start:geom_stop]]), cell)\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates (first)\")\n # Now, just after each SCF cycle\n # Reading total energy\n elif \"Forces acting on atoms\" in line:\n forces_start = i + 2\n forces_stop = forces_start + nat\n try:\n forces = array([[float(col) for col in line.split()[6:9]]\n for line in lines[forces_start:forces_stop]])\n #print (\"Appending forces\")\n rforces.append(forces)\n except ValueError:\n # expected to occur when forces are too big\n # and so incompatible with the format used in QE\n # for instance:\n # atom 3 type 2 force = 674.57999165 312.30521069-1079.69944125\n print (\"Rerror reading forces in file:\")\n print (qefile)\n #print (\"Appending forces (empty)\")\n rforces.append([])\n elif \"! 
total energy\" in line:\n rtotEs.append(float(line.split()[4]))\n #print (\"Appending energy\")\n elif \"total stress (Ry/bohr**3)\" in line:\n # Reading the stress tensor\n stress = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n stress = array([[float(col) for col in row] for row in stress])\n rstress.append(stress)\n #print (\"Appending stress\")\n elif \"Total force\" in line:\n rtotFs.append(float(line.split()[3]))\n #print (\"Appending total forces\")\n elif \"ATOMIC_POSITIONS (alat)\" in line:\n # Reading the relaxed and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates\")\n elif \"convergence NOT achieved after 100 iterations: stopping\" in line:\n # Removing the last item the vector with structures\n status = \"SCF_NOT_CONVERGED\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if no even the first SCF started\n if len(rtotEs) == 0 and status == \"NONE\":\n status = \"CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the SCF has not been finished because of timeout\n if len(rstrcs) > len(rtotEs) and status == \"NONE\":\n status = \"TIMEOUT_OR_CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the BFGS has been finished\n if status == \"TIMEOUT_OR_CRASH\" and \"JOB DONE\" in lines[len(lines)-2]:\n status = \"FINISHED\"\n # Returning a collection of cells and properties\n return status, rstrcs, rtotEs, rtotFs, rforces, rstress", "def read(self) -> int:", "def read_input(f, UUID=None):\n \n #Read input file in as dictionary \n input_dict = input.file_to_dict(f)\n \n #Interpret input terms common across calculations\n #Assert required keys exist\n assert 'lammps_command' in input_dict, 'lammps_command value not supplied'\n assert 'potential_file' in input_dict, 'potential_file value not supplied'\n assert 'load' in input_dict, 'load value not supplied'\n \n #Set default values to optional keys\n if UUID is not None:\n input_dict['uuid'] = UUID\n else:\n input_dict['uuid'] = input_dict.get('uuid', str(uuid.uuid4()))\n \n input_dict['mpi_command'] = input_dict.get('mpi_command', None)\n input_dict['potential_dir'] = input_dict.get('potential_dir', '')\n \n input_dict['length_unit'] = input_dict.get('length_unit', 'angstrom')\n input_dict['energy_unit'] = input_dict.get('energy_unit', 'eV')\n input_dict['pressure_unit'] = input_dict.get('pressure_unit', 'GPa')\n input_dict['force_unit'] = input_dict.get('force_unit', 'eV/angstrom') \n \n input_dict['symbols'] = input_dict.get('symbols', None)\n \n #Read contents of potential_file into potential\n with open(input_dict['potential_file']) as f:\n input_dict['potential'] = DM(f)\n \n #Interpret input terms unique to this calculation.\n input_dict['boundary_shape'] = input_dict.get('boundary_shape', 'circle')\n input_dict['boundary_width'] = float(input_dict.get('boundary_width', 3.0))\n \n input_dict['anneal_temperature'] = input.value_unit(input_dict, 'anneal_temperature', default_unit='K', default_term='0.0 K') \n \n input_dict['energy_tolerance'] = float(input_dict.get('energy_tolerance', 0.0))\n input_dict['force_tolerance'] = input.value_unit(input_dict, 'force_tolerance', default_unit=input_dict['force_unit'], 
default_term='1e-6 eV/angstrom')\n input_dict['maximum_iterations'] = int(input_dict.get('maximum_iterations', 100000))\n input_dict['maximum_evaluations'] = int(input_dict.get('maximum_evaluations', 100000))\n \n #input_dict['elastic_constants_model'] = input_dict.get('elastic_constants_model', input_dict['load'].split()[1])\n \n #Extract explicit elastic constants from input_dict\n Cdict = {}\n for key in input_dict.iterkeys():\n if key[0] == 'C':\n Cdict[key] = input.value_unit(input_dict, key, default_unit=input_dict['pressure_unit'])\n if len(Cdict) > 0:\n assert 'elastic_constants_model' not in input_dict, 'Cij values and elastic_constants_model cannot both be specified.'\n input_dict['elastic_constants_model'] = None \n input_dict['C'] = am.tools.ElasticConstants(**Cdict)\n \n #If no Cij elastic constants defined check for elastic_constants_model\n else:\n #load file may be the elastic_constants_model file\n input_dict['elastic_constants_model'] = input_dict.get('elastic_constants_model', input_dict['load'].split()[1])\n \n with open(input_dict['elastic_constants_model']) as f:\n C_model = DM(f)\n \n try:\n input_dict['elastic_constants_model'] = DM([('elastic-constants', C_model.find('elastic-constants'))])\n input_dict['C'] = am.tools.ElasticConstants(model=input_dict['elastic_constants_model'])\n except:\n input_dict['elastic_constants_model'] = None \n input_dict['C'] = None \n \n return input_dict", "def readAMBERTop(self, phys, filename):\r\n\r\n def skipLine(data):\r\n nl = data.index('\\n')\r\n return data[nl+1:len(data)]\r\n\r\n def jumpTo(data, target):\r\n fp = data.index(target)\r\n return data[fp:len(data)]\r\n\r\n def readRemove(data, size):\r\n retval = data[0:size-1]\r\n return data[size:len(data)]\r\n\r\n def getInteger(data):\r\n pos = 0\r\n retval = \"\"\r\n while (not data[pos].isdigit()):\r\n pos = pos + 1\r\n while (data[pos].isdigit()):\r\n retval = retval + data[pos]\r\n pos = pos + 1\r\n data = data[pos:len(data)]\r\n return int(retval), data\r\n\r\n def parse(data, arr, str, count, dtype, tupsize=1):\r\n data = jumpTo(data, \"%FLAG \"+str)\r\n data = jumpTo(data, \"%FORMAT\")\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data) \r\n \r\n arr2 = []\r\n numread = 0\r\n for j in range(0, (tupsize*count-1) / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n if (tupsize == 1):\r\n arr.append(dtype(data[0:fieldsize].strip()))\r\n else:\r\n arr2.append(dtype(data[0:fieldsize].strip()))\r\n if (len(arr2) == tupsize):\r\n arr.append(arr2)\r\n arr2 = []\r\n numread += 1\r\n data = data[fieldsize:len(data)]\r\n if (numread == tupsize*count):\r\n break\r\n data = skipLine(data) \r\n return data\r\n\r\n def scan(data, str):\r\n return (data.count(str) != 0)\r\n\r\n\r\n f = open(filename, 'r')\r\n data = f.read()\r\n\r\n # First Line: VERSION ...\r\n data = skipLine(data)\r\n\r\n # Go To: %FLAG POINTERS\r\n data = jumpTo(data, '%FLAG POINTERS')\r\n\r\n data = jumpTo(data, '%FORMAT')\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data)\r\n \r\n temp = []\r\n numread = 0\r\n for j in range(0, 31 / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n temp.append(int(data[0:8]))\r\n data = data[8:len(data)]\r\n numread += 1\r\n if (numread == 31):\r\n break\r\n data = skipLine(data)\r\n \r\n [natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, 
mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n\r\n\r\n #################################################\r\n # Read AtomTypes\r\n atomnames = []\r\n charges = []\r\n masses = []\r\n atindex = []\r\n exclusions = []\r\n nparams = []\r\n reslabels = []\r\n respointers = []\r\n forceconstants = [[], [], []] # bond, angle, dihedral\r\n equilvals = [[], [], [[], []]] # bond, angle, dihedral\r\n scee_scales = []\r\n scnb_scales = []\r\n solty = []\r\n lj_acoef = []\r\n lj_bcoef = []\r\n\r\n data = parse(data, atomnames, \"ATOM_NAME\", natoms, str) \r\n data = parse(data, charges, \"CHARGE\", natoms, float)\r\n data = parse(data, masses, \"MASS\", natoms, float)\r\n data = parse(data, atindex, \"ATOM_TYPE_INDEX\", natoms, int)\r\n data = parse(data, exclusions, \"NUMBER_EXCLUDED_ATOMS\", natoms, int)\r\n data = parse(data, nparams, \"NONBONDED_PARM_INDEX\", ntypes*ntypes, int)\r\n data = parse(data, reslabels, \"RESIDUE_LABEL\", nres, str)\r\n data = parse(data, respointers, \"RESIDUE_POINTER\", nres, int)\r\n data = parse(data, forceconstants[0], \"BOND_FORCE_CONSTANT\", numbnd, float)\r\n data = parse(data, equilvals[0], \"BOND_EQUIL_VALUE\", numbnd, float)\r\n data = parse(data, forceconstants[1], \"ANGLE_FORCE_CONSTANT\", numang, float)\r\n data = parse(data, equilvals[1], \"ANGLE_EQUIL_VALUE\", numang, float)\r\n data = parse(data, forceconstants[2], \"DIHEDRAL_FORCE_CONSTANT\", nptra, float)\r\n data = parse(data, equilvals[2][0], \"DIHEDRAL_PERIODICITY\", nptra, float)\r\n data = parse(data, equilvals[2][1], \"DIHEDRAL_PHASE\", nptra, float)\r\n if (scan(data, \"SCEE_SCALE_FACTOR\")):\r\n data = parse(data, scee_scales, \"SCEE_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scee_scales.append(1.2) # Default \r\n if (scan(data, \"SCNB_SCALE_FACTOR\")):\r\n data = parse(data, scnb_scales, \"SCNB_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scnb_scales.append(2.0) # Default \r\n\r\n data = parse(data, solty, \"SOLTY\", natyp, float)\r\n data = parse(data, lj_acoef, \"LENNARD_JONES_ACOEF\", ntypes*(ntypes+1)/2, float)\r\n data = parse(data, lj_bcoef, \"LENNARD_JONES_BCOEF\", ntypes*(ntypes+1)/2, float)\r\n\r\n\r\n ##########################################################\r\n # STRUCTURE\r\n\r\n bonds = [[], []] # With H, Without H\r\n angles = [[], []] # With H, Without H\r\n dihedrals = [[], []] # With H, Without H\r\n impropers = [[], []] # With H, Without H\r\n excluded_atoms = [] \r\n hbond_acoef = []\r\n hbond_bcoef = []\r\n hbcut = []\r\n amber_atom_types = []\r\n tree_chain = []\r\n join_array = []\r\n irotat = []\r\n radii = []\r\n screen = []\r\n\r\n data = parse(data, bonds[0], \"BONDS_INC_HYDROGEN\", nbonh, int, 3)\r\n data = parse(data, bonds[1], \"BONDS_WITHOUT_HYDROGEN\", nbona, int, 3)\r\n data = parse(data, angles[0], \"ANGLES_INC_HYDROGEN\", ntheth, int, 4)\r\n data = parse(data, angles[1], \"ANGLES_WITHOUT_HYDROGEN\", ntheta, int, 4)\r\n data = parse(data, dihedrals[0], \"DIHEDRALS_INC_HYDROGEN\", nphih, int, 5)\r\n data = parse(data, dihedrals[1], \"DIHEDRALS_WITHOUT_HYDROGEN\", nphia, int, 5)\r\n \r\n # MERGE ARRAYS - PM HANDLES THE H+\r\n final_bonds = bonds[0] + bonds[1]\r\n final_angles = angles[0] + angles[1]\r\n final_dihedrals = dihedrals[0] + dihedrals[1]\r\n final_impropers = []\r\n \r\n # CLEAN UP THE TRASH\r\n del(bonds)\r\n del(angles)\r\n del(dihedrals)\r\n \r\n\r\n # Move impropers into their own array\r\n i = 0\r\n while (i < len(final_dihedrals)):\r\n if (final_dihedrals[i][2] < 0): # 
1-4 exclusions are handled by our back end\r\n final_dihedrals[i][2] *= -1\r\n if (final_dihedrals[i][3] < 0):\r\n final_dihedrals[i][3] *= -1 # Make + again\r\n final_impropers.append(final_dihedrals[i])\r\n final_dihedrals.remove(final_dihedrals[i])\r\n i -= 1\r\n i += 1\r\n\r\n # Convert charge units\r\n for i in range(0, len(charges)):\r\n charges[i] /= 18.223\r\n\r\n\r\n data = parse(data, excluded_atoms, \"EXCLUDED_ATOMS_LIST\", nnb, int)\r\n data = parse(data, hbond_acoef, \"HBOND_ACOEF\", nphb, float)\r\n data = parse(data, hbond_bcoef, \"HBOND_BCOEF\", nphb, float)\r\n data = parse(data, hbcut, \"HBCUT\", nphb, float)\r\n data = parse(data, amber_atom_types, \"AMBER_ATOM_TYPE\", natoms, str)\r\n data = parse(data, tree_chain, \"TREE_CHAIN_CLASSIFICATION\", natoms, str)\r\n data = parse(data, join_array, \"JOIN_ARRAY\", natoms, int)\r\n data = parse(data, irotat, \"IROTAT\", natoms, int)\r\n data = parse(data, radii, \"RADII\", natoms, float)\r\n data = parse(data, screen, \"SCREEN\", natoms, float)\r\n\r\n # Further process dihedrals and impropers\r\n # Deal with multiplicity\r\n # A bit ugly, but the fastest for now\r\n # forceconstants[2][dihedrals[0][i][4]-1], int(equilvals[2][0][dihedrals[0][i][4]-1]), equilvals[2][1][dihedrals[0][i][4]-1]\r\n\r\n mult_di = dict()\r\n mult_im = dict()\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n if (not mult_di.has_key(di_id)):\r\n mult_di[di_id] = [1, False, [forceconstants[2][final_dihedrals[i][4]-1]], [int(equilvals[2][0][final_dihedrals[i][4]-1])], [equilvals[2][1][final_dihedrals[i][4]-1]]]\r\n else:\r\n mult_di[di_id][0] += 1\r\n mult_di[di_id][2].append(forceconstants[2][final_dihedrals[i][4]-1])\r\n mult_di[di_id][3].append(int(equilvals[2][0][final_dihedrals[i][4]-1]))\r\n mult_di[di_id][4].append(equilvals[2][1][final_dihedrals[i][4]-1])\r\n \r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n if (not mult_im.has_key(di_id)):\r\n mult_im[im_id] = [1, False, [forceconstants[2][final_impropers[i][4]-1]], [int(equilvals[2][0][final_impropers[i][4]-1])], [equilvals[2][1][final_impropers[i][4]-1]]]\r\n else:\r\n mult_im[im_id][0] += 1\r\n mult_im[im_id][2].append(forceconstants[2][final_impropers[i][4]-1])\r\n mult_im[im_id][3].append(int(equilvals[2][0][final_impropers[i][4]-1]))\r\n mult_im[im_id][4].append(equilvals[2][1][final_impropers[i][4]-1])\r\n\r\n\r\n\r\n \r\n #[natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n #phys.myPSF.createAll(natoms, nbonh+mbona, ntheth+mtheta,\r\n # len(dihedrals[0])+len(dihedrals[1]),\r\n # len(impropers[0])+len(impropers[1]),\r\n # 0, 0, 0, 0)\r\n \r\n # Add atoms\r\n curres = 1\r\n for i in range(0, natoms):\r\n phys.myPSF.addAtom(i, 'SIM', curres, reslabels[curres-1],\r\n atomnames[i], atomnames[i], charges[i],\r\n masses[i]) \r\n if (curres != nres and i >= respointers[curres]):\r\n curres += 1\r\n\r\n # Add bonds\r\n for i in range(0, nbonh+nbona):\r\n phys.myPSF.addBond(i+1, final_bonds[i][0]/3+1, final_bonds[i][1]/3+1)\r\n phys.myPAR.addBond(i+1, atomnames[final_bonds[i][0]/3], atomnames[final_bonds[i][1]/3], forceconstants[0][final_bonds[i][2]/3], 
equilvals[0][final_bonds[i][2]/3])\r\n \r\n # Add angles\r\n for i in range(0, ntheth+ntheta):\r\n phys.myPSF.addAngle(i+1, final_angles[i][0]/3+1, final_angles[i][1]/3+1, final_angles[i][2]/3+1)\r\n phys.myPAR.addAngle(i+1, atomnames[final_angles[i][0]/3], atomnames[final_angles[i][1]/3], atomnames[final_angles[i][2]/3], forceconstants[1][final_angles[i][3]/3], equilvals[1][final_angles[i][3]/3])\r\n \r\n # Add dihedrals\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n mult = mult_di[di_id][0]\r\n checked = mult_di[di_id][1]\r\n print di_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], forceconstants[2][final_dihedrals[i][4]-1], int(equilvals[2][0][final_dihedrals[i][4]-1]), equilvals[2][1][final_dihedrals[i][4]-1])\r\n else:\r\n mult_di[di_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_di[di_id][2])):\r\n fcvec.push_back(mult_di[di_id][2][j])\r\n periodvec.push_back(mult_di[di_id][3][j])\r\n phasevec.push_back(mult_di[di_id][4][j])\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n \r\n\r\n\r\n\r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n mult = mult_im[im_id][0]\r\n checked = mult_im[im_id][1]\r\n print im_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], forceconstants[2][final_impropers[i][4]-1], int(equilvals[2][0][final_impropers[i][4]-1]), equilvals[2][1][final_impropers[i][4]-1])\r\n else:\r\n mult_im[im_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_im[im_id][2])):\r\n fcvec.push_back(mult_im[im_id][2][j])\r\n periodvec.push_back(mult_im[im_id][3][j])\r\n phasevec.push_back(mult_im[im_id][4][j])\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n 
phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n\r\n \r\n # Need to add garbage nonbonded stuff for now\r\n for i in range(0, natoms):\r\n phys.myPAR.addNonbonded(i, atomnames[i], 1, 1, 1, 1, 1, 1)\r\n\r\n # Add VDW parameters\r\n # AMBER has the Aij and Bij already in the parameter file\r\n # This actually makes life easier.\r\n # CHARMM does not, they simply have the original sigma and epsilon.\r\n # To compensate for this, for now we will leave the nonbondeds empty in phys.myPAR\r\n # We will then access the LennardJones parameter table in Topology directly\r\n k = 0\r\n phys.myTop.resizeLennardJonesParameters(ntypes)\r\n for i in range(0, ntypes):\r\n for j in range(i, ntypes):\r\n params = GenericTopology.LennardJonesParameters(lj_acoef[k], lj_bcoef[k])\r\n k += 1\r\n phys.myTop.setLennardJonesParameters(i, j, params)\r\n \r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def load_rd_prms(in_file):\n\n prms = np.load(in_file)\n return (prms['vs'],\n prms['mbs'].item(),\n prms['A'],\n prms['B'],\n prms['C'],\n prms['D'],\n prms['E'],\n prms['F'],\n prms['G'],\n prms['synUmax'],\n prms['synVmax'],\n prms['ucmax'],\n prms['dt'],\n prms['Du'],\n prms['Dv'],\n prms['RR'])", "def test_read_mol_input():\n # good input\n read_mol_input(os.path.join(TEST_DIR, \"example_mol_input_file.txt\"))\n # good input with extra spaces\n read_mol_input(os.path.join(TEST_DIR, \"example2_mol_input_file.txt\"))\n # no such file error\n assert_raises(FileNotFoundError, read_mol_input, 'no-such-file')\n # qcm appears twice\n assert_raises(ValueError, read_mol_input, os.path.join(TEST_DIR, \"bad1_mol_input_file.txt\"))\n # missing struct type\n assert_raises(ValueError, read_mol_input, os.path.join(TEST_DIR, \"bad2_mol_input_file.txt\"))", "def read(ios):\n assert(isinstance(ios, io.IOBase))\n return Reader(ios).read()", "def load(self):\n\n address = 0\n\n program = sys.argv[1]\n\n with open(program) as p:\n for instruction in p:\n if instruction[0] == '#':\n continue\n\n instruction = instruction.strip()\n temp = instruction.split()\n\n if len(temp) == 0:\n continue\n\n self.ram[address] = int(temp[0], 2)\n address += 1\n \n # print(\"======= PROGRAM =========\")\n # for i in self.ram[:35]:\n # print(i)", "def load_POD(POD_file):\n if not os.path.isfile(POD_file):\n raise RuntimeError('POD file specified is not a valid file')\n\n # Load data from file\n print('Loading POD data from {}'.format(POD_file))\n POD_data = scutils.load_data(POD_file)\n\n # Generate POD object\n rom = POD(POD_data['POD_info'])\n\n return rom" ]
[ "0.58294064", "0.5599925", "0.5458955", "0.5439821", "0.5309595", "0.52851164", "0.5246632", "0.52451646", "0.51888615", "0.51713043", "0.51402664", "0.50897837", "0.498432", "0.4937878", "0.49296248", "0.4915086", "0.48869547", "0.48765168", "0.485589", "0.48378807", "0.48279318", "0.47893724", "0.477142", "0.4724087", "0.47118333", "0.47093743", "0.4708867", "0.4705663", "0.46883923", "0.46877912" ]
0.6828331
0
Example of using the read_poem function for JAMI.
def test_JAMI(): start = 4107 end = 4126 foldersave = SAVE_PATH + '/jami' if not os.path.isdir(foldersave): os.mkdir(foldersave) read_poems('jami' ,start, end)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read():\n # TODO", "def test_RUMI():\r\n\r\n start = 11051\r\n end = 11902\r\n read_poems(start, end)", "def get_poem(self, p):\n poem = []\n number = self.int2roman[p] + \".\"\n next_number = self.int2roman[p + 1] + \".\"\n print(self.index[number])\n index1 = self.index[number][0]\n index2 = self.index[next_number][0]\n for i in range(index1 + 1, index2):\n if self.msgs[i] != \" \" and self.get_msg(i) != \"\":\n poem.append(self.get_msg(i))\n return poem", "def read_pe(self, image_base, size=0):\n # Calculate the size of the in-memory PE\n self.ret = b\"\"\n\n size = self._calc_pe_size(image_base)\n\n if size == 0:\n return self.ret\n\n # Read PE data from IDA database\n self.ret = self.get_bytes(image_base, size)\n return self.ret", "def parse_poet_poem(self, response):\n poemitem = response.meta['poemitem']\n sresponse = scrapy.Selector(response)\n poemitem['poem_text'] = sresponse.xpath('//div[@property = \"content:encoded\"]//text()').extract()\n poemitem['poem_copyright'] = sresponse.xpath('//div[@class = \"poem-credit\"]//p//text()').extract()\n\n yield poemitem", "def test_pmkb_pb():\n harvested_evidence = json_format.Parse(\n PMKB_TEST_MESSAGE,\n evidence.Evidence(),\n ignore_unknown_fields=True)\n assert harvested_evidence.gene == \"ABL1\"\n assert harvested_evidence.source == \"pmkb\"\n assert harvested_evidence.feature.end == 133748283\n assert 'kinase domain mutations' in \\\n harvested_evidence.association.description\n assert harvested_evidence.pmkb", "def new_hmm_poem():\n #Return a json dictionary, containing the next seed and sentence\n return re.sub(\"\\n\", \"<br>\", hmm.generate_poem())", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, 
ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations", "def test_artemis_reader():\n _test_raw_reader(\n read_raw_artemis123,\n input_fname=short_hpi_1kz_fname,\n pos_fname=dig_fname,\n verbose=\"error\",\n )", "def get_pem():\n try:\n with open('encrypted_pem.txt', 'r') as encrypted_pem:\n pem_file = encrypted_pem.read()\n\n kms = boto3.client('kms', region_name=REGION)\n return kms.decrypt(CiphertextBlob=b64decode(pem_file))['Plaintext']\n except (IOError, ClientError, KeyError) as err:\n LOGGER.error(err)\n return False", "def test_PhonopyYaml_read(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n cell = _get_unitcell(filename)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def readOdom(msg):\n global pose\n global xPosition\n global yPosition\n global theta\n global odom_list\n global odom_tf\n try:\n pose = msg.pose\n geo_quat = pose.pose.orientation\n q = [geo_quat.x, geo_quat.y, geo_quat.z, geo_quat.w]\n odom_tf.sendTransform((pose.pose.position.x, pose.pose.position.y, 0), \n (pose.pose.orientation.x, pose.pose.orientation.y,pose.pose.orientation.z,pose.pose.orientation.w),rospy.Time.now(),\"base_footprint\",\"odom\")\n #Convert transform to global usable coordinates (x, y, theta)\n (trans, rot) = odom_list.lookupTransform('map', 'base_footprint', rospy.Time(0))\n roll, pitch, yaw = euler_from_quaternion(rot)\n theta = yaw * (180.0/math.pi)\n xPosition = trans[0]\n yPosition = trans[1]\n except:\n print \"waiting\"", "def hmm_poem():\n return render_template(\"hmm_poem.html\")", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def read_all_pram(self):\n return self.PRAM", "def readPAR(self,phys,parname):\r\n PARReader.PARReader(self.checkPath(parname),0).read(phys.myPAR)\r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def find_long_description():\n return read_file('../README.md')", "def _read_message(self):\n if self.__eof:\n return None\n result = {}\n line = sys.stdin.readline()\n while line == '\\n':\n line = sys.stdin.readline()\n if not line:\n self.__eof = True\n return None\n s = line.split(\" \", 1)\n result['_number'] = int(s[0])\n result['_text'] = s[1].strip()\n\n while not self.__eof:\n line = sys.stdin.readline()\n if not line:\n self.__eof = True\n return result\n if line == '\\n':\n return result\n s = line.split(\":\", 1)\n result[s[0]] = s[1].strip()", "def read(self):", "def parse_readme():\n # Get the long description from the relevant file\n readme_path = path.join(here, 'README.md')\n with codecs.open(readme_path, encoding='utf-8') as handle:\n desc = handle.read()\n\n return desc", "def read(self):\n return \"print('Hello World')\".encode()", "def test_readme():\n readme = Path(README_PATH).read_text()\n Actions.read_from_md(readme)", "def read(self):\n self._read_into_buffer()\n # print([hex(i) for i in self._buffer])\n\n # check packet header\n if not self._buffer[0:2] == b\"BM\":\n raise 
RuntimeError(\"Invalid PM2.5 header\")\n\n # check frame length\n frame_len = struct.unpack(\">H\", self._buffer[2:4])[0]\n if frame_len != 28:\n raise RuntimeError(\"Invalid PM2.5 frame length\")\n\n checksum = struct.unpack(\">H\", self._buffer[30:32])[0]\n check = sum(self._buffer[0:30])\n if check != checksum:\n raise RuntimeError(\"Invalid PM2.5 checksum\")\n\n # unpack data\n (\n self.aqi_reading[\"pm10 standard\"],\n self.aqi_reading[\"pm25 standard\"],\n self.aqi_reading[\"pm100 standard\"],\n self.aqi_reading[\"pm10 env\"],\n self.aqi_reading[\"pm25 env\"],\n self.aqi_reading[\"pm100 env\"],\n self.aqi_reading[\"particles 03um\"],\n self.aqi_reading[\"particles 05um\"],\n self.aqi_reading[\"particles 10um\"],\n self.aqi_reading[\"particles 25um\"],\n self.aqi_reading[\"particles 50um\"],\n self.aqi_reading[\"particles 100um\"],\n ) = struct.unpack(\">HHHHHHHHHHHH\", self._buffer[4:28])\n\n return self.aqi_reading", "def read_particle( filename, species, quantity ) :\n # Translate the quantity to the OpenPMD format\n dict_quantity = { 'x' : 'position/x',\n 'y' : 'position/y',\n 'z' : 'position/z',\n 'ux' : 'momentum/x',\n 'uy' : 'momentum/y',\n 'uz' : 'momentum/z',\n 'w' : 'weighting'}\n if quantity in dict_quantity:\n opmd_quantity = dict_quantity[quantity]\n else:\n opmd_quantity = quantity\n\n # Open the HDF5 file\n dfile = h5py.File( filename, 'r' )\n base_path = get_bpath( dfile )\n particles_path = dfile.attrs['particlesPath'].decode()\n\n # Find the right dataset\n species_grp = dfile[ os.path.join( base_path, particles_path, species ) ]\n data = get_data( species_grp[ opmd_quantity ] )\n\n # - Return positions in microns, with an offset\n if quantity in ['x', 'y', 'z']:\n offset = get_data( species_grp[ 'positionOffset/%s' %quantity ] )\n data = 1.e6 * (data + offset)\n # - Return momentum in normalized units\n elif quantity in ['ux', 'uy', 'uz' ]: \n norm_factor = 1./( get_data( species_grp['mass'] ) * constants.c )\n data = data * norm_factor\n\n # Close the HDF5 file and return the data\n dfile.close()\n return( data )", "def read_exercise(env, label):\n # checks whether the exercise location is set by the user\n sl_ex_directory = env.config.sp_exercise_directory\n if sl_ex_directory is None:\n raise RuntimeError('The sp_exercise_directory sphinx config '\n 'value must be set.')\n # localise the directory if given as an absolute path\n if sl_ex_directory.startswith('/'):\n localised_directory = '.' 
+ sl_ex_directory\n else:\n localised_directory = sl_ex_directory\n # check whether the directory exists\n if not os.path.exists(localised_directory):\n raise RuntimeError('The sp_exercise_directory ({}) does not '\n 'exist.'.format(localised_directory))\n\n # format the filename\n assert not label.endswith('.md')\n if label.startswith('ex:'):\n exercise_id = label[3:]\n elif label.startswith('sol:'):\n exercise_id = label[4:]\n else:\n raise RuntimeError('The label either has to start with \"ex:\" or '\n '\"sol:\".')\n\n filename = '{}.md'.format(exercise_id)\n exercise_path = os.path.join(localised_directory, filename)\n\n # ensure that the file exists\n sphinx_prolog.file_exists(exercise_path)\n\n # read the file\n with open(exercise_path, 'r') as f:\n exercise_content = f.read()\n\n # add this file to watch list for rebuilding this document\n env.note_dependency(exercise_path)\n\n return exercise_content", "def read_one(ppname):\n if ppname in pp_dict:\n pilotpoint = pp_dict.get(ppname)\n else:\n abort(\n 404, \"Person with last name {ppname} not found\".format(ppname=ppname)\n )\n return pilotpoint", "def ReadEEPROM(self,offset = 0, count = 64):\r\n if offset + count > 64:\r\n sys.stderr.write(\"offset + count too big, must be smaller or equal 64\")\r\n raise\r\n buffer = create_string_buffer(count)\r\n CALL('ReadEEPROM',self,INT(offset),buffer,INT(count))\r\n return buffer.value", "def read(name):\n\n return open(name).read()", "def systemRead():\n return", "def read_qe(qefile, task):\n fileobj = open(qefile)\n lines = fileobj.readlines()\n fileobj.close()\n if task == \"PW_INP\": # Reading a pw.x input file\n for i, line in enumerate(lines):\n if \"nat\" in line:\n # Reading the number of atoms in the cell\n if \",\" in line.split()[2]:\n nat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n nat = int(line.split()[2])\n elif \"ntyp\" in line:\n if \",\" in line.split()[2]:\n ntypat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n ntypat = int(line.split()[2])\n elif \"CELL_PARAMETERS\" in line:\n # Reading the cell vectors\n cell = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"ATOMIC_POSITIONS\" in line:\n if \"crystal\" in line:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]]), cell)\n else:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n # Returning the input structure\n rstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n return rstrc\n elif task == \"PW_OUT_RELAX\": # Reading a pw.x output file for a calculation = \"relax\"\n status = \"NONE\"\n rstrcs = []\n rtotEs = []\n rtotFs = []\n rforces = []\n rstress = []\n for i, line in enumerate(lines):\n # Initial information related to the input cell\n if \"number of atoms/cell\" in line:\n # Reading the number of atoms in the cell\n nat = int(line.split()[4])\n elif \"number of atomic types\" in line:\n ntypat = int(line.split()[5])\n elif \"crystal axes: (cart. coord. 
in units of alat)\" in line:\n # Reading the cell vectors\n cell = [x.split()[3:6] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"Crystallographic axes\" in line:\n # Reading the input coordinates and creating a collection of ase.Atoms objects\n geom_start = i + 3\n geom_stop = geom_start + nat\n species = [line.split()[1] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[6:9]]\n for line in lines[geom_start:geom_stop]]), cell)\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates (first)\")\n # Now, just after each SCF cycle\n # Reading total energy\n elif \"Forces acting on atoms\" in line:\n forces_start = i + 2\n forces_stop = forces_start + nat\n try:\n forces = array([[float(col) for col in line.split()[6:9]]\n for line in lines[forces_start:forces_stop]])\n #print (\"Appending forces\")\n rforces.append(forces)\n except ValueError:\n # expected to occur when forces are too big\n # and so incompatible with the format used in QE\n # for instance:\n # atom 3 type 2 force = 674.57999165 312.30521069-1079.69944125\n print (\"Rerror reading forces in file:\")\n print (qefile)\n #print (\"Appending forces (empty)\")\n rforces.append([])\n elif \"! total energy\" in line:\n rtotEs.append(float(line.split()[4]))\n #print (\"Appending energy\")\n elif \"total stress (Ry/bohr**3)\" in line:\n # Reading the stress tensor\n stress = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n stress = array([[float(col) for col in row] for row in stress])\n rstress.append(stress)\n #print (\"Appending stress\")\n elif \"Total force\" in line:\n rtotFs.append(float(line.split()[3]))\n #print (\"Appending total forces\")\n elif \"ATOMIC_POSITIONS (alat)\" in line:\n # Reading the relaxed and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates\")\n elif \"convergence NOT achieved after 100 iterations: stopping\" in line:\n # Removing the last item the vector with structures\n status = \"SCF_NOT_CONVERGED\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if no even the first SCF started\n if len(rtotEs) == 0 and status == \"NONE\":\n status = \"CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the SCF has not been finished because of timeout\n if len(rstrcs) > len(rtotEs) and status == \"NONE\":\n status = \"TIMEOUT_OR_CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the BFGS has been finished\n if status == \"TIMEOUT_OR_CRASH\" and \"JOB DONE\" in lines[len(lines)-2]:\n status = \"FINISHED\"\n # Returning a collection of cells and properties\n return status, rstrcs, rtotEs, rtotFs, rforces, rstress" ]
[ "0.5775257", "0.56736845", "0.5344736", "0.5199725", "0.5198908", "0.5132048", "0.5058452", "0.50483435", "0.50442195", "0.50354457", "0.5030674", "0.5013057", "0.49539334", "0.4930905", "0.49156946", "0.4905181", "0.48967895", "0.4886876", "0.48672244", "0.48424646", "0.4829353", "0.48252207", "0.4821853", "0.4797124", "0.4793517", "0.47443876", "0.47282138", "0.4726255", "0.47111407", "0.46900988" ]
0.6138367
0
Returns the created or updated environment asset.
def create_or_update(self, environment: Environment) -> Environment: try: if not environment.version and environment._auto_increment_version: environment.version = _get_next_version_from_container( name=environment.name, container_operation=self._containers_operations, resource_group_name=self._operation_scope.resource_group_name, workspace_name=self._workspace_name, registry_name=self._registry_name, **self._kwargs, ) sas_uri = None if self._registry_name: if isinstance(environment, WorkspaceAssetReference): # verify that environment is not already in registry try: self._version_operations.get( name=environment.name, version=environment.version, resource_group_name=self._resource_group_name, registry_name=self._registry_name, ) except Exception as err: # pylint: disable=broad-except if isinstance(err, ResourceNotFoundError): pass else: raise err else: msg = "A environment with this name and version already exists in registry" raise ValidationException( message=msg, no_personal_data_message=msg, target=ErrorTarget.ENVIRONMENT, error_category=ErrorCategory.USER_ERROR, ) environment_rest = environment._to_rest_object() result = self._service_client.resource_management_asset_reference.begin_import_method( resource_group_name=self._resource_group_name, registry_name=self._registry_name, body=environment_rest, **self._kwargs, ).result() if not result: env_rest_obj = self._get(name=environment.name, version=environment.version) return Environment._from_rest_object(env_rest_obj) sas_uri = get_sas_uri_for_registry_asset( service_client=self._service_client, name=environment.name, version=environment.version, resource_group=self._resource_group_name, registry=self._registry_name, body=get_asset_body_for_registry_storage( self._registry_name, "environments", environment.name, environment.version, ), ) environment = _check_and_upload_env_build_context( environment=environment, operations=self, sas_uri=sas_uri, show_progress=self._show_progress ) env_version_resource = environment._to_rest_object() env_rest_obj = ( self._version_operations.begin_create_or_update( name=environment.name, version=environment.version, registry_name=self._registry_name, body=env_version_resource, **self._scope_kwargs, **self._kwargs, ).result() if self._registry_name else self._version_operations.create_or_update( name=environment.name, version=environment.version, workspace_name=self._workspace_name, body=env_version_resource, **self._scope_kwargs, **self._kwargs, ) ) if not env_rest_obj and self._registry_name: env_rest_obj = self._get(name=environment.name, version=environment.version) return Environment._from_rest_object(env_rest_obj) except Exception as ex: # pylint: disable=broad-except if isinstance(ex, SchemaValidationError): log_and_raise_error(ex) else: raise ex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def asset(self):\n\t\treturn self._asset", "def get_asset(self, name):\n assert self.has_asset(name), \"Asset is not created yet, use has_asset for checking\"\n return self.assets[name]", "def get_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type)", "def get_environment() -> Environment:\n return Environment(\n media_url=get_endpoint(\"MEDIA\"),\n datastore_reader_url=get_endpoint(\"DATASTORE_READER\"),\n datastore_writer_url=get_endpoint(\"DATASTORE_WRITER\"),\n vote_url=get_endpoint(\"VOTE\"),\n )", "def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)", "def _get(self) -> json_api.generic.Metadata:\n api_endpoint = ApiEndpoints.assets.fields\n return api_endpoint.perform_request(http=self.auth.http, asset_type=self.parent.ASSET_TYPE)", "def getAssetInfo(self):\n return self._AssetInfo", "def snapshot():\n return Env(os.environ)", "def GetAssetResourceSpec():\n return concepts.ResourceSpec(\n 'dataplex.projects.locations.lakes.zones.assets',\n resource_name='assets',\n projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,\n locationsId=LocationAttributeConfig(),\n lakesId=LakeAttributeConfig(),\n zonesId=ZoneAttributeConfig(),\n assetsId=AssetAttributeConfig())", "def _get_environment(self):\n if self._cache.get(\"_environment\") is None:\n name = self.get(\"environmentname\", \"default\")\n if name:\n db = self.session\n try:\n env = db.query(models.Environment).filter(models.Environment.name==name).one()\n except config.NoResultFound as err:\n raise config.ConfigError(\"Bad environmentname %r: %s\" % (name, err))\n username = self.get(\"username\") # username should be set by test runner\n if username:\n if env.is_owned():\n if env.owner.username != username:\n raise config.ConfigError(\"Environment is currently owned by: %s\" % (env.owner,))\n env.set_owner_by_username(db, username)\n env = EnvironmentRuntime(db, env, self.logfile)\n self._cache[\"_environment\"] = env\n else:\n raise config.ConfigError, \"Bad environmentname %r.\" % (name,)\n return self._cache[\"_environment\"]", "def getAssetWithName(self, name):\n return self.__assets[name]", "def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)", "def get_asset(self, short_name):\n return self._assets[short_name]", "def test_create_system_asset(self):\n pass", "def environment(self) -> pulumi.Output['outputs.EnvironmentResponse']:\n return pulumi.get(self, \"environment\")", "def get_asset(location, filename):\r\n return contentstore().find(Transcript.asset_location(location, filename))", "def test_update_system_asset(self):\n pass", "def _get_asset_json(display_name, date, location, thumbnail_location, locked):\r\n asset_url = location.to_deprecated_string()\r\n external_url = settings.LMS_BASE + asset_url\r\n return {\r\n 'display_name': display_name,\r\n 'date_added': get_default_time_display(date),\r\n 'url': asset_url,\r\n 'external_url': external_url,\r\n 'portable_url': StaticContent.get_static_path_from_location(location),\r\n 'thumbnail': thumbnail_location.to_deprecated_string() if thumbnail_location is not None else None,\r\n 'locked': locked,\r\n # Needed for Backbone delete/update.\r\n 'id': unicode(location)\r\n }", "def test_retrieve_system_asset(self):\n pass", "def set_asset(self, asset, expiration, timestamp):\n return self \\\n .asset_id(asset.asset_id) \\\n .fold() \\\n .coalesce(\n # The asset exists.\n __.unfold()\n .choose(\n 
__.values('first_seen').is_(P.gt(timestamp)),\n __.property(Cardinality.single, 'first_seen', timestamp),\n __.identity(),\n )\n .choose(\n __.values('last_seen').is_(P.lt(timestamp)),\n __.property(Cardinality.single, 'last_seen', timestamp)\n .property(Cardinality.single, 'expiration', expiration),\n __.identity(),\n )\n .project('vertex', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(True)),\n # The asset does not exist.\n __.addV('Asset')\n .property(T.id, str(uuid.uuid4()))\n .property(Cardinality.single, 'type', asset.asset_id.type)\n .property(\n Cardinality.single,\n 'identifier',\n asset.asset_id.identifier,\n )\n .property(Cardinality.single, 'first_seen', timestamp)\n .property(Cardinality.single, 'last_seen', timestamp)\n .property(Cardinality.single, 'expiration', expiration)\n .project('vertex', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(False)),\n )", "def GenerateAssetForUpdateRequestAlpha(args):\n module = dataplex_api.GetMessageModule()\n asset = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n discoverySpec=GenerateDiscoverySpec(args),\n )\n if args.IsSpecified('resource_read_access_mode'):\n setattr(\n asset,\n 'resourceSpec',\n module.GoogleCloudDataplexV1AssetResourceSpec(\n readAccessMode=(\n module.GoogleCloudDataplexV1AssetResourceSpec.ReadAccessModeValueValuesEnum(\n args.resource_read_access_mode\n )\n )\n ),\n )\n return asset", "def GenerateAssetForUpdateRequest(args):\n module = dataplex_api.GetMessageModule()\n return module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n discoverySpec=GenerateDiscoverySpec(args))", "def environment_created(self):\n\n pass", "def asset(atype, aname):\n if atype not in ('css', 'js'):\n raise template.TemplateSyntaxError('Type can only be one of css or js.')\n\n if aname not in ASSETS[atype]:\n raise ValueError('Invalid asset: %r' % aname)\n\n meta = ASSETS[atype][aname]\n\n return {\n 'USE_MINIFIED': USE_MINIFIED,\n 'type': atype,\n 'asset': aname,\n 'meta': meta,\n }", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def get_asset(self, asset_id):\n text, code = ApiClient(self._config, 'assets/' + asset_id).get()\n return Asset.deserialize(text)", "def sid(self) -> Asset:\n return self.asset", "def prepare_env(self, project=None, env=None):\n if project == None or env == None:\n return [None, '']\n else:\n memory_file = BytesIO()\n with zipfile.ZipFile(memory_file, 'w') as zf:\n if env.bundle != None and env.bundle.storage != '':\n try:\n bundle_buffer = StringIO()\n if 'http://' in env.bundle.storage or 'https://' in env.bundle.storage:\n bundle_buffer = self.web_get_file(env.bundle.storage)\n else:\n bundle_buffer = self.storage_get_file('bundle', env.bundle.storage)\n\n data = zipfile.ZipInfo(\"bundle.%s\"%(env.bundle.storage.split(\"/\")[-1].split(\".\")[-1]))\n data.date_time = time.localtime(time.time())[:6]\n data.compress_type = zipfile.ZIP_DEFLATED\n data.external_attr |= 0o777 << 16 
# -rwx-rwx-rwx\n zf.writestr(data, bundle_buffer.read())\n except:\n print(traceback.print_exc())\n\n try:\n json_buffer = StringIO()\n json_buffer.write(env.to_json())\n json_buffer.seek(0)\n\n data = zipfile.ZipInfo(\"env.json\")\n data.date_time = time.localtime(time.time())[:6]\n data.compress_type = zipfile.ZIP_DEFLATED\n data.external_attr |= 0o777 << 16 # -rwx-rwx-rwx\n zf.writestr(data, json_buffer.read())\n except:\n print(traceback.print_exc())\n memory_file.seek(0)\n\n return [memory_file, \"project-%s-env-%s.zip\"%(str(project.id), str(env.id))]", "def asset(self, asset_id, asset_type, action='GET'):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n asset_methods = {\n 'handle': self.tc_requests.adversary_handle_asset,\n 'phone': self.tc_requests.adversary_phone_asset,\n 'url': self.tc_requests.adversary_url_asset,\n }\n\n # handle invalid input\n if asset_methods.get(asset_type.lower()) is None:\n self._handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n\n return asset_methods[asset_type.lower()](self.unique_id, asset_id, action=action)", "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)" ]
[ "0.61956143", "0.61238104", "0.58050895", "0.5628729", "0.56143606", "0.5594775", "0.5588402", "0.5505756", "0.549205", "0.5458131", "0.5398734", "0.5353036", "0.53321236", "0.532311", "0.53202885", "0.5256134", "0.52190095", "0.5204874", "0.5187553", "0.5187243", "0.5174594", "0.516894", "0.51643497", "0.514695", "0.51344347", "0.51263505", "0.5116946", "0.5098547", "0.5088736", "0.5082291" ]
0.61968577
0
Archive an environment or an environment version.
def archive( self, name: str, version: Optional[str] = None, label: Optional[str] = None, **kwargs, # pylint:disable=unused-argument ) -> None: name = _preprocess_environment_name(name) _archive_or_restore( asset_operations=self, version_operation=self._version_operations, container_operation=self._containers_operations, is_archived=True, name=name, version=version, label=label, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ArchiveChromeEbuildEnv(self):\n files = glob.glob(os.path.join(self._pkg_dir, constants.CHROME_CP) + '-*')\n if not files:\n raise artifact_stages.NothingToArchiveException(\n 'Failed to find package %s' % constants.CHROME_CP)\n if len(files) > 1:\n logging.PrintBuildbotStepWarnings()\n logging.warning('Expected one package for %s, found %d',\n constants.CHROME_CP, len(files))\n\n chrome_dir = sorted(files)[-1]\n env_bzip = os.path.join(chrome_dir, 'environment.bz2')\n with osutils.TempDir(prefix='chrome-sdk-stage') as tempdir:\n # Convert from bzip2 to tar format.\n bzip2 = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)\n cros_build_lib.RunCommand(\n [bzip2, '-d', env_bzip, '-c'],\n log_stdout_to_file=os.path.join(tempdir, constants.CHROME_ENV_FILE))\n env_tar = os.path.join(self.archive_path, constants.CHROME_ENV_TAR)\n cros_build_lib.CreateTarball(env_tar, tempdir)\n self._upload_queue.put([os.path.basename(env_tar)])", "def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)", "def archive(project, filename, pack_envs=False):\n return archiver._archive_project(project, filename, pack_envs)", "def archive_projectbuild(projectbuild, archive):\n transport = get_transport_for_projectbuild(projectbuild, archive)\n transport.archive()", "def restore(\n self,\n name: str,\n version: Optional[str] = None,\n label: Optional[str] = None,\n **kwargs, # pylint:disable=unused-argument\n ) -> None:\n name = _preprocess_environment_name(name)\n _archive_or_restore(\n asset_operations=self,\n version_operation=self._version_operations,\n container_operation=self._containers_operations,\n is_archived=False,\n name=name,\n version=version,\n label=label,\n )", "def archive(self, namespace, archive_name, namespace_out=None, format=None):\n raise RuntimeError('Already achieved')", "def deploy(verbose, app, archive):\n return _deploy_in_mode(\n mode=\"live\", verbose=verbose, log=log, app=app, archive=archive\n )", "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def create_zip_file():\n shutil.make_archive(os.path.join(DIST_DIR, \"build\"), \"zip\", BUILD_DIR)", "def generate_archive_file(location, paths, environment=None, compression=None, archive_format=None):\n if archive_format == 'zip':\n archive = ZipTarWrapper(location.name, 'w', zipfile.ZIP_DEFLATED)\n else:\n write_type = \"w\"\n if compression:\n write_type = \"w|{0}\".format(compression)\n archive = tarfile.open(location.name, write_type)\n\n # Add all the things to the archive\n for path_spec in paths:\n path_spec.add_to_tar(archive, environment)\n\n # Finish the zip\n archive.close()\n\n return archive", "def environment(args, env_name=None):\n\n chefserver = open_chef_connection(args)\n if env_name is None:\n env_name = args.get('name')\n env = chefserver.get_env(name=env_name)\n env_attrs = env.to_dict()\n backup_attributes(\n backup_dict=env_attrs,\n name='%s_Environment' % env_name\n )\n new_env = _package_upgrades(\n args=args, env_attrs=_super_munger(\n 
quantum_name_check(\n args, env_attrs\n )\n )\n )\n\n chefserver.put_env(old_env=env_name, new_env=new_env)", "def prepare_env(self, project=None, env=None):\n if project == None or env == None:\n return [None, '']\n else:\n memory_file = BytesIO()\n with zipfile.ZipFile(memory_file, 'w') as zf:\n if env.bundle != None and env.bundle.storage != '':\n try:\n bundle_buffer = StringIO()\n if 'http://' in env.bundle.storage or 'https://' in env.bundle.storage:\n bundle_buffer = self.web_get_file(env.bundle.storage)\n else:\n bundle_buffer = self.storage_get_file('bundle', env.bundle.storage)\n\n data = zipfile.ZipInfo(\"bundle.%s\"%(env.bundle.storage.split(\"/\")[-1].split(\".\")[-1]))\n data.date_time = time.localtime(time.time())[:6]\n data.compress_type = zipfile.ZIP_DEFLATED\n data.external_attr |= 0o777 << 16 # -rwx-rwx-rwx\n zf.writestr(data, bundle_buffer.read())\n except:\n print(traceback.print_exc())\n\n try:\n json_buffer = StringIO()\n json_buffer.write(env.to_json())\n json_buffer.seek(0)\n\n data = zipfile.ZipInfo(\"env.json\")\n data.date_time = time.localtime(time.time())[:6]\n data.compress_type = zipfile.ZIP_DEFLATED\n data.external_attr |= 0o777 << 16 # -rwx-rwx-rwx\n zf.writestr(data, json_buffer.read())\n except:\n print(traceback.print_exc())\n memory_file.seek(0)\n\n return [memory_file, \"project-%s-env-%s.zip\"%(str(project.id), str(env.id))]", "def test_archive() -> None:\n\n rule_runner = PythonRuleRunner(\n rules=[\n *target_type_rules(),\n *pex_from_targets.rules(),\n *package_pex_binary.rules(),\n *python_target_type_rules.rules(),\n QueryRule(BuiltPackage, [ArchiveFieldSet]),\n ],\n target_types=[ArchiveTarget, FilesGeneratorTarget, RelocatedFiles, PexBinary],\n )\n rule_runner.set_options([], env_inherit={\"PATH\", \"PYENV_ROOT\", \"HOME\"})\n\n rule_runner.write_files(\n {\n \"resources/d1.json\": \"{'k': 1}\",\n \"resources/d2.json\": \"{'k': 2}\",\n \"resources/BUILD\": dedent(\n \"\"\"\\\n files(name='original_files', sources=['*.json'])\n\n relocated_files(\n name='relocated_files',\n files_targets=[':original_files'],\n src=\"resources\",\n dest=\"data\",\n )\n \"\"\"\n ),\n \"project/app.py\": \"print('hello world!')\",\n \"project/BUILD\": \"pex_binary(entry_point='app.py')\",\n \"BUILD\": dedent(\n \"\"\"\\\n archive(\n name=\"archive1\",\n packages=[\"project\"],\n files=[\"resources:original_files\"],\n format=\"zip\",\n )\n\n archive(\n name=\"archive2\",\n packages=[\":archive1\"],\n files=[\"resources:relocated_files\"],\n format=\"tar\",\n output_path=\"output/archive2.tar\",\n )\n \"\"\"\n ),\n }\n )\n\n def get_archive(target_name: str) -> FileContent:\n tgt = rule_runner.get_target(Address(\"\", target_name=target_name))\n built_package = rule_runner.request(BuiltPackage, [ArchiveFieldSet.create(tgt)])\n digest_contents = rule_runner.request(DigestContents, [built_package.digest])\n assert len(digest_contents) == 1\n return digest_contents[0]\n\n def assert_archive1_is_valid(zip_bytes: bytes) -> None:\n io = BytesIO()\n io.write(zip_bytes)\n with zipfile.ZipFile(io) as zf:\n assert set(zf.namelist()) == {\n \"resources/d1.json\",\n \"resources/d2.json\",\n \"project/project.pex\",\n }\n with zf.open(\"resources/d1.json\", \"r\") as f:\n assert f.read() == b\"{'k': 1}\"\n with zf.open(\"resources/d2.json\", \"r\") as f:\n assert f.read() == b\"{'k': 2}\"\n\n archive1 = get_archive(\"archive1\")\n assert_archive1_is_valid(archive1.content)\n\n archive2 = get_archive(\"archive2\")\n assert archive2.path == \"output/archive2.tar\"\n io = 
BytesIO()\n io.write(archive2.content)\n io.seek(0)\n with tarfile.open(fileobj=io, mode=\"r:\") as tf:\n assert set(tf.getnames()) == {\"data/d1.json\", \"data/d2.json\", \"archive1.zip\"}\n\n def get_file(fp: str) -> bytes:\n reader = tf.extractfile(fp)\n assert reader is not None\n return reader.read()\n\n assert get_file(\"data/d1.json\") == b\"{'k': 1}\"\n assert get_file(\"data/d2.json\") == b\"{'k': 2}\"\n assert_archive1_is_valid(get_file(\"archive1.zip\"))", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def make_version_file(version_label, dockerrun=None, ebext=None):\n dockerrun = dockerrun or DOCKERRUN_NAME\n ebext = ebext or DOCKEREXT_NAME\n\n tempd = tempfile.mkdtemp()\n try:\n deploy_dockerrun = os.path.join(tempd, DOCKERRUN_NAME)\n deploy_ebext = os.path.join(tempd, DOCKEREXT_NAME)\n shutil.copyfile(dockerrun, deploy_dockerrun)\n shutil.copytree(ebext, deploy_ebext)\n return shutil.make_archive(version_label, 'zip', root_dir=tempd)\n finally:\n shutil.rmtree(tempd)", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)", "def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)", "def archive(self, header, target: str, output_target: str = None):\n\t\ttry:\n\t\t\ttarget = os.path.abspath(target)\n\t\t\tif output_target:\n\t\t\t\toutfile = output_target\n\t\t\telse:\n\t\t\t\tif os.path.isfile(target):\n\t\t\t\t\toutfile = target + \".edz\"\n\t\t\t\telif os.path.isdir(target):\n\t\t\t\t\toutfile = os.path.join(target, os.path.basename(target) + \".edz\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"No valid output target\")\n\t\t\t\t\treturn\n\t\t\t#Zip target dir\n\t\t\tprint(f\"Creating virtual zip of {target}\")\n\t\t\tzip_bytes_object = zipit(target)\n\t\t\tprint(f\"Making .edz at {outfile}...\")\n\t\t\twith open(outfile, \"w+b\") as out:\n\t\t\t\tprint(\"Writing header...\")\n\t\t\t\tout.write(header)\n\t\t\t\tprint(\"Writing zip contents...\")\n\t\t\t\tout.write(zip_bytes_object.getvalue())\n\t\t\tprint(\"Success!\")\n\t\t\treturn outfile\n\t\texcept Exception as e:\n\t\t\tprint(f\"Failed to create edizip from target {target} - {e}\")\n\t\t\tprint(\"Attempting cleanup...\")\n\t\t\ttry:\n\t\t\t\tif 
os.path.isfile(outfile):\n\t\t\t\t\tprint(f\"Removing possibly invalid archive {outfile}\")\n\t\t\t\t\tos.remove(outfile)\n\t\t\texcept:\n\t\t\t\tpass", "def do_deploy(archive_path):\n if not os.path.isfile(archive_path):\n return False\n with api.cd('/tmp'):\n basename = os.path.basename(archive_path)\n root, ext = os.path.splitext(basename)\n outpath = '/data/web_static/releases/{}'.format(root)\n try:\n putpath = api.put(archive_path)\n if files.exists(outpath):\n api.run('rm -rdf {}'.format(outpath))\n api.run('mkdir -p {}'.format(outpath))\n api.run('tar -xzf {} -C {}'.format(putpath[0], outpath))\n api.run('rm -f {}'.format(putpath[0]))\n api.run('mv -u {}/web_static/* {}'.format(outpath, outpath))\n api.run('rm -rf {}/web_static'.format(outpath))\n api.run('rm -rf /data/web_static/current')\n api.run('ln -s {} /data/web_static/current'.format(outpath))\n print('New version deployed!')\n except:\n return False\n else:\n return True", "def do_deploy(archive_path):\n if not os.path.isfile(archive_path):\n return False\n with api.cd(\"/tmp\"):\n basename = os.path.basename(archive_path)\n root, ext = os.path.splitext(basename)\n opath = \"/data/web_static/releases/{}\".format(root)\n try:\n ppath = api.put(archive_path)\n if files.exists(opath):\n api.run(\"rm -rdf {}\".format(opath))\n api.run(\"mkdir -p {}\".format(opath))\n api.run(\"tar -xzf {} -C {}\".format(ppath[0], opath))\n api.run(\"rm -f {}\".format(ppath[0]))\n api.run(\"mv -u {}/web_static/* {}\".format(opath, opath))\n api.run(\"rm -rf {}/web_static\".format(opath))\n api.run(\"rm -rf /data/web_static/current\")\n api.run(\"ln -sf {} /data/web_static/current\".format(opath))\n print(\"New version deployed!\")\n except:\n return False\n else:\n return True", "def do_deploy(archive_path):\n\n if not os.path.exists(archive_path):\n return(False)\n try:\n put(archive_path, \"/tmp/\")\n folder_path = \"/data/web_static/releases/\" + archive_path[9:-4]\n name_file = archive_path[9:]\n name_folder = archive_path[9:-4]\n date = archive_path[21:-4]\n releases = \"/data/web_static/releases/\"\n\n run(\"mkdir -p {}\".format(folder_path))\n run(\"tar -xzf /tmp/{} -C {}\".format(name_file, folder_path))\n run(\"rm /tmp/{}\".format(name_file))\n run(\"mv {}{}/web_static/* {}{}/\"\n .format(releases, name_folder, releases, name_folder))\n run(\"rm -rf {}{}/web_static\".format(releases, name_folder))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s {} /data/web_static/current\".format(folder_path))\n print(\"New version deployed!\")\n\n return(True)\n except BaseException:\n return (False)", "def do_deploy(archive_path):\n if not os.path.isfile(archive_path):\n return False\n with api.cd(\"/tmp\"):\n basename = os.path.basename(archive_path)\n root, ext = os.path.splitext(basename)\n opath = \"/data/web_static/releases/{}\".format(root)\n try:\n ppath = api.put(archive_path)\n if files.exists(opath):\n api.run(\"rm -rdf {}\".format(opath))\n api.run(\"mkdir -p {}\".format(opath))\n api.run(\"tar -xzf {} -C {}\".format(ppath[0], opath))\n api.run(\"rm -f {}\".format(ppath[0]))\n api.run(\"mv -u {}/web_static/* {}\".format(opath, opath))\n api.run(\"rm -rf {}/web_static\".format(opath))\n api.run(\"rm -rf /data/web_static/current\")\n api.run(\"ln -s {} /data/web_static/current\".format(opath))\n print(\"New version deployed!\")\n except:\n return False\n else:\n return True", "def _archive_repository(\n owner: str, project_name: str, secret_token: str\n) -> Tuple[bool, str]:\n project_settings = {\"archived\": \"true\"}\n\n headers 
= {\n \"Authorization\": f\"token {secret_token}\",\n }\n\n url = f\"https://{REST_HOST}/repos/{owner}/{project_name}\"\n\n response = patch(url, json=project_settings, headers=headers, verify=VERIFY_CERT)\n return response.ok, (\n f\"Status: {response.status_code}. \" f'Error: \"{response.text}\".'\n )", "def archive(mongo_backup_file):\r\n filename = get_archive_filename()\r\n tar = tarfile.open(filename, \"w|gz\")\r\n tar.add(mongo_backup_file)\r\n tar.close()\r\n\r\n return filename", "def archive(self, dest, rev=None, no_decode=False, prefix=None, type=None,\n subrepos=False, include=None, exclude=None):\n\n # Normalise the input\n rev = self._map_one_rev(rev)\n\n eh = SimpleErrorHandler()\n\n self._client.execute('archive', dest, r=rev, no_decode=no_decode,\n p=prefix, t=type, S=subrepos, I=include,\n X=exclude, eh=eh)\n\n return bool(eh)", "def do_deploy(archive_path):\n\n if not os.path.exists(archive_path):\n return False\n\n ret = True\n\n tmpfolder = put(archive_path, '/tmp/')\n\n if tmpfolder.failed:\n ret = False\n\n dirc = archive_path.replace(\".tgz\", \"\").replace(\"versions/\", \"\")\n dest = run('mkdir -p /data/web_static/releases/' + dirc + '/')\n\n if dest.failed:\n ret = False\n\n unpack = run('tar -xzf /tmp/' + dirc + '.tgz' +\n ' -C /data/web_static/releases/' + dirc + '/')\n\n if unpack.failed:\n ret = False\n\n clean = run('rm /tmp/' + dirc + '.tgz')\n\n if clean.failed:\n ret = False\n\n move = run('mv /data/web_static/releases/' + dirc +\n '/web_static/* /data/web_static/releases/' + dirc + '/')\n\n if move.failed:\n ret = False\n\n cleanfolder = run('rm -rf /data/web_static/releases/' + dirc +\n '/web_static')\n\n if cleanfolder.failed:\n ret = False\n\n rmold = run('rm -rf /data/web_static/current')\n\n if rmold.failed:\n ret = False\n\n new = run('ln -sf /data/web_static/releases/' + dirc +\n '/' + ' /data/web_static/current')\n\n if new.failed:\n ret = False\n\n if ret:\n print(\"New version deployed!\")\n\n return ret", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def archive_experiment_and_alternatives(self, experiment):\n if not experiment:\n return\n\n experiment.archived = True\n experiment.live = False\n experiment.put()\n\n alts = self.get_alternatives(experiment.name)\n for alternative in alts:\n alternative.archived = True\n alternative.live = False\n\n db.put(alts)\n\n self.remove_from_cache(experiment)", "def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)", "def do_deploy(archive_path):\n if not os.path.exists(archive_path):\n return False\n else:\n try:\n put(archive_path, \"/tmp/\")\n filename = archive_path.split('/')\n no_ext = filename[-1].split('.')\n archive = no_ext[0]\n run(\"mkdir -p /data/web_static/releases/\" + archive + \"/\")\n run(\"tar -zxf /tmp/\" + filename[1] +\n \" -C /data/web_static/releases/\" +\n archive + \"/\")\n run(\"rm /tmp/\" + filename[1])\n run(\"mv /data/web_static/releases/\" + archive +\n \"/web_static/* /data/web_static/releases/\" + archive + \"/\")\n run(\"rm -rf /data/web_static/releases/\" + archive + \"/web_static\")\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s /data/web_static/releases/\" + archive +\n \"/ /data/web_static/current\")\n print(\"New version deployed!\")\n return 
True\n except:\n return False" ]
[ "0.64604753", "0.62564677", "0.6224388", "0.587912", "0.5624461", "0.55861765", "0.53567517", "0.53505874", "0.53499496", "0.5349092", "0.5345595", "0.5324937", "0.5316224", "0.52405334", "0.5240352", "0.517459", "0.51526314", "0.5145824", "0.5133149", "0.5131143", "0.51297665", "0.5127689", "0.5106147", "0.50670606", "0.5038915", "0.50263965", "0.5021822", "0.50078243", "0.5006462", "0.5006215" ]
0.69926417
0
Restore an archived environment version.
def restore( self, name: str, version: Optional[str] = None, label: Optional[str] = None, **kwargs, # pylint:disable=unused-argument ) -> None: name = _preprocess_environment_name(name) _archive_or_restore( asset_operations=self, version_operation=self._version_operations, container_operation=self._containers_operations, is_archived=False, name=name, version=version, label=label, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restore(self, archive):\n logger.info(\"Restoring an old archive run from {}\".format(archive))\n if os.path.isabs(archive):\n restorefile = archive\n else:\n restorefile = os.path.join(self.containerpath, const.ARCHIVEDIR, archive)\n with ignored(OSError):\n shutil.rmtree(os.path.join(self.rundir))\n with tarfile.open(restorefile, \"r:gz\") as f:\n def is_within_directory(directory, target):\n \n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n \n prefix = os.path.commonprefix([abs_directory, abs_target])\n \n return prefix == abs_directory\n \n def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n \n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n \n tar.extractall(path, members, numeric_owner=numeric_owner) \n \n \n safe_extract(f, self.rundir)\n self._refreshconfig()", "def restore(cls):\n util.restore_infiles()\n util.restore_binaries()", "def restore_ehr_version(self, ehr_record, version):\n self._check_unecessary_restore(ehr_record)\n return self.version_manager.restore_revision(ehr_record.record_id, version)", "def restore(self, oid, serial, data, version, prev_txn, transaction):\n assert not version\n self._check_trans(transaction, 'restore')\n self._async('restorea', oid, serial, data, prev_txn, id(transaction))", "def __restoreBackup(self):\n pass #FIXME!!!", "def restore_version(version, request):\n if not version.is_disabled:\n raise ApiError(\n \"Unable to restore this version: status is not disabled.\"\n )\n version.is_disabled = False\n version.save()", "def restore_inventory(self):\n if config.get(\"aws\", \"s3_bucket\"):\n loaded_archives = self.load_archives_from_s3()\n\n with glacier_shelve() as d:\n archives = {}\n for a in loaded_archives:\n print a\n archives[a[\"filename\"]] = a[\"archive_id\"]\n d[\"archives\"] = archives\n else:\n raise Exception(\"You must set s3_bucket in order to backup/restore inventory to/from S3.\")", "def restore(self, checkpoint):\n raise NotImplementedError", "def restore_old_install(self):\n USER.info('%s: Restoring Old Install', self.recipe.name)\n shutil.move(self.back_dir, self.recipe.install_dir)\n pakit.conf.IDB[self.recipe.name] = self.old_entry\n walk_and_link(self.recipe.install_dir, self.recipe.link_dir)", "def restore_original_ehr(self, ehr_record):\n self._check_unecessary_restore(ehr_record)\n return self.version_manager.restore_original(ehr_record.record_id)", "def restore(self):\n\n self.brain.restore_checkpoint()", "def restore(self, path, rev):\n path = \"/restore/%s%s\" % (self.session.root, format_path(path))\n\n params = {\n 'rev': rev,\n }\n\n url, params, headers = self.request(path, params)\n\n return self.rest_client.POST(url, params, headers)", "def restore_session(self, dir_model):\n self.logger.info(\"Reloading the latest trained model...\")\n self.saver.restore(self.session, dir_model)", "def restore(ctx, destination, filesystem, backup_time):\n config_path = ctx.obj['config_path']\n\n config = Config(config_path)\n job = config.jobs.get(filesystem)\n\n if job is None:\n print('Filesystem does not exist.')\n sys.exit(1)\n\n job.restore(backup_time, destination)\n\n print('Restore successful.')", "def restore(self):\n\t\treturn Job(SDK.PrlVm_Restore(self.handle)[0])", "def restore(self):\n if self._restored_model:\n return\n with self.eval_graph.graph.as_default():\n last_checkpoint = 
self._find_last_checkpoint()\n # TODO(rbharath): Is setting train=False right here?\n saver = tf.train.Saver()\n saver.restore(self._get_shared_session(train=False), last_checkpoint)\n self._restored_model = True", "def restore(version_manager, request):\n version_manager.is_disabled = False\n version_manager.save()\n return version_manager", "def restore(self, PATH):\n self._saver.restore(self._sess, PATH)", "def restore(self, dest: str, remove_existing: bool = False):\n if os.path.isdir(dest):\n dest = os.path.join(dest, \"lightningd.sqlite3\")\n if os.path.exists(dest):\n if not remove_existing:\n raise ValueError(\n \"Destination for backup restore exists: {dest}\".format(\n dest=dest\n )\n )\n os.unlink(dest)\n\n self.db = self._db_open(dest)\n for c in tqdm(self.stream_changes(), total=self.version_count):\n if c.snapshot is not None:\n self._restore_snapshot(c.snapshot, dest)\n if c.transaction is not None:\n self._restore_transaction(c.transaction)\n self.db.commit()", "def _restore(self, a_path):\n super(RDPAnalyzer, self)._restore(a_path)\n self._model._restore()", "def restore_session(self, dir_model):\n self.logger.info(\"Reloading the latest trained model...\")\n self.saver.restore(self.sess, dir_model)", "def restore(self, restore):\n self._restore = restore", "def restore(self):\n raise NotImplementedError", "def restore(self, job, revision):\n\t\t\n\t\tjob_dict = self._get_job_dict(job)\n\t\tjob_dict['name'] = job\n\t\t\n\t\t# start restore process\n\t\tbackup = Backup(job_dict, self.db)\n\t\tbackup.restore(revision)", "def restore_session(self, dir_model):\r\n self.logger.info(\"Reloading the latest trained model...\")\r\n self.saver.restore(self.sess, tf.train.latest_checkpoint(dir_model))", "def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')", "def restore_object(Bucket=None, Key=None, VersionId=None, RestoreRequest=None, RequestPayer=None):\n pass", "def restore_full_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreSystemState(state_ref)\n self.ale.deleteState(state_ref)", "def restore_backup(self, backup, name, flavor, volume):\n return self._manager.restore_backup(backup, name, flavor, volume)", "def restore_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreState(state_ref)\n self.ale.deleteState(state_ref)" ]
[ "0.63505644", "0.6313094", "0.62881464", "0.6256433", "0.6234121", "0.621091", "0.60073245", "0.60008067", "0.5994659", "0.5933157", "0.5901754", "0.5885354", "0.5843295", "0.5842139", "0.5838413", "0.5836682", "0.5814803", "0.57988536", "0.5793264", "0.5792679", "0.5767203", "0.57649827", "0.5753781", "0.57412165", "0.5715796", "0.5705098", "0.5693891", "0.568859", "0.56780577", "0.56737447" ]
0.66135824
0
Share an environment asset from workspace to registry.
def share(self, name, version, *, share_with_name, share_with_version, registry_name) -> Environment: # Get workspace info to get workspace GUID workspace = self._service_client.workspaces.get( resource_group_name=self._resource_group_name, workspace_name=self._workspace_name ) workspace_guid = workspace.workspace_id workspace_location = workspace.location # Get environment asset ID asset_id = ASSET_ID_FORMAT.format( workspace_location, workspace_guid, AzureMLResourceType.ENVIRONMENT, name, version, ) environment_ref = WorkspaceAssetReference( name=share_with_name if share_with_name else name, version=share_with_version if share_with_version else version, asset_id=asset_id, ) with self._set_registry_client(registry_name): return self.create_or_update(environment_ref)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_or_update(self, environment: Environment) -> Environment:\n try:\n if not environment.version and environment._auto_increment_version:\n environment.version = _get_next_version_from_container(\n name=environment.name,\n container_operation=self._containers_operations,\n resource_group_name=self._operation_scope.resource_group_name,\n workspace_name=self._workspace_name,\n registry_name=self._registry_name,\n **self._kwargs,\n )\n sas_uri = None\n if self._registry_name:\n if isinstance(environment, WorkspaceAssetReference):\n # verify that environment is not already in registry\n try:\n self._version_operations.get(\n name=environment.name,\n version=environment.version,\n resource_group_name=self._resource_group_name,\n registry_name=self._registry_name,\n )\n except Exception as err: # pylint: disable=broad-except\n if isinstance(err, ResourceNotFoundError):\n pass\n else:\n raise err\n else:\n msg = \"A environment with this name and version already exists in registry\"\n raise ValidationException(\n message=msg,\n no_personal_data_message=msg,\n target=ErrorTarget.ENVIRONMENT,\n error_category=ErrorCategory.USER_ERROR,\n )\n\n environment_rest = environment._to_rest_object()\n result = self._service_client.resource_management_asset_reference.begin_import_method(\n resource_group_name=self._resource_group_name,\n registry_name=self._registry_name,\n body=environment_rest,\n **self._kwargs,\n ).result()\n\n if not result:\n env_rest_obj = self._get(name=environment.name, version=environment.version)\n return Environment._from_rest_object(env_rest_obj)\n\n sas_uri = get_sas_uri_for_registry_asset(\n service_client=self._service_client,\n name=environment.name,\n version=environment.version,\n resource_group=self._resource_group_name,\n registry=self._registry_name,\n body=get_asset_body_for_registry_storage(\n self._registry_name,\n \"environments\",\n environment.name,\n environment.version,\n ),\n )\n\n environment = _check_and_upload_env_build_context(\n environment=environment, operations=self, sas_uri=sas_uri, show_progress=self._show_progress\n )\n env_version_resource = environment._to_rest_object()\n env_rest_obj = (\n self._version_operations.begin_create_or_update(\n name=environment.name,\n version=environment.version,\n registry_name=self._registry_name,\n body=env_version_resource,\n **self._scope_kwargs,\n **self._kwargs,\n ).result()\n if self._registry_name\n else self._version_operations.create_or_update(\n name=environment.name,\n version=environment.version,\n workspace_name=self._workspace_name,\n body=env_version_resource,\n **self._scope_kwargs,\n **self._kwargs,\n )\n )\n if not env_rest_obj and self._registry_name:\n env_rest_obj = self._get(name=environment.name, version=environment.version)\n return Environment._from_rest_object(env_rest_obj)\n except Exception as ex: # pylint: disable=broad-except\n if isinstance(ex, SchemaValidationError):\n log_and_raise_error(ex)\n else:\n raise ex", "def export(self, **env):\n with self.lock:\n for key, value in env.items():\n self.environment[key] = value", "def share(source, dest, branch=None, revision=None):\n run_cmd(['hg', 'share', '-U', source, dest])\n return update(dest, branch=branch, revision=revision)", "async def _push_share(self, container, recipient, rights):\n client_url = os.environ.get(\"SWIFT_X_ACCOUNT_SHARING_URL\", None)\n if not client_url:\n logging.log(\n logging.ERROR,\n \"Swift X Account sharing API environment variables %s%s\",\n \"haven't been sourced. 
Please source the file if it is \",\n \"available, or download a new one from the storage UI.\",\n )\n async with swift_x_account_sharing_bind.SwiftXAccountSharing(\n client_url\n ) as client:\n await client.share_new_access(\n os.environ.get(\"OS_PROJECT_ID\", None),\n container,\n recipient,\n rights,\n self._get_address(),\n )", "def shareEditor(self, share):\n aw = self.activeWindow()\n if aw is not None:\n fn = aw.getFileName()\n if fn and e5App().getObject(\"Project\").isProjectFile(fn):\n aw.shareEditor(share)", "def upload_shared():\n # MARK: default copy to home dir\n put(conf.INS_ARGS['shared_folder'], '~/')", "def share():\n return True", "def share(self, value):\n self._tensor.share = value", "def share_to_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.add_shared(self)", "def share(config: Config, ami: str, account: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n Value=\"string\",\n DryRun=False,\n )", "def share(local_port):\n env['local_port'] = local_port\n\n try:\n env['remote_port'] = env['port_map'][local_port]\n except KeyError:\n sys.exit('Port mapping does not exist for port %s' % local_port)\n \n sys.stdout.write('Sharing local port %(local_port)s on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n run('ssh -i %(pair_private_key)s -N -R 0.0.0.0:%(remote_port)s:localhost:%(local_port)s %(relay_user)s@%(relay_server)s' % env)", "def register_environment(self) -> Any:\n self.get_workspace()\n self.environment = Environment(self.environment_name)\n self.environment = self._set_environment_properties(self.environment)\n build = self.environment.build(self.ws)\n self.environment.register(self.ws)\n return build", "def Share(self, *args):\n return _RWStepGeom.RWStepGeom_RWOrientedSurface_Share(self, *args)", "def environment(self, environment):\n\n self._set_field(\"environment\", environment.get_json())", "def push_environment(cluster: Cluster, path: Optional[str] = None):\n log = get_logger(__name__)\n with stage_info(log, \"Pushing the environment to cluster.\"):\n try:\n remote_environment = deserialize_environment_from_cluster(\n cluster=cluster,\n path=path)\n except RuntimeError:\n log.info(\"Remote environment is missing, current environment will\"\n \" be copied to cluster.\")\n log.debug(\"Exception\", exc_info=1)\n remote_environment = EnvironmentImpl()\n\n local_environment = EnvironmentProvider().environment\n merged_environment = merge_environments(local=remote_environment,\n remote=local_environment)\n serialize_environment_to_cluster(environment=merged_environment,\n cluster=cluster,\n path=path)", "def share(observer, tid):\n try:\n manager = Actions()\n manager.share_task(observer, tid)\n except Exception as e:\n click.echo(e)", "def lock_environment(bucket, environment):\n s3.put_object(Bucket=bucket, Key=\"%s.lock\"%environment)", "def modify_share(self, pool, project, share, arg):\n svc = self.share_path % (pool, project, share)\n ret = self.rclient.put(svc, arg)\n if ret.status != restclient.Status.ACCEPTED:\n exception_msg = (_('Error modifying %(arg)s '\n ' of share %(id)s.')\n % {'arg': arg,\n 'id': share})\n raise exception.ShareBackendException(msg=exception_msg)", "def cli(env, identifier, account_id):\n\n image_mgr = 
SoftLayer.ImageManager(env.client)\n image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')\n shared_image = image_mgr.share_image(image_id, account_id)\n\n if shared_image:\n env.fout(f\"Image template {identifier} was shared to account {account_id}.\")", "def sync_local_fabric_env(self):\n env.sync_filename = '/tmp/{0}_env.txt'.format(time.time())\n env_copy = self.env\n env_copy.use_ssh_config = False\n env_copy.host = False\n env_copy.host_string = False\n env_copy.local_deployment = True\n # TODO: add context from each need to repopulate\n with self.file.tmpfile(self.to_json(env_copy, cls=SilentEncoder)) as f:\n self.up(f.name, env.sync_filename)", "def environment(self, environment):\n\n self._environment = environment", "def environment(self, environment):\n\n self._environment = environment", "def environment(self, environment):\n\n self._environment = environment", "def asset(self, asset):\n\n self._asset = asset", "def save_env():\n global vis\n vis.save([vis.env])", "def share(job):\n client = get_dropbox_client()\n\n try:\n return client.share(\n '/Video Automation Platform/jobs/{job}/{job}.mov'.format(job=job))\n\n except ErrorResponse:\n return False", "def upload_resources(self):\n try:\n script_name, script_type, script_content = experiment_utils.get_source_script(self._context_symbol_table)\n\n if script_name:\n self.exp_metadata.script_name = script_name\n\n if script_content:\n file_utils.save_string(os.path.join(self.output_path, script_name), script_content)\n\n if script_type:\n self.exp_metadata.script_type = script_type\n\n except Exception as e:\n self.log.info(\"Failed to get script: \" + str(e))\n\n # TODO zip git directory here as well?\n\n if not self._env.is_connected():\n self.log.warning(\"Environment is not connected to Lab. 
Experiment data cannot be uploaded.\")\n return\n\n if self._tensorboard_path and os.path.isdir(self._tensorboard_path):\n remote_path = os.path.join(self.key,\n file_utils.get_folder_name(self._tensorboard_path) + \".zip\")\n tensorboard_key = self._env.upload_folder(self._tensorboard_path,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n if tensorboard_key:\n self.exp_metadata.resources.tensorboard_logs = tensorboard_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(tensorboard_key)\n\n if os.path.isfile(self.stdout_path):\n remote_path = os.path.join(self.key, file_utils.get_filename(self.stdout_path, exclude_extension=False))\n stdout_key = self._env.upload_file(self.stdout_path,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n\n if stdout_key:\n self.exp_metadata.resources.stdout = stdout_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(stdout_key)\n\n if self.upload_code_script:\n # upload script file if available -> if file name was set and file exists in local folder\n if self.exp_metadata.script_name:\n script_path = os.path.join(self.output_path, self.exp_metadata.script_name)\n if os.path.isfile(script_path):\n remote_path = os.path.join(self.key, file_utils.get_filename(script_path, exclude_extension=False))\n script_file_key = self._env.upload_file(script_path,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n\n if script_file_key:\n self.exp_metadata.resources.source_script = script_file_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(script_file_key)\n\n if self.upload_code_repo:\n # upload git repository if available\n git_root_dir = experiment_utils.get_git_root(self._exp_script_dir)\n if git_root_dir:\n # zip git repository with all files under 50 MB and ignore .git and environment folder\n zipped_repo = file_handler_utils.zip_folder(git_root_dir, max_file_size=50,\n excluded_folders=[\"environment\", \".git\"])\n if zipped_repo:\n remote_path = os.path.join(self.key,\n self._SOURCE_CODE_PACKAGE_NAME) # use original folder name?\n source_code_key = self._env.upload_file(zipped_repo,\n self._env.DataType.EXPERIMENT,\n file_name=remote_path,\n track_event=False)\n\n if source_code_key:\n self.exp_metadata.resources.source_code = source_code_key\n self.exp_metadata.resources.experiment_dir = os.path.dirname(source_code_key)", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound:\n env = cls(*args)\n env.shared = True\n return env", "def path_extern_share(self) -> PurePath:\n return PurePath(self.path_extern_supervisor, SHARE_DATA)" ]
[ "0.5777924", "0.5638162", "0.5500348", "0.5376419", "0.5235503", "0.5227396", "0.5188119", "0.516572", "0.5142718", "0.5114613", "0.5113794", "0.5063201", "0.5045216", "0.4983797", "0.49475348", "0.49176106", "0.49024117", "0.48822293", "0.48794913", "0.4873375", "0.48162332", "0.48162332", "0.48162332", "0.4812399", "0.480003", "0.47735763", "0.4768746", "0.47440153", "0.47380707", "0.47357333" ]
0.70924634
0
Sets the registry client for the environment operations.
def _set_registry_client(self, registry_name: str) -> None: rg_ = self._operation_scope._resource_group_name sub_ = self._operation_scope._subscription_id registry_ = self._operation_scope.registry_name client_ = self._service_client environment_versions_operation_ = self._version_operations try: _client, _rg, _sub = get_registry_client(self._service_client._config.credential, registry_name) self._operation_scope.registry_name = registry_name self._operation_scope._resource_group_name = _rg self._operation_scope._subscription_id = _sub self._service_client = _client self._version_operations = _client.environment_versions yield finally: self._operation_scope.registry_name = registry_ self._operation_scope._resource_group_name = rg_ self._operation_scope._subscription_id = sub_ self._service_client = client_ self._version_operations = environment_versions_operation_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set(self, client):\n if not client:\n raise SurvoxAPIMissingParameter('client')\n c = self.get()\n if not c:\n raise SurvoxAPIRuntime('No client available named: {name}'.format(name=self.name))\n return self.api_put(endpoint=self.url, data=client)", "def grr_set_client(line: Text) -> None:\n args = grr_set_client.parser.parse_args(shlex.split(line))\n magics_impl.grr_set_client_impl(args.hostname, args.client)", "def registry_host(self, registry_host: str):\n\n self._registry_host = registry_host", "def client_setup(self):\n self.client = Client()", "def register_client(self, client, client_name):\n self.clients[client_name] = client", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def register(self, client):\n self.clients.append(client)", "def oaas_registry() -> OaasRegistryStub:\n global _oaas_registry\n\n if _oaas_registry:\n LOG.debug(\"oaas_registry() Using cached registry client\")\n return _oaas_registry\n\n LOG.debug(\"oaas_registry() Using new registry client\")\n _oaas_registry = oaas.get_client(OaasRegistryStub) # type: ignore\n\n return _oaas_registry", "def setCooperationClient(self, client):\n self.__cooperationClient = client", "def init_client(self, client):\n self.client = client", "def set_target_registry(args):\n if 'target_registry' not in args:\n return\n\n if args['target_registry'] == '':\n args['target_registry'] = None\n return\n\n args['target_registry'] = (\n AuthenticatedRegistry.query.filter_by(\n base_name=args['target_registry'])).first()\n\n if args['target_registry'] is None:\n raise NoModelError('Registry')", "def _setEnv(self):\n try:\n global_env_prfix = \"/GlobalEnv/\"\n if self.etcd_key_prefix is not None:\n global_env_prfix = self.etcd_key_prefix + \"/GlobalEnv/\"\n value = self.etcd.get(global_env_prfix)\n if value[0] is not None:\n jsonConfig = json.loads(value[0].decode('utf-8'))\n for key in jsonConfig.keys():\n os.environ[key] = jsonConfig[key]\n else:\n raise TypeError(\"config manager key {} must be set as \\\n a prerequisite ...\".format(global_env_prfix))\n except Exception as e:\n self.logger.error(\"Exception raised in _setEnv\\\n with error:{}\".format(e))\n raise e", "def client_extensions(self, client_extensions):\n\n self._client_extensions = client_extensions", "def client_extensions(self, client_extensions):\n\n self._client_extensions = client_extensions", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def _init_keystone_admin_client(self, api_version):\n self.keystone_sentry = self.d.sentry['keystone'][0]\n keystone_ip = self.keystone_sentry.info['public-address']\n if self._get_openstack_release() >= self.xenial_queens:\n api_version = 3\n client_class = keystone_client.Client\n if api_version == 3:\n client_class = keystone_client_v3.Client\n session, auth = u.get_keystone_session(\n keystone_ip,\n api_version=api_version,\n username='admin',\n password='openstack',\n project_name='admin',\n user_domain_name='admin_domain',\n project_domain_name='admin_domain')\n self.keystone = client_class(session=session)\n self.keystone.auth_ref = auth.get_access(session)", "def set(self, shell=None):\n\n # iterate over the env variable objects and set them in the env\n for var in self._vars.itervalues():\n var.set(shell=shell)", "def set_up_client():\n #creating new flask app and a test client\n app = create_app('test')\n client = app.test_client()\n\n #creating the application context and\n #allowing test functions to run by calling test client\n #and 
finally cleaning house\n ctx = app.app_context()\n ctx.push()\n yield client\n ctx.pop()", "def initialize_registry(self):\n client = self.application.__init_blockchain_client__()\n response = client.initialize()\n client.close()\n\n return response", "def registry(self):\n if self._registry is None:\n print('Creating container registry...')\n registry_ops = self.registry_client.registries\n try:\n registry = registry_ops.get(\n self.resources.group.name,\n self.name,\n )\n except CloudError:\n registry_creation = registry_ops.create(\n self.resources.group.name,\n self.name,\n RegistryCreateParameters(\n location=self.storage.account.location,\n sku=ContainerRegistrySku(ContainerRegistrySkuName.basic),\n admin_user_enabled=True,\n storage_account=StorageAccountParameters(\n self.storage.account.name,\n self.storage.key,\n ),\n )\n )\n registry = registry_creation.result()\n self._registry = registry\n print('Got container registry:', registry.name)\n return self._registry", "def env(self, env):\n\n self._env = env", "def env(self, env):\n\n self._env = env", "def setUp(self) -> None:\n\n self.federal_client = FederalRegister()", "def registry_version(self, registry_version):\n\n self._registry_version = registry_version", "def _init_raw_client(self) -> None:\n if self.credentials:\n auth = HTTPBasicAuth(self.credentials['username'], self.credentials['password'])\n else:\n auth = None\n base_url = \"http://\" if self.untrusted else \"https://\"\n base_url += self.url\n self.raw_client = client.DockerRegistryClient(base_url=base_url, auth=auth)", "def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None, nx=False):\r\n if client is None:\r\n key = self.make_key(key, version=version)\r\n client = self.get_server(key)\r\n\r\n return super(ShardClient, self).set(key=key, value=value,\r\n timeout=timeout, version=version,\r\n client=client, nx=nx)", "def set(self, value: str):\n self.openshift.do_action(\"set\", [\"env\", self.environ.resource_type, self.deployment, f\"{self.name}={value}\"])\n # pylint: disable=protected-access\n self.environ.wait_for_resource(self.deployment)", "def set_entity_class_registry(self, entity_class_registry):\n self.entity_class_registry = entity_class_registry", "def register_env_creator(self):\n raise NotImplementedError(\"Subclasses should implement this to call ray.tune.registry.register_env\")", "def to_k8s_client_obj(self):\n return k8s.V1EnvVar(\n name=self.name,\n value_from=k8s.V1EnvVarSource(field_ref=k8s.V1ObjectFieldSelector(field_path=self.field_path)),\n )" ]
[ "0.6370861", "0.6181394", "0.5674733", "0.56536514", "0.56119114", "0.56051415", "0.56028444", "0.5592929", "0.54459864", "0.54050434", "0.5392513", "0.53147316", "0.5255586", "0.5255586", "0.5170892", "0.5153842", "0.51261556", "0.5072216", "0.5055539", "0.50308275", "0.50156486", "0.50156486", "0.5004277", "0.49952102", "0.49721932", "0.49463788", "0.49435744", "0.4940646", "0.48956877", "0.48906258" ]
0.7440585
0
Generates tensors for BERT
def generate_bert_tensor_data(dataset, tokenizer, MAX_LEN): # initializes dataset.bert_tokens, dataset.bert_labels dataset.generate_bert_tokens(tokenizer) bert_token_ids = [] attn_masks = [] segment_ids = [] for tokens in dataset.bert_tokens: padded_tokens = tokens + ['[PAD]' for _ in range(MAX_LEN - len(tokens))] attn_mask = [1 if token != '[PAD]' else 0 for token in padded_tokens] seg_ids = [0 for _ in range(len(padded_tokens))] token_ids = tokenizer.convert_tokens_to_ids(padded_tokens) bert_token_ids.append(token_ids) attn_masks.append(attn_mask) segment_ids.append(seg_ids) bert_labels = to_tensor_labels(dataset.bert_labels, MAX_LEN) return torch.tensor(bert_token_ids), torch.tensor(segment_ids), torch.tensor(attn_masks), bert_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bert_module_fn(is_training):\n\n input_ids = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_ids\")\n input_mask = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_mask\")\n token_type = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"segment_ids\")\n\n config = modeling.BertConfig.from_json_file(config_path)\n model = modeling.BertModel(config=config, is_training=is_training,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type)\n \n seq_output = model.all_encoder_layers[seq_layer]\n tok_output = model.all_encoder_layers[tok_layer]\n pool_output = model.get_pooled_output()\n\n config_file = tf.constant(value=config_path, dtype=tf.string, name=\"config_file\")\n vocab_file = tf.constant(value=vocab_path, dtype=tf.string, name=\"vocab_file\")\n lower_case = tf.constant(do_lower_case)\n\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, vocab_file)\n \n input_map = {\"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": token_type}\n \n output_map = {\"pooled_output\": pool_output,\n \"sequence_output\": seq_output,\n \"token_output\": tok_output}\n\n output_info_map = {\"vocab_file\": vocab_file,\n \"do_lower_case\": lower_case}\n \n hub.add_signature(name=\"tokens\", inputs=input_map, outputs=output_map)\n hub.add_signature(name=\"tokenization_info\", inputs={}, outputs=output_info_map)", "def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False): \n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n \n with torch.set_grad_enabled(False):\n embeddings = {'ids': [],\n 'embeddings': [],\n 'labels': []\n }\n \n # get BERT training embeddings\n \n if metadata:\n for local_ids, local_data, local_meta, local_labels in data_generator:\n local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \\\n local_meta, \\\n local_labels.to(device).long()\n\n #print(local_data[0].shape)\n augmented_embeddings = embedding_model(local_data, local_meta)\n\n embeddings['ids'].extend(np.array(local_ids))\n embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))\n embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))\n else:\n for local_ids, local_data, local_labels in data_generator:\n local_data, local_labels = local_data.to(device).long().squeeze(1), \\\n local_labels.to(device).long()\n #print(local_data[0].shape)\n augmented_embeddings = embedding_model(local_data)\n\n embeddings['ids'].extend(np.array(local_ids))\n embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))\n embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))\n \n return embeddings", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), 
max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size", "def build_bert_input(data, data_path, tokenizer):\n\n cache_fp = f\"{data_path[:data_path.rfind('.')]}_{type(tokenizer).__name__}_{str(BERT_MAX_LEN)}_cache\"\n if os.path.isfile(cache_fp): \n logger.info(\"Loading tokenized data from cache...\")\n all_samples = torch.load(cache_fp)\n return all_samples\n\n bert_sequences = [] \n\n # modification for turn classification task \n if 'turn' in data_path:\n for instance in data:\n seq = \"[CLS] {} [SEP] {} [SEP]\".format(instance['p'], instance['r'])\n bert_sequences.append([instance['label'], seq])\n\n # regular yes-and classifier \n else: \n \n for k in data['non-yesands'].keys():\n for non_yesand in data['non-yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(non_yesand['p'], non_yesand['r'])\n bert_sequences.append([0, seq])\n \n for k in data['yesands'].keys(): \n for yesand in data['yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(yesand['p'], yesand['r'])\n bert_sequences.append([1, seq])\n\n sentences = [x[1] for x in bert_sequences]\n labels = [x[0] for x in bert_sequences]\n logger.info(\"Tokenizing loaded data...\")\n tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n\n\n # cache_fp = data_path[:data_path.rfind('.')] + \"_\" + type(tokenizer).__name__\n # if os.path.isfile(cache_fp): \n # logger.info(\"Loading tokenized data from cache...\")\n # tokenized_texts = torch.load(cache_fp)\n # else: \n # logger.info(\"Tokenizing loaded data...\")\n # # tokenize with BERT tokenizer \n # tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n # torch.save(tokenized_texts, cache_fp)\n\n\n\n # pad input to MAX_LEN\n input_ids = pad_sequences(tokenized_texts, maxlen=BERT_MAX_LEN, dtype=\"long\", truncating=\"post\", padding=\"post\")\n\n # get attention masks and segment ids \n attention_masks = build_attention_mask(input_ids)\n segment_ids = build_segment_ids(input_ids)\n\n all_samples = [{\"input_ids\": input_ids[i], \"token_type_ids\": segment_ids[i], \"attention_mask\": attention_masks[i], \"label\": labels[i]} for i in range(len(input_ids))]\n torch.save(all_samples, cache_fp)\n\n return all_samples", "def batch_to_tensors(batch, dictionary, iseval=False, force_min_sen_len=-1):\r\n max_sent_length1, max_sent_length2 = get_max_length(batch)\r\n if force_min_sen_len>max_sent_length1: max_sent_length1 = force_min_sen_len\r\n if force_min_sen_len>max_sent_length2: max_sent_length2 = force_min_sen_len\r\n\r\n all_sentences1 = torch.LongTensor(len(batch), max_sent_length1)\r\n sent_len1 = np.zeros(len(batch), dtype=np.int)\r\n all_sentences2 = torch.LongTensor(len(batch), max_sent_length2)\r\n sent_len2 = np.zeros(len(batch), dtype=np.int)\r\n labels = torch.LongTensor(len(batch))\r\n for i in range(len(batch)):\r\n sent_len1[i], sent_len2[i] = len(batch[i].sentence1), len(batch[i].sentence2)\r\n\r\n\r\n if force_min_sen_len>sent_len1[i]: sent_len1[i] = force_min_sen_len\r\n if force_min_sen_len>sent_len2[i]: sent_len2[i] = force_min_sen_len\r\n\r\n trim_flag = False\r\n if sent_len1[i]>max_sent_length1 or sent_len2[i]>max_sent_length1:\r\n sent_len1[i] = force_min_sen_len\r\n sent_len2[i] = force_min_sen_len\r\n trim_flag = True\r\n\r\n if not trim_flag:\r\n all_sentences1[i] = sequence_to_tensor(batch[i].sentence1, max_sent_length1, dictionary)\r\n all_sentences2[i] = sequence_to_tensor(batch[i].sentence2, max_sent_length2, dictionary)\r\n 
else:\r\n all_sentences1[i] = sequence_to_tensor(batch[i].sentence1[:max_sent_length1], max_sent_length1, dictionary)\r\n all_sentences2[i] = sequence_to_tensor(batch[i].sentence2[:max_sent_length1], max_sent_length2, dictionary)\r\n\r\n labels[i] = batch[i].label\r\n\r\n if iseval:\r\n return Variable(all_sentences1, volatile=True), sent_len1, Variable(all_sentences2, volatile=True), sent_len2, \\\r\n Variable(labels, volatile=True)\r\n else:\r\n return Variable(all_sentences1), sent_len1, Variable(all_sentences2), sent_len2, Variable(labels)", "def _get_entities_representation(self, bert_out: tf.Tensor, ner_labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\n # dropout\n bert_out = self.bert_dropout(bert_out, training=self.training_ph)\n\n # pieces -> tokens\n x = tf.gather_nd(bert_out, self.first_pieces_coords_ph) # [batch_size, num_tokens, bert_dim]\n\n # birnn\n if self.birnn_re is not None:\n sequence_mask = tf.sequence_mask(self.num_tokens_ph)\n x = self.birnn_re(x, training=self.training_ph, mask=sequence_mask) # [N, num_tokens, cell_dim * 2]\n # d_model = self.config[\"model\"][\"re\"][\"rnn\"][\"cell_dim\"] * 2\n # else:\n # d_model = self.config[\"model\"][\"bert\"][\"dim\"]\n\n # маскирование\n num_tokens = tf.shape(ner_labels)[1]\n mask = upper_triangular(num_tokens, dtype=tf.int32)\n ner_labels *= mask[None, :, :]\n\n # векторизация сущностей\n no_entity_id = self.config[\"model\"][\"ner\"][\"no_entity_id\"]\n span_mask = tf.not_equal(ner_labels, no_entity_id) # [batch_size, num_tokens, num_tokens]\n start_coords, end_coords, num_entities = get_padded_coords_3d(mask_3d=span_mask)\n x_start = tf.gather_nd(x, start_coords) # [N, num_entities, D]\n x_end = tf.gather_nd(x, end_coords) # [N, num_entities, D]\n\n # attn\n grid, sequence_mask_span = get_span_indices(\n start_ids=start_coords[:, :, 1],\n end_ids=end_coords[:, :, 1]\n ) # ([batch_size, num_entities, span_size], [batch_size, num_entities, span_size])\n\n batch_size = tf.shape(x)[0]\n x_coord = tf.range(batch_size)[:, None, None, None] # [batch_size, 1, 1, 1]\n grid_shape = tf.shape(grid) # [3]\n x_coord = tf.tile(x_coord, [1, grid_shape[1], grid_shape[2], 1]) # [batch_size, num_entities, span_size, 1]\n y_coord = tf.expand_dims(grid, -1) # [batch_size, num_entities, span_size, 1]\n coords = tf.concat([x_coord, y_coord], axis=-1) # [batch_size, num_entities, span_size, 2]\n x_span = tf.gather_nd(x, coords) # [batch_size, num_entities, span_size, d_model]\n # print(x_span)\n w = self.dense_attn_1(x_span) # [batch_size, num_entities, span_size, H]\n w = self.dense_attn_2(w) # [batch_size, num_entities, span_size, 1]\n sequence_mask_span = tf.expand_dims(sequence_mask_span, -1)\n w += get_additive_mask(sequence_mask_span) # [batch_size, num_entities, span_size, 1]\n w = tf.nn.softmax(w, axis=2) # [batch_size, num_entities, span_size, 1]\n x_span = tf.reduce_sum(x_span * w, axis=2) # [batch_size, num_entities, d_model]\n\n # concat\n x_entity = tf.concat([x_start, x_end, x_span], axis=-1) # [batch_size, num_entities, d_model * 3]\n\n return x_entity, num_entities", "def _build(self):\n if self.attn:\n self.Attn = AttentionNet(self.dim_b1, channels=self.channels, name='Attn')\n self.predsb1 = self.Attn(self.xb1, is_training=self.is_training)\n self.predsb2 = self.Attn(self.xb2, is_training=self.is_training, reuse=True)\n #TODO: generators want to make their synthetics look like b1/b2 to attn model\n\n self.loss_attn = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb1, 
labels=tf.zeros_like(self.predsb1)))\n self.loss_attn += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb2, labels=tf.ones_like(self.predsb2)))\n\n self.attnb1 = tf.gradients(self.loss_attn, self.xb1)[0]\n self.attnb2 = tf.gradients(self.loss_attn, self.xb2)[0]\n\n self.attnb1 = tf.abs(self.attnb1)\n self.attnb1 = self.attnb1 / tf.reduce_sum(self.attnb1, axis=1, keep_dims=True)\n self.attnb1 = self.attnb1 / tf.reduce_max(self.attnb1, axis=1, keep_dims=True)\n\n self.attnb2 = tf.abs(self.attnb2)\n self.attnb2 = self.attnb2 / tf.reduce_sum(self.attnb2, axis=1, keep_dims=True)\n self.attnb2 = self.attnb2 / tf.reduce_max(self.attnb2, axis=1, keep_dims=True)\n\n self.attnb1 = nameop(self.attnb1, 'attnb1')\n self.attnb2 = nameop(self.attnb2, 'attnb2')\n\n self.G12 = GeneratorResnet(self.dim_b1, self.dim_b2, channels=self.channels, name='G12')\n self.Gb2 = self.G12(self.xb1, is_training=self.is_training)\n self.Gb2 = nameop(self.Gb2, 'Gb2')\n\n self.G21 = GeneratorResnet(self.dim_b2, self.dim_b1, channels=self.channels, name='G21')\n self.Gb1 = self.G21(self.xb2, is_training=self.is_training)\n self.Gb1 = nameop(self.Gb1, 'Gb1')\n\n\n self.Gb2_reconstructed = self.G12(self.Gb1, is_training=self.is_training, reuse=True)\n self.Gb1_reconstructed = self.G21(self.Gb2, is_training=self.is_training, reuse=True)\n\n self.Gb1_reconstructed = nameop(self.Gb1_reconstructed, 'xb1_reconstructed')\n self.Gb2_reconstructed = nameop(self.Gb2_reconstructed, 'xb2_reconstructed')\n\n self.D1 = Discriminator(self.dim_b1, 1, channels=self.channels, name='D1')\n self.D2 = Discriminator(self.dim_b2, 1, channels=self.channels, name='D2')\n\n self.D1_probs_z = self.D1(self.xb1, is_training=self.is_training)\n self.D1_probs_G = self.D1(self.Gb1, is_training=self.is_training, reuse=True)\n self.D1_probs_z = nameop(self.D1_probs_z, 'D1_probs_z')\n self.D1_probs_G = nameop(self.D1_probs_G, 'D1_probs_G')\n\n self.D2_probs_z = self.D2(self.xb2, is_training=self.is_training)\n self.D2_probs_G = self.D2(self.Gb2, is_training=self.is_training, reuse=True)\n self.D2_probs_z = nameop(self.D2_probs_z, 'D2_probs_z')\n self.D2_probs_G = nameop(self.D2_probs_G, 'D2_probs_G')\n\n self._build_loss()\n\n self._build_optimization()", "def gen(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = Variable(torch.from_numpy(seq))\n\n # The input includes an additional channel used for the delimiter\n inp = Variable(torch.zeros(seq_len + 1, batch_size, seq_width + 1))\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield batch_num+1, inp.float().to(params.device), outp.float().to(params.device)", "def bert_embed(data, bert_model, BATCH_SIZE = 16, MAX_LEN = 128):\n \n dataset = TensorDataset(\n data['input_ids'], data['attention_masks'], data['indices']\n )\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Running on ' + device.type)\n if device.type == 'cuda':\n bert_model.cuda() # put bert in training mode\n \n N = data['indices'].shape[0]\n X = np.zeros((N, 768))\n pos = 0\n for batch in dataloader:\n batch = tuple(t.to(device) for t in batch)\n b_input_ids, 
b_input_masks, b_indices = batch\n \n with torch.no_grad():\n embeddings = bert_model(\n b_input_ids.view(-1, MAX_LEN),\n b_input_masks.view(-1, MAX_LEN)\n )[2]\n # Take the mean of the last 4 hidden states\n embeddings = (embeddings[-4] + embeddings[-3] + embeddings[-2] + embeddings[-1])/4\n for j, label_ind in enumerate(b_indices.cpu().detach().numpy()):\n X[pos,:] = embeddings[j, int(label_ind), :].cpu().detach().numpy()\n pos+=1\n return X", "def sample_generator(self, sess):\n\n to_return = {\n 'g_sample': self.G_sample_test,\n }\n return sess.run(to_return)", "def adj_batch():\n return torch.Tensor([[[1, 3], [3, 1]], [[7, 8], [8, 7]]])", "def batcher(params, batch):\n bpe_batch_indices = []\n for sentence in batch:\n sentence = ' '.join(sentence)\n if bert:\n indices = format_sentence_with_bert(sentence, params['wordpiece'], max_len)\n else:\n indices = format_sentence(sentence, params['bpe'], max_len)\n bpe_batch_indices.append(torch.LongTensor(indices))\n\n bpe_batch_indices = torch.stack(bpe_batch_indices, 0)\n\n # send to gpu\n bpe_batch_indices = bpe_batch_indices.to(params['device'])\n # if bert_max:\n # # we use max over BERT embeddings as sentence representation\n # with torch.no_grad():\n # all_embs, _ = params['bert'](bpe_batch_indices)[-2:]\n # all_embs, _ = torch.max(all_embs, 1) # get maximum value along the time dimension 1\n # all_embs = all_embs.cpu().detach().numpy()\n # elif bert_mean:\n # # we use mean over BERT embeddings as sentence representation\n # with torch.no_grad():\n # all_embs, _ = params['bert'](bpe_batch_indices)[-2:]\n # all_embs = torch.mean(all_embs, 1) # get maximum value along the time dimension 1\n # all_embs = all_embs.cpu().detach().numpy()\n # else:\n\n # we use model to calculate embeddings\n all_embs = calculate_model_outputs(params['model'], bpe_batch_indices)\n all_embs = all_embs.cpu().detach().numpy()\n\n return all_embs", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the 
pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def train_generator_PG(generator, dataloader, gen_opt, dis):\n\n for i, (input_tensor, target_tensor) in enumerate(dataloader):\n input_tensor, target_tensor = input_tensor.to(DEVICE), target_tensor.to(DEVICE)\n\n\n output = generator.sample(input_tensor, length=target_tensor.shape[1])\n rewards = dis.batchClassify(target_tensor)\n\n gen_opt.zero_grad()\n pg_loss = generator.batchPGLoss(output, target_tensor, rewards)\n pg_loss.backward()\n gen_opt.step()\n\n if i % PRINT_EVERY == 0:\n print(' the PG_loss = {}'.format(pg_loss))\n\n if i % SAVE_EVERY == 0:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': generator.state_dict(),\n 'optimizer': gen_opt.state_dict(),\n }, filename='generator.m')\n\n if i % EVALUATE_EVERY == 0:\n test_sentence = input_tensor[0, :]\n test_target_sentence = target_tensor[0, :]\n generated_sentence = output[0, :]\n\n real_test_sentence = dataloader.dataset.vocabulary.tokens_to_sent(test_sentence)\n real_target_sentence = dataloader.dataset.vocabulary.tokens_to_sent(test_target_sentence)\n\n generated_sentence = dataloader.dataset.vocabulary.tokens_to_sent(generated_sentence)\n\n print(real_test_sentence)\n print('>>')\n print(generated_sentence)\n print('==')\n print(real_target_sentence)\n print('-----------------------------')", "def _build_batch_transformer(self, input_tensor):\n\n self._dual.set_op('bt_input', input_tensor)\n\n # get hyper params\n sample_repeat = self._hparams.bt_sample_repeat\n blank_repeat = self._hparams.bt_blank_repeat\n presentation_repeat = self._hparams.bt_presentation_repeat\n is_degrade = self._hparams.bt_degrade\n degrade_repeat = self._hparams.bt_degrade_repeat\n degrade_type = self._hparams.bt_degrade_type\n degrade_value = self._hparams.bt_degrade_value\n degrade_factor = self._hparams.bt_degrade_factor\n\n # note: x_batch = input batch, y_batch = transformed batch\n\n with tf.variable_scope('batch_transformer'):\n\n # convert input to shape [batch, samples]\n input_shape = input_tensor.get_shape().as_list()\n input_area = np.prod(input_shape[1:])\n batch_input_shape = (-1, input_area)\n\n input_vector = tf.reshape(input_tensor, batch_input_shape, 
name='input_vector')\n logging.debug(input_vector)\n\n x_batch_length = input_shape[0]\n y_batch_length = presentation_repeat * x_batch_length * (sample_repeat + blank_repeat) + \\\n (1 + degrade_repeat if is_degrade else 0)\n\n self._blank_indices = []\n for p in range(presentation_repeat):\n start_pres = p * x_batch_length * (sample_repeat + blank_repeat)\n for s in range(x_batch_length):\n blank_start = start_pres + s*(sample_repeat+blank_repeat) + sample_repeat\n self._blank_indices.append([blank_start, blank_start + blank_repeat-1])\n\n # start with all blanks, in this case zero tensors\n y_batch = tf.get_variable(initializer=tf.zeros(shape=[y_batch_length, input_area]),\n trainable=False,\n name='blanks')\n\n # use scatter updates to fill with repeats, can not do 1-to-many, so need to do it\n # `pres_repeat * sample_repeat` times\n presentation_length = x_batch_length * (sample_repeat + blank_repeat)\n for p in range(presentation_repeat):\n\n input_vector = tf.random_shuffle(input_vector)\n\n for i in range(sample_repeat):\n x2y = [] # the idx itself is the x_idx, val = y_idx\n for x_idx in range(x_batch_length):\n y_idx = (p * presentation_length) + x_idx * (sample_repeat + blank_repeat) + i\n x2y.append(y_idx)\n xy_scatter_map = tf.constant(value=x2y, name='x_y_scatter_' + str(i))\n y_batch = tf.scatter_update(y_batch, xy_scatter_map, input_vector, name=\"sample_repeat\")\n\n # append degraded and non-degraded samples\n if is_degrade:\n # randomly choose one of the input vectors\n input_shuffled = tf.random_shuffle(input_vector)\n target = input_shuffled[0]\n\n if degrade_type == 'horizontal':\n degraded = image_utils.degrade_image(input_shuffled, label=None, degrade_type='horizontal',\n degrade_value=degrade_value)[0]\n elif degrade_type == 'vertical':\n raise NotImplementedError('vertical degradation not implemented')\n elif degrade_type == 'random':\n\n # This next commented out line, caused major malfunction (result was passed to degrade in place of the whole batch - but i have no idea why)\n # degraded_samples = tf.reshape(target, batch_input_shape) # for efficiency, only degrade one image\n\n min_value_0 = True\n if min_value_0 is not True:\n degraded = image_utils.degrade_image(image=input_shuffled, label=None, degrade_type='random',\n degrade_value=degrade_value,\n degrade_factor=degrade_factor)[0]\n else:\n # degrade the high bits (not the bits that are already zero)\n eps = 0.01\n degrade_mask = tf.greater(target, 1.0 - eps)\n degrade_mask = tf.to_float(degrade_mask)\n degrade_mask = tf_print(degrade_mask, \"degrade_mask\", mute=True)\n\n degraded = tf_utils.degrade_by_mask(input_tensor=input_shuffled,\n num_active=self._active_bits,\n degrade_mask=degrade_mask,\n degrade_factor=degrade_factor,\n degrade_value=degrade_value)[0]\n\n else:\n raise NotImplementedError('Unknown degradation type.')\n\n degraded_repeated = tf.ones([degrade_repeat, input_area])\n degraded_repeated = degraded_repeated * degraded\n\n target = tf.reshape(target, [1, input_area])\n degraded_and_target = tf.concat([degraded_repeated, target], 0)\n\n index_map = []\n for i in range(degrade_repeat+1):\n index_map.insert(0, y_batch_length - 1 - i)\n\n y_batch = tf.scatter_update(y_batch, tf.constant(index_map), degraded_and_target, name=\"degradetarget\")\n y_batch = tf.stop_gradient(y_batch)\n\n self._dual.set_op('bt_output', y_batch)\n return y_batch", "def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps):\n\n def model_fn(features, labels, mode, params):\n \"\"\"this is 
prototype syntax, all parameters are necessary.\"\"\"\n # obtain the data\n _info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features['input_ids'] # [batch_size, seq_length]\n input_mask = features['input_mask'] # [batch_size, seq_length]\n\n # if mode != tf.estimator.ModeKeys.PREDICT:\n # # segment_idx = features['segment_dis']\n # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer\n # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels\n # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask\n # # next_sentence_labels = features['next_sentence_labels']\n # else:\n masked_lm_positions = features['masked_lm_positions']\n masked_lm_ids = features['masked_lm_ids']\n masked_lm_weights = features['masked_lm_weights']\n\n if bert_config.train_type == 'seq2seq':\n _info('Training seq2seq task.')\n elif bert_config.train_type == 'lm':\n _info('Training language model task.')\n \n # build model\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask)\n \n # compute loss\n loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,\n model.get_sequence_output(),\n model.embedding_table,\n model.projection_table,\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n mode)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN: \n # restore from the checkpoint,\n # tf.estimator automatically restore from the model typically,\n # maybe here is for restore some pre-trained parameters\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n _info('*** Trainable Variables ***')\n for var in tvars:\n init_string = ''\n if var.name in initialized_variable_names:\n init_string = ', *INIT_FROM_CKPT*'\n _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))\n \n train_op = optimization.create_optimizer(\n loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)\n\n # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,\n # tf.train.get_or_create_global_step(),\n # num_train_steps,\n # end_learning_rate=0.0,\n # power=1.0,\n # cycle=False)\n # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)\n # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)\n \n def metric_fn(loss, label_ids, logits, is_real_example):\n \"\"\"\n Args:\n loss: tf.float32.\n label_ids: [b, s].\n logits: [b, s, v].\n \"\"\"\n # [b * s, v]\n logits = tf.reshape(logits, 
[-1, logits.shape[-1]])\n # [b * s, 1]\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n # [b * s]\n label_ids = tf.reshape(label_ids, [-1])\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=loss)\n return {'eval_accuracy': accuracy, 'eval_loss': loss}\n \n eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)\n output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)\n\n return output_spec\n \n return model_fn", "def _get_entities_representation(self, bert_out: tf.Tensor, ner_labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\n # dropout\n bert_out = self.bert_dropout(bert_out, training=self.training_ph)\n\n # pieces -> tokens\n x = tf.gather_nd(bert_out, self.first_pieces_coords_ph) # [batch_size, num_tokens, bert_dim]\n\n # birnn\n if self.birnn_re is not None:\n sequence_mask = tf.sequence_mask(self.num_tokens_ph)\n x = self.birnn_re(x, training=self.training_ph, mask=sequence_mask) # [N, num_tokens, cell_dim * 2]\n d_model = self.config[\"model\"][\"re\"][\"rnn\"][\"cell_dim\"] * 2\n else:\n d_model = self.config[\"model\"][\"bert\"][\"dim\"]\n\n # маскирование\n num_tokens = tf.shape(ner_labels)[1]\n mask = upper_triangular(num_tokens, dtype=tf.int32)\n ner_labels *= mask[None, :, :]\n\n # векторизация сущностей\n no_entity_id = self.config[\"model\"][\"ner\"][\"no_entity_id\"]\n span_mask = tf.not_equal(ner_labels, no_entity_id) # [batch_size, num_tokens, num_tokens]\n start_coords, end_coords, num_entities = get_padded_coords_3d(mask_3d=span_mask)\n if self.config[\"model\"][\"re\"][\"entity_emb_type\"] == 0:\n # требуется специальный токен начала и окончания последовательности\n entity_emb_fn = get_entity_embeddings\n elif self.config[\"model\"][\"re\"][\"entity_emb_type\"] == 1:\n entity_emb_fn = get_entity_embeddings_concat_half\n else:\n raise\n x_entity = entity_emb_fn(x=x, d_model=d_model, start_coords=start_coords, end_coords=end_coords)\n\n # добавление эмбеддингов лейблов сущностей\n if self.config[\"model\"][\"re\"][\"use_entity_emb\"]:\n entity_coords = tf.concat([start_coords, end_coords[:, :, -1:]], axis=-1)\n ner_labels_2d = tf.gather_nd(ner_labels, entity_coords)\n ner_labels_2d *= tf.sequence_mask(num_entities, dtype=tf.int32)\n\n x_emb = self.ner_emb(ner_labels_2d)\n x_entity += x_emb\n\n return x_entity, num_entities", "def generate(self, batch_size: int = 1) -> torch.Tensor:\n z = self.prior.sample((batch_size, self.latent_size))\n recon_mu, recon_sigma = self.decoder(z).chunk(2, dim=1)\n recon_sigma = softplus(recon_sigma)\n return recon_mu + recon_sigma * torch.rand_like(recon_sigma)", "def generate(args):\n\n # Using the data Augmentation in traning data\n\n normalizer = Normalizer()\n\n train_aug = tf.keras.preprocessing.image.ImageDataGenerator(\n #rescale=1. 
/ 255.,\n shear_range=args.shear_range,\n zoom_range=args.zoom_range,\n rotation_range=args.rotation_range,\n width_shift_range=args.width_shift_range,\n height_shift_range=args.height_shift_range,\n horizontal_flip=args.horizontal_flip,\n vertical_flip=args.vertical_flip,\n preprocessing_function=normalizer)\n\n\n validation_aug = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=normalizer)\n\n train_generator = train_aug.flow_from_directory(\n args.train_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical',\n shuffle=True)\n\n mean, std = [], []\n if args.mean is None or args.std is None:\n mean, std = normalizer.get_stats(args.train_dir, train_generator.filenames, (args.input_size, args.input_size))\n else:\n mean = [float(m.strip()) for m in args.mean.split(',')]\n std = [float(s.strip()) for s in args.std.split(',')]\n normalizer.set_stats(mean, std)\n\n if not os.path.exists('model'):\n os.makedirs('model')\n with open('model/stats.txt', 'w') as stats:\n stats.write(\"Dataset mean [r, g, b] = {}\\n\".format(mean))\n\n\n label_map = (train_generator.class_indices)\n label_map = dict((v,k) for k,v in label_map.items())\n\n with open('model/labels.csv', 'w') as csv_file:\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n csv_writer.writerows(label_map.items())\n\n validation_generator = validation_aug.flow_from_directory(\n args.validation_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical')\n\n return train_generator, validation_generator, train_generator.samples, validation_generator.samples, len(label_map)", "def get_tensors(loaded_graph):\n g = loaded_graph\n InputTensor = g.get_tensor_by_name(\"input:0\")\n InitialStateTensor = g.get_tensor_by_name(\"initial_state:0\")\n FinalStateTensor = g.get_tensor_by_name(\"final_state:0\") \n ProbsTensor = g.get_tensor_by_name(\"probs:0\")\n\n return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return 
i_input,s_input,yseq", "def generate_samples(self, n_samples=100):\n \t\t\n\t\t#how many times should ancestral sampling be run\n\t\t#n_samples\n prior_samples=[]\n for i in range(0,n_samples):\n prior_sample = self.prior.get_samples(\n n_latent_nodes=self.n_latent_nodes,\n n_gibbs_sampling_steps=100, \n sampling_mode=\"gibbs_ancestral\")\n prior_sample = torch.cat(prior_sample)\n prior_samples.append(prior_sample)\n prior_samples=torch.stack(prior_samples)\n # prior_samples = tf.slice(prior_samples, [0, 0], [num_samples, -1])\n output_activations = self.decoder.decode(prior_samples)\n output_activations = output_activations+self._train_bias\n output_distribution = Bernoulli(logit=output_activations)\n output=torch.sigmoid(output_distribution.logits)\n # output_activations[0] = output_activations[0] + self.train_bias\n # output_dist = FactorialBernoulliUtil(output_activations)\n # output_samples = tf.nn.sigmoid(output_dist.logit_mu)\n # print(\"--- \",\"end VAE::generate_samples()\")\n return output", "def simple_batching(config, insts: List[Instance]) -> Tuple[torch.Tensor,torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor]:\n batch_size = len(insts)\n batch_data = insts\n # probably no need to sort because we will sort them in the model instead.\n # batch_data = sorted(insts, key=lambda inst: len(inst.input.words), reverse=True) ##object-based not direct copy\n sent_seq_len = torch.LongTensor(list(map(lambda inst: len(inst.input.sents), batch_data)))\n max_seq_len = sent_seq_len.max()\n\n # num_tokens = torch.LongTensor(list(map(lambda inst: inst.max_num_tokens, batch_data)))\n # max_tokens = num_tokens.max()\n\n num_tokens = list(map(lambda inst: inst.num_tokens, batch_data)) # 2-dimension\n max_tokens = max([max(num_token) for num_token in num_tokens])\n\n # NOTE: Use 1 here because the CharBiLSTM accepts\n char_seq_len = torch.LongTensor([list(map(len, inst.input.sents)) + [1] * (int(max_seq_len) - len(inst.input.sents)) for inst in batch_data])\n max_char_seq_len = char_seq_len.max()\n\n context_emb_tensor = None\n if config.context_emb != ContextEmb.none:\n emb_size = insts[0].elmo_vec.shape[1]\n context_emb_tensor = torch.zeros((batch_size, max_seq_len, emb_size))\n\n # emb_size = len(insts[0][0].vec)\n # print('emb_size: ',emb_size)\n emb_size = 768\n\n # word_seq_tensor = torch.zeros((batch_size, max_seq_len), dtype=torch.long)\n label_seq_tensor = torch.zeros((batch_size, max_seq_len), dtype=torch.long)\n review_idx_tensor = torch.full((batch_size, max_seq_len),0, dtype=torch.long)\n reply_idx_tensor = torch.full((batch_size, max_seq_len),0, dtype=torch.long)\n\n char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_char_seq_len), dtype=torch.long)\n\n initial_sent_emb_tensor = torch.zeros((batch_size, max_seq_len, max_tokens, emb_size), dtype=torch.float32)\n sent_emb_tensor = torch.zeros((1,), dtype=torch.float32)\n\n # input = torch.zeros((batch_size, num_sents, emb_size))\n\n type_id_tensor = torch.zeros((batch_size, max_seq_len), dtype=torch.long)\n num_tokens_tensor = torch.zeros((batch_size, max_seq_len), dtype=torch.long)\n\n pair_tensor = torch.zeros((batch_size,max_seq_len,max_seq_len), dtype = torch.float32)\n pair_tensor_train = torch.zeros((batch_size, max_seq_len, max_seq_len), dtype=torch.float32)\n pair_padding_tensor = torch.zeros((batch_size, max_seq_len, max_seq_len), dtype=torch.float32)\n pair_padding_train = 
torch.zeros((batch_size, max_seq_len, max_seq_len), dtype=torch.float32)\n # pair_padding_tensor1 = torch.zeros((batch_size, max_seq_len, max_seq_len), dtype=torch.float32)\n\n max_review_tensor = torch.zeros((batch_size), dtype=torch.long)\n\n for idx in range(batch_size):\n\n\n # word_seq_tensor[idx, :word_seq_len[idx]] = torch.LongTensor(batch_data[idx].word_ids)\n if batch_data[idx].output_ids:\n # print('output_ids: ', batch_data[idx].output_ids)\n # print('review_idx: ', batch_data[idx].review_idx)\n # print(sent_seq_len[idx])\n # print(\"batch_data[idx].max_review_id: \",batch_data[idx].max_review_id )\n max_review_tensor[idx]=batch_data[idx].max_review_id\n label_seq_tensor[idx, :sent_seq_len[idx]] = torch.LongTensor(batch_data[idx].output_ids)\n review_idx_tensor[idx, torch.LongTensor(batch_data[idx].review_idx)] = torch.LongTensor(batch_data[idx].review_idx)\n reply_idx_tensor[idx, torch.LongTensor(batch_data[idx].reply_idx)] = torch.LongTensor(batch_data[idx].reply_idx)\n type_id_tensor[idx, :sent_seq_len[idx]] = torch.LongTensor(batch_data[idx].type)\n num_tokens_tensor[idx, :sent_seq_len[idx]] = torch.LongTensor(batch_data[idx].num_tokens)\n\n if config.context_emb != ContextEmb.none:\n context_emb_tensor[idx, :sent_seq_len[idx], :] = torch.from_numpy(batch_data[idx].elmo_vec)\n # print('type id tensor', type_id_tensor[idx])\n # print('max review id', batch_data[idx].max_review_id, len(batch_data[idx].review_idx))\n # pair_padding_tensor[idx, :len(batch_data[idx].review_idx), batch_data[idx].max_review_id:sent_seq_len[idx]] = 1.0\n # pair_padding_tensor[idx, torch.LongTensor(batch_data[idx].review_idx), torch.LongTensor(batch_data[idx].reply_idx)] = 1.0\n\n\n for sent_idx in range(sent_seq_len[idx]):\n for token_idx in range(num_tokens[idx][sent_idx]):\n initial_sent_emb_tensor[idx, sent_idx, token_idx, :emb_size] = torch.Tensor(batch_data[idx].vec[sent_idx][token_idx])\n\n # print('sent_emb_tensor', sent_emb_tensor[idx, sent_idx, 0])\n char_seq_tensor[idx, sent_idx, :char_seq_len[idx, sent_idx]] = torch.LongTensor(batch_data[idx].char_ids[sent_idx])\n\n if sent_idx < batch_data[idx].max_review_id:\n for sent_idx2 in range(sent_idx+1, sent_seq_len[idx]):\n if batch_data[idx].labels_pair[sent_idx] == batch_data[idx].labels_pair[sent_idx2] \\\n and batch_data[idx].labels_pair[sent_idx] != 0 \\\n and batch_data[idx].type[sent_idx] == 0 and batch_data[idx].type[sent_idx2] == 1:\n pair_tensor[idx,sent_idx,sent_idx2]=1.0\n pair_tensor_train[idx, sent_idx, sent_idx2] = 1.0\n #if batch_data[idx].type[sent_idx]==0 and batch_data[idx].type[sent_idx2]==1:\n # pair_padding_train[idx, sent_idx, sent_idx2] = 1.0\n if batch_data[idx].labels_pair[sent_idx] != 0 and batch_data[idx].labels_pair[sent_idx2] != 0 \\\n and batch_data[idx].type[sent_idx] == 0 and batch_data[idx].type[sent_idx2] == 1:\n pair_padding_tensor[idx,sent_idx,sent_idx2]=1.0\n # if batch_data[idx].type[sent_idx2 - 1] == 1:\n # pair_padding_tensor[idx, sent_idx, sent_idx2-1] = 1.0\n # if batch_data[idx].type[sent_idx2 - 2] == 1:\n # pair_padding_tensor[idx, sent_idx, sent_idx2 - 2] = 1.0\n # if sent_idx2 +1 < sent_seq_len[idx]:\n # pair_padding_tensor[idx, sent_idx, sent_idx2 + 1] = 1.0\n # if sent_idx2 + 2 < sent_seq_len[idx]:\n # pair_padding_tensor[idx, sent_idx, sent_idx2 + 2] = 1.0\n # print(\"sum:\", pair_padding_tensor.sum())\n # print((pair_padding_tensor[idx]==pair_padding_tensor1[idx]).all())\n\n # print('sum of pair padding tensor', torch.sum(pair_padding_tensor[idx]),torch.sum(pair_padding_tensor1[idx]))\n # 
print(pair_padding_tensor[idx][:100])\n\n\n # print(pair_tensor[idx,])\n for sentIdx in range(sent_seq_len[idx], max_seq_len):\n char_seq_tensor[idx, sentIdx, 0: 1] = torch.LongTensor([config.char2idx[PAD]]) ###because line 119 makes it 1, every single character should have a id. but actually 0 is enough\n\n tmp = pair_padding_tensor + pair_tensor\n\n for idx in range(batch_size):\n\n for sent_idx in range(sent_seq_len[idx]):\n # pair_padding_train[idx, sent_idx,tmp[idx,sent_idx,:]==1] =1\n\n if (tmp[idx, sent_idx, :] == 1).sum() >= 5:\n valid_idx = (tmp[idx, sent_idx, :] == 1).nonzero().view(-1)\n choice = torch.multinomial(valid_idx.float(), 5)\n pair_padding_train[idx, sent_idx, valid_idx[choice]] = 1\n else:\n pair_padding_train[idx, sent_idx,tmp[idx,sent_idx,:]==1] =1\n\n #if (tmp[idx, sent_idx, :] == 0).sum() >= 1:\n # valid_idx = (tmp[idx, sent_idx, :] == 0).nonzero().view(-1)\n # choice = torch.multinomial(valid_idx.float(), 1)\n # pair_padding_train[idx, sent_idx, valid_idx[choice]] = 1\n #else:\n # pair_padding_train[idx, sent_idx,tmp[idx,sent_idx,:]==0] =1\n\n\n pair_padding_train[pair_tensor == 1] = 1\n pair_tensor_train[pair_padding_train == 0] = -100\n pair_tensor[pair_padding_tensor == 0] = -100\n\n # print('number of not -100', torch.sum(pair_tensor != -100), torch.sum(pair_tensor_train != -100))\n # word_seq_tensor = word_seq_tensor.to(config.device)\n label_seq_tensor = label_seq_tensor.to(config.device)\n char_seq_tensor = char_seq_tensor.to(config.device)\n sent_seq_len = sent_seq_len.to(config.device)\n char_seq_len = char_seq_len.to(config.device)\n\n # sent_emb_tensor = sent_emb_tensor.to(config.device)\n type_id_tensor = type_id_tensor.to(config.device)\n\n review_idx_tensor = review_idx_tensor.to(config.device)\n reply_idx_tensor = reply_idx_tensor.to(config.device)\n\n pair_tensor = pair_tensor.to(config.device)\n pair_padding_tensor = pair_padding_tensor.to(config.device)\n pair_padding_train = pair_padding_train.to(config.device)\n pair_tensor_train = pair_tensor_train.to(config.device)\n\n return sent_emb_tensor, type_id_tensor, sent_seq_len, num_tokens_tensor, initial_sent_emb_tensor, context_emb_tensor, char_seq_tensor, char_seq_len, pair_tensor,pair_padding_tensor, label_seq_tensor, review_idx_tensor, reply_idx_tensor, pair_tensor_train, pair_padding_train, max_review_tensor", "def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:", "def __init__(self,nback=1,ntokens_pm=2,ntokens_og=3,stimdim=2,seed=99):\n np.random.seed(seed)\n tr.manual_seed(seed)\n self.nback = nback\n # embedding\n self.ntokens_pm = ntokens_pm\n self.ntokens_og = ntokens_og\n self.stimdim = stimdim\n # emat\n self.randomize_emat()\n return None", "def buildGenerator():\n inputs = tf.keras.layers.Input(shape=[256,256,3])\n\n down_stack = [\n downsample(64, 4, (None, 256, 256, 3), apply_batchnorm=False), # (bs, 128, 128, 64)\n downsample(128, 4, (None, 128, 128, 64)), # (bs, 64, 64, 128)\n downsample(256, 4, (None, 64, 64, 128)), # (bs, 32, 32, 256)\n downsample(512, 4, (None, 32, 32, 256)), # (bs, 16, 16, 512)\n downsample(512, 4, (None, 16, 16, 512)), # (bs, 8, 8, 512)\n downsample(512, 4, (None, 8, 8, 512)), # (bs, 4, 4, 512)\n downsample(512, 4, (None, 4, 4, 512)), # (bs, 2, 2, 512)\n downsample(512, 4, (None, 2, 2, 512)), # (bs, 1, 1, 512)\n ]\n\n up_stack = [\n upsample(512, 4, (None, 1, 1, 512), apply_dropout=True), # (bs, 2, 2, 1024)\n upsample(512, 4, (None, 2, 2, 1024), apply_dropout=True), # (bs, 4, 4, 1024)\n upsample(512, 4, (None, 4, 4, 1024), 
apply_dropout=True), # (bs, 8, 8, 1024)\n upsample(512, 4, (None, 8, 8, 1024)), # (bs, 16, 16, 1024)\n upsample(256, 4, (None, 16, 16, 1024)), # (bs, 32, 32, 512)\n upsample(128, 4, (None, 32, 32, 512)), # (bs, 64, 64, 256)\n upsample(64, 4, (None, 64, 64, 256)), # (bs, 128, 128, 128)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,\n strides=2,\n padding='same',\n kernel_initializer=initializer,\n activation='tanh') # (bs, 256, 256, 3)\n\n x = inputs\n\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = tf.keras.layers.Concatenate()([x, skip])\n\n x = last(x)\n\n return tf.keras.Model(inputs=inputs, outputs=x)", "def gen_ep_data(self,min_trial_len=2,max_trial_len=3,ntrials=2):\n # self.randomize_emat()\n tseq,xseq,yseq = self.gen_seqs_multitrial(min_trial_len,max_trial_len,ntrials)\n xseq_embed = self.embed_xseq(xseq)\n # np to torch\n tseq = tr.unsqueeze(tr.LongTensor(tseq),1)\n xseq_embed = tr.unsqueeze(tr.Tensor(xseq_embed),1)\n yseq = tr.unsqueeze(tr.LongTensor(yseq),1)\n return tseq,xseq_embed,yseq", "def encode_input(sent, lemmas, pos_tags, predicates, vocab_preds, vocab_lemmas, vocab_pos_tags, bert_embedder, task_234):\n sentence = bert_embedder.embed_sentence([sent])[0]\n lens = torch.LongTensor([len(sent)])\n pos_tags = torch.tensor([vocab_pos_tags[pos_tag] for pos_tag in pos_tags])\n lemmas = torch.tensor([vocab_lemmas[lemma] if lemma in vocab_lemmas else vocab_lemmas['<unk>'] for lemma in lemmas])\n vocab_preds = {word: int(idx) for idx, word in vocab_preds.items()}\n predicate_null_token = vocab_preds['_']\n if task_234:\n # if the task is predicate disambiguation, predicates indicates only the position of the predicates in the sentence\n predicates = torch.LongTensor(predicates)\n positions = (predicates == 1).nonzero()\n else:\n # otherwise predicates must be encoded with the value stored in the vocabulary\n predicates = torch.tensor([vocab_preds[predicate] for predicate in predicates])\n positions = (predicates != predicate_null_token).nonzero()\n\n return sentence[None,...], lens, pos_tags[None,...], lemmas[None,...], predicates[None,...], positions.view(-1).tolist(), predicate_null_token" ]
[ "0.62490666", "0.6054576", "0.6015713", "0.5830311", "0.576799", "0.5767857", "0.57553375", "0.5751452", "0.5746628", "0.5678213", "0.5621775", "0.5615323", "0.56034464", "0.5599364", "0.5594678", "0.5589969", "0.5567636", "0.5561835", "0.5532102", "0.5522995", "0.54923373", "0.5486534", "0.5481936", "0.5466508", "0.54661316", "0.546344", "0.54615813", "0.5459678", "0.5448605", "0.5441531" ]
0.6425694
0
Interpolate a dict of splines over their ranges
def interpolate(att_spline, indices, range_info=None): v = {} db = {} module_logger.debug("interpolate: ranges are %s", range_info) for index in indices: if range_info: v[index] = arange(*range_info[index]) else: v[index] = arange(-10, 0.5, 0.1) module_logger.debug("interpolate: %s at \n%s", index, v[index]) db[index] = att_spline[index](v[index]) return v, db
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolate(self, xs):\n tck = splrep(self._xs, self._ys)\n new_ys = splev(xs, tck, der=0)\n return new_ys", "def interpolate(i0, d0, i1, d1):\n if i0 == i1:\n return [d0]\n values = []\n a = (d1 - d0) / (i1 - i0)\n d = d0\n for i in range(i0,i1+1):\n values.append(d)\n d = d + a\n return values", "def interpolator(self, states):\n t = np.asarray([state.t for state in states])\n y = np.asarray([state.y for state in states])\n\n return interp1d(t, y, assume_sorted=True, kind='cubic')", "def _prepare_spline_interpolator(self):\n\n # TODO Replace by scipy.ndimage.interpolation.map_coordinates\n\n from scipy.interpolate import RectBivariateSpline\n\n x = self.offset.value\n y = np.log10(self.energy.value)\n\n self._spline = RectBivariateSpline(x, y, self.eff_area.value)", "def lin_int(xs, ys):\n return scipy.interpolate.interp1d(xs, ys)", "def interpolateSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [6*x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [6*x2, 2, 0, 0]\n \n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def spline_interp(h,yy,yy_diff2,x) :\n assert type(yy)==numpy.ndarray\n #print(__name__, type(h))\n assert type(h)!=numpy.ndarray\n \n n=yy.shape[0]\n nlo=max(int(x/h),0)\n if nlo>n-1: return(0.0)\n nhi=min(nlo+1,n-1)\n a=nhi-x/h # This is checked... 
different to Fortran version due to 0-based arrays\n b=1.0-a\n y=a*yy[nlo]+b*yy[nhi]+((a**3-a)*yy_diff2[nlo]+(b**3-b)*yy_diff2[nhi])*(h**2)/6.0\n return y", "def interpolate(data, x):\n n = len(data)\n\n if isinstance(data, dict):\n if x in data:\n return S(data[x])\n X, Y = list(zip(*data.items()))\n else:\n if isinstance(data[0], tuple):\n X, Y = list(zip(*data))\n if x in X:\n return S(Y[X.index(x)])\n else:\n if x in range(1, n + 1):\n return S(data[x - 1])\n Y = list(data)\n X = list(range(1, n + 1))\n\n try:\n return interpolating_poly(n, x, X, Y).expand()\n except ValueError:\n d = Dummy()\n return interpolating_poly(n, d, X, Y).expand().subs(d, x)", "def cubicSpline(x,y,x_int):\n\n #region \"learn\" the coefficients of the cubic polynomials that interpolate intervals in x.\n # amount of intervals/splines\n n = len(x)-1\n\n # a_i = y_i\n a = y[:-1]\n\n # h_i = x_{i+1} - x_i for i in 0..n-1\n h = x[1:]-x[:-1]\n\n # 2 * h_i + h_{i+1}\n diagA = 2*(h[1:]+h[:-1])\n \n # h_1..h_n-2\n hInA = h[1:-1]\n\n A = np.eye(n-1)*diagA\n # distribute h_1..h_n-2 above and underneath the diagonal\n A += np.diag(hInA,1)\n A += np.diag(hInA,-1)\n\n # construct RHS\n z = 3/h[1:] * (y[2:] - y[1:-1]) - 3/h[:-1] * (y[1:-1] - y[:-2])\n\n # c_0 = c_{n} = 0\n c = np.zeros(n+1)\n\n c[1:-1] = np.linalg.solve(A,z)\n \n b = (y[1:]-y[:-1])/h - h/3*(c[1:] + 2*c[:-1])\n\n d = 1/(3*h)*(c[1:]-c[:-1])\n #endregion\n\n #region interpolate all points in x_int\n y_int = x_int.copy()\n # for all intervals\n for i in range(len(x)-1):\n # find points to interpolate within given interval\n idx = np.where(np.logical_and(x[i]<= x_int,x_int < x[i+1]))[0]\n xx = x_int[idx]\n yy = np.polyval(np.array([d[i],c[i],b[i],a[i]]), xx-x[i])\n y_int[idx] = yy\n print(f'interpolating in interval [{x[i]},{x[i+1]}[')\n print(xx)\n print(yy)\n print('\\n')\n\n # edgecase where x_int contains exactly last interval border\n #find indicies if x_int contains dupes\n idx = np.where(x_int == x[len(x)-1])[0] \n # interpolate with last interval polynomial\n i = len(a)-1\n y_int[idx] = np.polyval(np.array([d[i],c[i],b[i],a[i]]), x_int[idx]-x[i])\n #endregion\n return y_int", "def interpolatePeriodicSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [3 * pow(x[0],2), 2 * x[0], 1, 0]\n A[i*4+2, i*4:(i+1)*4] = [-3 * pow(x2,2), -2 * x2, -1, 0]\n A[i*4+3, 0:4] = [6 * x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [-6 * x2, -2, 0, 0]\n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def apply(lut, courbe):\n fct = interp1d(courbe[::2], courbe[1::2])\n if len(courbe) > 4:\n order, value = (\n [(2, 0)],\n [(2, 0)],\n ) # natural spline boundary conditions\n fct = make_interp_spline(\n courbe[::2], courbe[1::2], k=3, bc_type=(order, value)\n )\n for level in range(256):\n lut[level] = np.round(fct(lut[level]))", "def interpolateCubicPeriodic() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th parameter\n 
for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolatePeriodicSpline(keytime, y)\n S.append(interpolants)\n return S", "def _getBivecSpline(self):\n self._brep = {}\n\n for k in self.signals.keys():\n print('Reading signal ' + self.signals[k]['string'])\n tim = self._readsignal(self.signals[k]['string']).getDimensionAt(1).data()\n if tim[0]==0:\n tim = self._readsignal(r'dim_of('+self.signals[k]['string']+',1)').data()\n _idx = np.argmin(tim-self.t < 0)\n tim = tim[_idx]\n data = self._readsignal(self.signals[k]['string']).data()[self.trialindx, _idx, :]\n rhop = self._readsignal(self.signals[k]['string']).getDimensionAt(0).data()\n if rhop[1]==1:\n rhop = self._readsignal(r'dim_of('+self.signals[k]['string']+',0)').data()\n dummy = interpolate.interp1d(rhop, data, fill_value='extrapolate')\n self._brep[k] = dict([('spline', dummy)])", "def fit_spline(x, y, **kwargs):\n xf, yf = get_finite(x,y)\n iisort = np.argsort(xf)\n return interpolate.UnivariateSpline(xf[iisort],yf[iisort], **kwargs)", "def interpolateCubicNatural() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th paramter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolateSpline(keytime, y)\n S.append(interpolants)\n return S", "def interp_envelope(env):\n spl = UnivariateSpline(env.dm, env.sigma, k=5, s=10)\n dms = np.linspace(env.dm[0], env.dm[-1], 256)\n return dms, spl(dms)", "def different_quadratic_extrpolation_lower(x_interp, x_spline, y_spline):\n index_lower_1 = 0\n index_lower_2 = 1\n x1_lower = x_spline[index_lower_1]\n x2_lower = x_spline[index_lower_2]\n x3_lower = x_spline[index_lower_2 + 1]\n f1_lower = y_spline[index_lower_1]\n\n df1_dx_lower = calc_gradient(x_spline, y_spline, index_lower_1)/(x2_lower - x1_lower)\n df2_dx_lower = calc_gradient(x_spline, y_spline, index_lower_2)/(x3_lower - x2_lower)\n\n # Solve 2ax-b = df_dx for the gradient at point 1 and 2\n # Rearrange both equations to find 'a' and 'b' quadratic coefficients\n a_lower = (df2_dx_lower - df1_dx_lower)/(2.*(x2_lower - x1_lower))\n b_lower = df1_dx_lower - 2.*a_lower*x1_lower\n\n # Find c by solving at the fixed points (f = a x**2 + bx + c) at point 1 for the lower, and point 2 for the upper\n c_lower = f1_lower - a_lower*x1_lower**2 - b_lower*x1_lower\n return a_lower*x_interp**2 + b_lower*x_interp + c_lower", "def test_linear_interpolation_range(self):\n\n for x in [[1.0, 2.0, 4.0], [-20, -19, 0], numpy.arange(200) + 1000]:\n for y in [[5.0, 9.0], [100, 200, 10000]]:\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that linearly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 100)\n etas = numpy.linspace(y[0], y[-1], 100)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def filter_interpolated(self, ys, xs):\n return ys, xs", "def interpolate(series, indices):\r\n not_included = [t for t in indices if t not in series.index.values]\r\n new_indices_series = pd.Series(index=not_included, data=[np.nan] * len(not_included))\r\n with_new_indices = series.append(new_indices_series).sort_index()\r\n interpolated = with_new_indices.interpolate(method=\"linear\")\r\n\r\n 
# We want the values on the indices before the (originally) first index to be equal to the first value. Similarly,\r\n # we want the values after the (originally) last index to be equal to the last value.\r\n min_index = min(series.index.values)\r\n max_index = max(series.index.values)\r\n start_value = series.values[0]\r\n end_value = series.values[-1]\r\n\r\n new_values = []\r\n for t, p in interpolated.items():\r\n if t < min_index:\r\n new_values.append(start_value)\r\n elif t > max_index:\r\n new_values.append(end_value)\r\n else:\r\n new_values.append(p)\r\n\r\n new_series = pd.Series(data=new_values, index=interpolated.index)\r\n\r\n return new_series", "def interpolate(x_list, y_list, z_list):\n x1 = x_list[-2]\n x2 = x_list[-1]\n y1 = y_list[-2]\n y2 = y_list[-1]\n z1 = z_list[-2]\n z2 = z_list[-1]\n r = -y1/y2\n x_land = (x1+r*x2)/(r+1)\n z_land = (z1+r*z2)/(r+1)\n x_list[-1] = x_land\n y_list[-1] = 0.0\n z_list[-1] = z_land", "def interpolate_linear(self, transect):\n\n u = np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n\n valid = np.isnan(u) == False\n\n # Check for valid data\n if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply linear interpolation\n self.u_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=u[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)\n # Apply linear interpolation\n self.v_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=v[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)", "def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no 
index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", "def interpolate_spline(y, N):\n l = len(y)\n x = np.linspace(0, l, l)\n spline = interpolate.InterpolatedUnivariateSpline(x,y)\n xnew = np.linspace(0, l, N*l)\n ynew = spline(xnew)\n return ynew", "def _set_interpolators(self):\n self._interp_u = interp.RectBivariateSpline(self.x_points,\n self.y_points,\n self._u_int)\n self._interp_v = interp.RectBivariateSpline(self.x_points,\n self.y_points,\n self._v_int)", "def linear_spline_interpolation(q_, t_, m = 100):\n n = q_.shape[0]\n dof = q_.shape[1]\n\n t_ = t_ / m\n timesteps = np.linspace(0, 1, num = m)\n\n a = 100\n time_segments = np.zeros((n, dof))\n blend_times = np.zeros((n, dof))\n velocities = np.zeros((n, dof))\n accelerations = np.zeros((n, dof))\n\n # Initial time segment\n accelerations[0] = np.sign(q_[1] - q_[0]) * a\n blend_times[0] = t_[0] - np.sqrt(\n t_[0] * t_[0] - 2 * (q_[1] - q_[0]) / accelerations[0])\n velocities[0] = (q_[1] - q_[0]) / (t_[0] - 0.5 * blend_times[0])\n\n # Final time segment\n accelerations[n - 1] = np.sign(q_[n - 2] - q_[n - 1]) * a\n blend_times[n - 1] = t_[n - 2] - np.sqrt(\n t_[n - 2] * t_[n - 2] + 2 * (q_[n - 1] - q_[n - 2]) / accelerations[n - 1])\n velocities[n - 2] = (q_[n - 1] - q_[n - 2]) / (t_[n - 2] - 0.5 * blend_times[n - 1])\n velocities[n - 1] = 0\n\n # Loop for velocities\n for i in range(1, n - 2):\n velocities[i] = (q_[i + 1] - q_[i]) / t_[i]\n\n # Loop for accelerations and blend times\n for i in range(1, n - 1):\n accelerations[i] = np.sign(velocities[i] - velocities[i - 1]) * a\n blend_times[i] = (velocities[i] - velocities[i - 1]) / accelerations[i]\n\n # Calculate time segments\n time_segments[0] = t_[0] - blend_times[0] - 0.5 * blend_times[1]\n time_segments[n - 2] = t_[n - 2] - blend_times[n - 1] - 0.5 * blend_times[n - 2]\n time_segments[n - 1] = 0\n for i in range(1, n - 2):\n time_segments[i] = t_[i] - 0.5 * blend_times[i + 1] - 0.5 * blend_times[i]\n\n \n # Calculate Trajectories\n q = 
np.zeros((dof, m))\n qd = np.zeros((dof, m))\n qdd = np.zeros((dof, m))\n\n # Loop for each degree of freedom\n for d in range(dof):\n # j for using above parameters\n # previous_i for saving i of start of a parabola segment\n # previous_ii for saving i of start of a linear segment\n j = 0\n previous_i = 0\n previous_ii = 0\n\n # Loop over the timesteps\n for i in range(len(timesteps)):\n t = timesteps[i] - timesteps[previous_i]\n\n # If t is in the parabola range\n if t <= blend_times[j][d]:\n a = accelerations[j][d]\n\n qdd[d, i] = a\n qd[d, i] = qd[d, previous_i] + a * t\n\n if i == 0:\n q[d, i] = q_[0][d] + 0.5 * a * t * t\n else:\n q[d, i] = q[d, previous_i] + qd[d, previous_i] * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # If t is in the linear range\n elif t <= blend_times[j][d] + time_segments[j][d]:\n t = timesteps[i] - timesteps[previous_ii]\n v = velocities[j][d]\n\n qdd[d, i] = 0\n qd[d, i] = v\n q[d, i] = q[d, previous_ii] + v * t\n\n # If t has crossed the parabola plus the linear range\n else:\n previous_i = i - 1\n j += 1\n\n t = timesteps[i] - timesteps[previous_i]\n\n # Break loop if parameter exceeded\n if j == len(accelerations):\n break\n\n a = accelerations[j][d]\n v = qd[d, previous_i]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # Loop over remaining timesteps\n while i < len(timesteps):\n a = accelerations[j - 1][d]\n v = velocities[j - 1][d]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n i += 1\n\n return q, qd, qdd", "def get_interpolated_data(df=[], E_min=np.NaN, E_max=np.NaN, E_step=np.NaN):\n nbr_point = (E_max - E_min) / E_step\n \n # remove data outside specified range [x_min, x_max]\n #df = df.drop(df[df.E_eV < E_min].index)\n #df = df.drop(df[df.E_eV > E_max].index)\n\n # reset index\n df = df.reset_index(drop=True)\n \n # energy x_axis\n #x_axis = np.linspace(df['E_eV'].min(), df['E_eV'].max(), nbr_point)\n x_axis = np.linspace(E_min, E_max, nbr_point)\n y_axis_function = interp1d(x=df['E_eV'], y=df['Sig_b'], kind='linear')\n \n y_axis = y_axis_function(x_axis) \n \n return {'x_axis': x_axis, 'y_axis': y_axis}", "def interpolate(self, interp):\n x = np.linspace(0, 29, len(self.ya))\n f_ya = interpolate.interp1d(x, self.ya)\n f_yv = interpolate.interp1d(x, self.yv)\n f_pa = interpolate.interp1d(x, np.reshape(self.pa, [-1]))\n f_pv = interpolate.interp1d(x, np.reshape(self.pv, [-1]))\n\n x_interp = np.linspace(0, 29, len(self.ya)*interp)\n self.ya = list(f_ya(x_interp))\n self.yv = list(f_yv(x_interp))\n self.pa = list(f_pa(x_interp))\n self.pv = list(f_pv(x_interp))", "def linear_interpolate_value_change(t0, v0, t1, v1, dt):\n return (v1 - v0)/float(t1-t0) * dt", "def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y" ]
[ "0.67891467", "0.6077751", "0.6063687", "0.6043078", "0.6020123", "0.5980172", "0.5974459", "0.59378576", "0.58838826", "0.5851971", "0.58322626", "0.57882226", "0.5773296", "0.5692933", "0.56908214", "0.5643147", "0.5640263", "0.55863905", "0.5576309", "0.5572141", "0.5560499", "0.55490106", "0.5536895", "0.5521815", "0.55202055", "0.5513205", "0.54969263", "0.54961514", "0.54809105", "0.54796743" ]
0.6808666
0
Configures an amfast.remoting.channel.ChannelSet object.
def setup_channel_set(channel_set): #amfast.logger = log.logger # Map service targets to controller methods cont_obj = app.controller.Controller() service = Service('DAService') service.mapTarget(CallableTarget(cont_obj.get_player_info, 'get_player_info')) service.mapTarget(CallableTarget(cont_obj.do_move, 'do_move')) service.mapTarget(CallableTarget(cont_obj.do_attack, 'do_attack')) service.mapTarget(CallableTarget(cont_obj.get_news, 'get_news')) service.mapTarget(CallableTarget(cont_obj.get_floor, 'get_floor')) service.mapTarget(CallableTarget(cont_obj.get_monster, 'get_monster')) service.mapTarget(CallableTarget(cont_obj.raiseException, 'raiseException')) channel_set.service_mapper.mapService(service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_channel_set(channel_set):\n\n # Send log messages to STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n amfast.logger.addHandler(handler)\n\n # Map service targets to controller methods\n cont_obj = controller.Controller()\n service = Service('ExampleService')\n service.mapTarget(CallableTarget(cont_obj.echo, 'echo'))\n service.mapTarget(CallableTarget(cont_obj.raiseException, 'raiseException'))\n channel_set.service_mapper.mapService(service)", "def setDescriptorChannels(self, dch): # real signature unknown; restored from __doc__\n pass", "def setup(self, channels):\n self.channels = channels[:]", "def set_channels(self, channels):\n if channels is None:\n self.channels = None\n return\n if isinstance(channels, Channels):\n self.channels = channels.create_channel_group(name='all')\n elif isinstance(channels, ChannelData):\n if not isinstance(channels, ChannelGroup):\n group_class = self.info.get_channel_group_class()\n self.channels = group_class(\n channels, indices=np.arange(channels.size), name='all')\n else:\n self.channels = channels.copy()\n else:\n raise ValueError(f\"Channels must be {Channels} or {ChannelData}, \"\n f\"not {channels}.\")", "def setURL(self, url):\n if self.radioConfig == None:\n raise Exception(\"No RadioConfig has been read\")\n\n # URLs are of the form https://www.meshtastic.org/d/#{base64_channel_set}\n # Split on '/#' to find the base64 encoded channel settings\n splitURL = url.split(\"/#\")\n b64 = splitURL[-1]\n\n # We normally strip padding to make for a shorter URL, but the python parser doesn't like\n # that. So add back any missing padding\n # per https://stackoverflow.com/a/9807138\n missing_padding = len(b64) % 4\n if missing_padding:\n b64 += '=' * (4 - missing_padding)\n\n decodedURL = base64.urlsafe_b64decode(b64)\n channelSet = apponly_pb2.ChannelSet()\n channelSet.ParseFromString(decodedURL)\n\n i = 0\n for chs in channelSet.settings:\n ch = channel_pb2.Channel()\n ch.role = channel_pb2.Channel.Role.PRIMARY if i == 0 else channel_pb2.Channel.Role.SECONDARY\n ch.index = i\n ch.settings.CopyFrom(chs)\n self.channels[ch.index] = ch\n self.writeChannel(ch.index)\n i = i + 1", "def setup_channels():\n\n # Setup channel encoders\n for c in channels:\n channels[c].setup()\n print()", "def set_chanlist(self,loc,newchannel):\n # TODO, add checks and illegal arguments to protect Pi\n # TODO actually add the functionality\n # self.chanlist(loc) = newchannel", "async def _set_channels(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n signup = await guild.create_text_channel(\"sign-ups\")\n await self.config.guild(guild).signup_channel.set(signup.id)\n\n host_role = await self.role_from_config(guild, \"host_id\")\n\n na_overwrites = {\n guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n ),\n host_role: discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True\n ),\n guild.me: discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True\n )\n }\n\n nightaction = await guild.create_text_channel(\n \"night-action\", overwrites=na_overwrites\n )\n await self.config.guild(guild).na_channel_id.set(nightaction.id)\n\n txt = _(\n \"Sign-ups: {}\\nNight Actions: {}\"\n ).format(\n signup.mention, nightaction.mention,\n )\n\n embed = discord.Embed(\n color=0x37BFFF, title=\"Created Channels!\", description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Created required channels!\")\n await ctx.send(txt)", "async 
def managechannels(self, ctx:commands.Context):", "def _set_channel_(self, channel):\n self._channel = channel", "def configure_multiple_channels(self, working_directory):\n allocated_generator_workers = sum(\n (channel.declaration.num_generators or 0)\n for channel in self.channel_configs\n )\n channels_without_workers = len(\n [\n channel.declaration.num_generators\n for channel in self.channel_configs\n if not channel.declaration.num_generators\n ]\n )\n remaining_generator_workers = (\n self.num_generator_workers - allocated_generator_workers\n )\n num_generators_per_channel = ceil(\n remaining_generator_workers / channels_without_workers\n )\n for idx, channel in enumerate(self.channel_configs):\n if self.debug_mode:\n self.logger.info(\"Initializing %s\", channel)\n channel_wd = working_directory / f\"channel_{idx}\"\n channel_wd.mkdir()\n recipe_options = channel.merge_recipe_options(self.recipe_options)\n generator_workers = (\n channel.declaration.num_generators or num_generators_per_channel\n )\n\n self.queue_manager.add_channel(\n org_config=channel.org_config,\n num_generator_workers=generator_workers,\n num_loader_workers=channel.declaration.num_loaders,\n working_directory=channel_wd,\n recipe_options=recipe_options,\n )", "async def set_channel(self, ctx: commands.Context):\n if ctx.message.author.id != conf.user:\n return None\n\n new_channel = ctx.channel.id\n\n conf.channel = new_channel\n\n log.info(f\"Bot channel set to channel with: #{ctx.channel} (ID:{ctx.channel.id})\")\n await ctx.message.channel.send(f\"✅ Set bot channel for {ctx.message.author} to #{ctx.channel}\")", "def set_channel(cls, channel):\n cls.channel = channel", "def set_channel_attributes(self, chan_names, clims=None):\n\n rdefs = {'defaultT': 0,\n 'model': 'color',\n 'projection': 'normal',\n 'defaultZ': 0}\n\n multiscale_dict = [{'datasets': [{'path': ARRAY_NAME}],\n 'version': '0.1'}]\n dict_list = []\n\n if clims and len(chan_names) < len(clims):\n raise ValueError('Contrast Limits specified exceed the number of channels given')\n\n for i in range(len(chan_names)):\n if clims:\n if len(clims[i]) == 2:\n if 'float' in self.dtype.name:\n clim = (float(clims[i][0]), float(clims[i][1]), -1000, 1000)\n else:\n info = np.iinfo(self.dtype)\n clim = (float(clims[i][0]), float(clims[i][1]), info.min, info.max)\n elif len(clims[i]) == 4:\n clim = (float(clims[i][0]), float(clims[i][1]), float(clims[i][2]), float(clims[i][3]))\n else:\n raise ValueError('clim specification must a tuple of length 2 or 4')\n\n first_chan = True if i == 0 else False\n if not clims or i >= len(clims):\n dict_list.append(self.create_channel_dict(chan_names[i], first_chan=first_chan))\n else:\n dict_list.append(self.create_channel_dict(chan_names[i], clim, first_chan=first_chan))\n\n full_dict = {'multiscales': multiscale_dict,\n 'omero': {\n 'channels': dict_list,\n 'rdefs': rdefs,\n 'version': 0.1}\n }\n\n self.current_pos_group.attrs.put(full_dict)", "async def configure(self, channel: Optional[Channel] = None) -> None:\n await self.qos(prefetch_count=self.concurrent)\n with suppress(SynchronizationError):\n await self._configure_queue()\n await self._dlx.configure()\n await self._configure_exchange()\n await self._configure_queue_bind()", "def _init_channel_configs(self, recipe):\n channel_decls = read_channel_declarations(recipe, self.loading_rules)\n\n if channel_decls:\n self.channel_configs = channel_configs_from_decls(\n channel_decls, self.project_config.keychain\n )\n elif self.bulk_mode == \"Serial\":\n 
self.channel_configs = [\n standard_channel_config(\n self.org_config,\n self.recipe_options,\n 1,\n 1,\n )\n ]\n else:\n self.channel_configs = [\n standard_channel_config(\n self.org_config,\n self.recipe_options,\n self.num_generator_workers,\n None,\n )\n ]", "def _setup_channels_and_queues(self, working_directory):\n additional_load_options = {\n \"ignore_row_errors\": self.ignore_row_errors,\n \"drop_missing_schema\": self.drop_missing_schema,\n }\n subtask_configurator = SubtaskConfigurator(\n self.recipe, self.run_until, self.bulk_mode, additional_load_options\n )\n self.queue_manager = SnowfakeryChannelManager(\n project_config=self.project_config,\n logger=self.logger,\n subtask_configurator=subtask_configurator,\n )\n if len(self.channel_configs) == 1:\n channel = self.channel_configs[0]\n self.queue_manager.add_channel(\n org_config=channel.org_config,\n num_generator_workers=channel.declaration.num_generators,\n num_loader_workers=channel.declaration.num_loaders,\n working_directory=working_directory,\n recipe_options=channel.declaration.recipe_options,\n )\n else:\n self.configure_multiple_channels(working_directory)", "async def set_channel(self, ctx, channel):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n self.stream_channel = channel\n await self.bot.say(\"Channel sucessfully assigned.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "async def setcoachchannel(self, ctx, channel: int):\r\n if ctx.guild.id == 445092370006933505:\r\n await self.config.guild(ctx.guild).coachchannel.set(int(channel))\r\n await ctx.send(\"You set {} as the coaching channel\".format(channel))\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def addchan(channel):", "def configure(self):\n\t\tself.outChannel = CAClient(self.pvstring + \".AOUT\")\n\t\tself.outChannel.configure()\n\t\tself.inChannel = CAClient(self.pvstring + \".TINP\")\n\t\tself.inChannel.configure()", "def set_specific_channel(channel_number):\n global interface\n\n print(\"Set channel to {} on interface {}\".format(channel_number, interface))\n system(f\"iwconfig {interface} channel {channel_number}\")", "def request_configure(self, req, beams, channels, int_time, blank_phases):\n\n message = (\"nbeams={}, nchannels={}, integration_time={},\"\n \" nblank_phases={}\").format(beams, channels,\n int_time, blank_phases)\n log.info(\"Configuring FITS interface server with params: {}\".format(\n message))\n self.nbeams = beams\n self.nchannels = channels\n self.integration_time = int_time\n self.nblank_phases = blank_phases\n self._fw_connection_manager.drop_connection()\n self._stop_capture()\n self._configured = True\n return (\"ok\",)", "def setChannel(self, channel, ircChannel):\n channel = channel.lower()\n self.channels[channel] = ircChannel\n self.flush()", "async def _cmdf_setchannel(self, substr, msg, privilege_level):\n ch_obj = None\n if len(substr) == 0:\n ch_obj = msg.channel\n else:\n ch_obj = self._client.search_for_channel(substr, enablenamesearch=True, serverrestriction=self._server)\n\n if ch_obj is None:\n buf = \"**Error:** Channel not found. 
No changes were made.\"\n else:\n self._ch_msg_channelid = ch_obj.id\n self._save_settings()\n buf = \"In-channel greeting messages will now be sent in \" + utils.ch_to_mention(ch_obj) + \".\"\n await self._client.send_msg(msg, buf)\n return", "def _configure(self):\n pass", "def _set_channels(self, channels, ioupdate=False):\n\n # Activate selected channels\n assert (type(channels) is int) or (type(channels) is list), 'Channels must be int or list'\n\n # If only one channel is selected\n if type(channels) is int:\n assert channels in [0, 1, 2, 3], 'channel must be 0, 1, 2 or 3'\n self._write('CSR', [2**(channels) << 4 | self.CSR_LOW_NIBBLE])\n\n # If several channels are given\n elif type(channels) is list:\n for channel in channels: assert channel in [0,1,2,3], 'channels must be between 0, 1, 2 or 3'\n\n # Removing duplicates\n uniq_channels = [channel for channel in set(channels)]\n\n channel_nibble = int(0)\n for channel in uniq_channels:\n channel_nibble += 2**channel\n channel_nibble << 4\n self._write('CSR', [channel_nibble << 4| self.CSR_LOW_NIBBLE])\n\n if ioupdate:\n self._io_update()", "def set_channel(self, channel):\n self.response['channel'] = channel", "def rpc_config_set(self, options):\n\t\tfor option_name, option_value in options.items():\n\t\t\tself.config.set(option_name, option_value)\n\t\treturn", "def configure(self, options, conf):" ]
[ "0.64508337", "0.6233799", "0.62058055", "0.5804514", "0.580413", "0.57061034", "0.5590135", "0.558009", "0.5559871", "0.5559648", "0.55508906", "0.55389106", "0.55257565", "0.55187213", "0.5471973", "0.5471861", "0.5437038", "0.54244226", "0.5387382", "0.53786397", "0.5295505", "0.52113116", "0.52012634", "0.5187653", "0.5165484", "0.5073652", "0.5029948", "0.502284", "0.50109476", "0.49941227" ]
0.69039214
0
Description Returns talk string from character in current location If no character in current location is found, "There is no one to talk to." string is returned
def talk(self): if self.location.character: return self.location.character.talk() else: return "There is no one to talk to."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _current_char(self):\r\n\r\n return self._input_string[self._index]", "def look_at_character(from_char, to_char):\n visible = wornlist(from_char, top=True) \n if from_char == to_char:\n title = \"{} se regarde\".format(from_char.data['name'])\n else:\n title = \"{} regarde {}\".format(\n from_char.data['name'],\n from_char.remember.get_remember(to_char))\n content = \"{}</p><p>{}\".format(to_char.data['longdesc'], visible)\n fmt(from_char, title, content)", "def getchar(words,pos):\n\n\tif pos<0 or pos>=len(words): return None\n\n\treturn words[pos]", "def character(self) -> str:\r\n return self.char if self.was_guessed else '_'", "def currentChar(self):\r\n\t\tif self.index < len(self.source):\r\n\t\t\treturn self.source[self.index]\r\n\t\telse:\r\n\t\t\treturn None", "def get_character(self):\n\n return self.suggestion_set[2]", "def character(m) -> str:\n return m[0]", "def character(self) -> Optional[str]:\n if self.index < len(self.buffer):\n return self.buffer[self.index]\n return None", "def action(self):\n\n base_seq = super().get_base_seq(self.__arguments, self.__dna_data, \"#@\")\n str_base_seq = base_seq.get_dna_string()\n seq_to_find = super().get_seq_to_be_found(self.__arguments, self.__dna_data)\n return str(str_base_seq.find(seq_to_find))", "def getCharAtPos(self, row, col):\n return self.maze[row][col]", "def character(self):\n if self.is_alive():\n return \"[*]\"\n return \"[ ]\"", "def get_char_echo(self) -> str:\n ...", "def next_character(self) -> str:\n return self.seek(self.index + 1)", "def current(self) -> str:\n return self.s[self.pos]", "def get_character(self):\n return self.character", "def _peek_char(self):\n if self.read_pos > self.length:\n return \"\"\n\n return self.data[self.read_pos]", "def _get_interleving(self, index):\n try:\n index = self._char_indexes[index - 1]\n except IndexError:\n return \"\"\n s = \"\"\n while True:\n index += 1\n if index in self._char_indexes:\n break\n elif index in self._code_indexes:\n s += self._raw_string[index]\n else:\n break\n return s", "def peekChar(self):\r\n\t\tif (self.index + 1) < len(self.source):\r\n\t\t\treturn self.source[self.index + 1]\r\n\t\telse:\r\n\t\t\treturn None", "def display_position(game_board: list, character: list):\n print(\"You are currently here:\")\n for position in game_board:\n if position[0] == character[0] and position[1] == character[1]:\n print('C', end=\" \")\n else:\n print(\"*\", end=\" \")\n if position[1] == game_board[-1][1]:\n print(\"\")", "def display_char(self) -> None:\r\n print(self.char if self.was_guessed else '_', end=' ')", "def speak(self):\n # Speaks randomly to another agent on the same cell\n anticipated_meaning = None\n cellmates = self.model.grid.get_cell_list_contents([self.pos])\n\n # If other agents on the same cell\n if len(cellmates) > 1:\n hearer = self.random.choice(cellmates)\n\n while (hearer == self): # agents should not talk to themselves\n hearer = self.random.choice(cellmates)\n\n meaning = self.random.choice(self.model.schedule.agents).unique_id\n\n # If the speaker is not acquainted with the meaning\n if meaning not in self.meanings:\n print(\"New meaning added to speaker\")\n self.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # If the hearer is not acquainted with the meaning\n if meaning not in hearer.meanings:\n print(\"New meaning added to hearer\")\n hearer.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # 50% chance of having an anticipated meaning 
default\n if self.random.random() <= self.model.antecipated_prob:\n print(\" \" + str(self.unique_id) +\n \" points at \" + str(meaning))\n anticipated_meaning = meaning\n\n # If the speaker has a word for the meaning\n if meaning in self.meaning2word:\n word = self.meaning2word[meaning]\n\n # If the hearer has a word for the meaning\n if word in hearer.word2meaning:\n # If the hearer has no anticipated meaning\n if anticipated_meaning == None:\n return Conversation(word=word, meaning=meaning, success=1.0)\n # If anticipated meaning different from hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning != hearer.word2meaning[word]):\n hearer.delete_link(word)\n hearer.create_link(word, anticipated_meaning)\n return None\n # If anticipated meaning same as hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning == hearer.word2meaning[word]):\n return Conversation(word=word, meaning=meaning, success=1.0)\n\n # If the hearer has no word for the meaning\n else:\n # If anticipated meaning same as speaker meaning\n if (anticipated_meaning != None\n and word not in hearer.word2meaning\n and anticipated_meaning not in hearer.meaning2word):\n hearer.create_link(word, anticipated_meaning)\n return Conversation(word=word, meaning=meaning, success=0.0)\n\n # If the speaker has no word for the meaning\n if meaning not in self.meaning2word:\n return Conversation(word=None, meaning=meaning, success=0.0)", "def func(self):\n if not self.raw:\n self.msg(\"Say what?\")\n return\n options = {\"is_pose\": True}\n speech = self.raw.lstrip(\" \")\n # calling the speech hook on the location\n speech = self.caller.location.at_say(speech)\n # Feedback for the object doing the talking.\n langstring = \"\"\n current = self.caller.languages.current_language\n if current and current.lower() != \"arvani\":\n langstring = \" in %s\" % current.capitalize()\n options.update({\"language\": current, \"msg_content\": speech})\n self.msg(\n 'You say%s, \"%s{n\"' % (langstring, speech),\n from_obj=self.caller,\n options=options,\n )\n # Build the string to emit to neighbors.\n pre_name_emit_string = ' says%s, \"%s{n\"' % (langstring, speech)\n self.caller.location.msg_action(\n self.caller, pre_name_emit_string, exclude=[self.caller], options=options\n )\n self.caller.posecount += 1", "def _get_specific_character(self):\n characters = self._get_all_characters(self._main_page)\n for character in characters.keys():\n if self._find_name.lower() in character.lower():\n return {character: characters[character]}\n else:\n raise CharacterNotFound", "def tell_position(self, curpos):\n \n # store, in case we need to repeat\n self.last_curpos = curpos\n\n # fetch room strings\n room_name, room_loc, room_dir = self.rooms[curpos]\n \n # build utterance text\n txt = labyrinth_text.youare.format(room_loc, room_name)\n\n #~ print '-' * 70\n #~ print txt\n \n #self.audio.play_presynthesized(curpos * 5)\n #~ self.audio.play_sound_file(self.sounds_location[curpos])\n self.audio.synthesize_and_play(txt)", "def cipherFromReflector(self, char):\n inputCharNum = self.GetNumByChar(char) #Finds the index of the value for the input character to find the internal wiring connection\n outputChar = self.chrNum(inputCharNum) #Finds the ASCII code of the index value to find the external rotor connection \n return outputChar #Returns the external character that the wiring is connected to", "def look(from_char, command):\n # first, we sanitize spaces\n command = re.sub(r\"\\s+\", \" \", command).lower()\n # regarder 
l'objet:\n match = re.match(r\"(le |la |les |l')?(\\w+)\\s*$\", command)\n if match:\n look_at(from_char, match.group(2))\n return\n # regarder dans la boite:\n match = re.match(r\"(dans|sur) (le |la |les |l')?(\\w+)\\s*$\", command)\n if match:\n look_in(from_char, match.group(3))\n return\n # regarder l'objet dans la boite\n match = re.match(r\"(le |la |les |l')?(\\w+)\\s(dans|de|sur)\\s(le |la |les |l')?(\\w+)\\s*$\", command)\n if match:\n look_at_in(from_char, match.group(5), match.group(2))\n return\n info(from_char.player, \"\"\"<b>Usage:</b>\n <code>regarder [le|la|les|l'] <i>mot_clé</i></code><br/>\n <code>regarder [dans|sur] [le|la|les|l'] <i>mot_clé</i></code><br/>\n <code>regarder [le|la|les|l'] <i>mot_clé</i> [dans|de|sur|d']\n [le|la|les|l'] <i>mot_clé</i></code>\"\"\")", "def print_message(self):\r\n # print(\"Word : \" + game_instance.get_word())\r\n print(\"\\nCurrent guess : \"+self.current_word)\r\n print(\"\\ng:guess, t:tell me, l:letter guess, q:quit\")", "def get_current_player(self) -> chr:\n return self._players[self._current_player]", "def get_character_reference(self):\n\n return self.current_character", "def cipherToReflector(self, char):\n inputCharNum = self.ordChar(char) #Finds the index value of the input ASCII code to find the external rotor connection\n outputChar = self.GetCharByNum(inputCharNum) #Finds the corresponding character of the input to find the internal wiring connection\n return outputChar #Finds the internal wiring contact that the character is connected to" ]
[ "0.6283816", "0.6197168", "0.6185099", "0.61481845", "0.6098568", "0.60792804", "0.60104406", "0.599413", "0.5783666", "0.5779067", "0.57776135", "0.57432467", "0.5702468", "0.56931794", "0.56701416", "0.56509495", "0.5648333", "0.56452805", "0.56345314", "0.5590737", "0.55749434", "0.5568484", "0.5559842", "0.5558279", "0.5554536", "0.5551868", "0.5542241", "0.5539578", "0.5538898", "0.5494535" ]
0.74233997
0
Description Checks if there is a character to fight in current location If there is none, "There is no one to fight here." string is returned Checks if character is Friend If Friend, returns result of Friend class fight() method Calls fight() method from Enemy class and passes current strength attribute to it Checks if lethal damage was received If lethal damage was received, "char_name crushes you, puny adventurer" string is returned If there is any loot in character, puts loot in backpack and places None value to loot attribute in that character Adds info about loot to returned value Puts None to character attribute of current location Returns string with information about fight result, health lost, health left and loot info(if any)
def fight(self): returned_string = "You killed {defeated_char_name}" \ "with {Weapon} and lost {health_lost} health. " \ "Your current health is {health_curr}" if self.location.character is None: return "There is no one to fight here." if isinstance(self.location.character, Friend): return self.location.characer.fight('friend') health_lost = self.location.character.fight(self.strength) if health_lost >= self.health: return f"{self.location.character.name} crushes you, puny adventurer\n" else: self.health -= health_lost defeated_char_name = self.location.character.name if self.location.character.loot.name: returned_string += f"You looted {self.location.character.loot.name} from {self.location.character.name}" self.backpack[self.location.character.loot.name] = self.location.character.loot self.location.character.loot = None self.location.character = None if self.equipped['Weapon'] is None: return returned_string.format(defeated_char_name=defeated_char_name, weapon="Unarmed", health_lost=health_lost, health_curr=self.health) return returned_string.format(defeated_char_name=defeated_char_name, weapon=self.equipped['Weapon'].name, health_lost=health_lost, health_curr=self.health)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def fight(self, ctx):\r\n attacker = ctx.message.author.name\r\n defenders = ctx.message.mentions\r\n # only continue if valid attacker and defender\r\n attacker_ship = Ship.find_ship(attacker)\r\n if not attacker_ship:\r\n await ctx.send('{0}, you do not have a ship! `$ship` to get one'.format(ctx.message.author.mention))\r\n return\r\n if not defenders:\r\n await ctx.send('Who are you fighting? `$fight @user` to fight someone')\r\n # reset cooldowns when not successful fights\r\n # self.fight.reset_cooldown()\r\n return\r\n elif len(defenders) > 1:\r\n await ctx.send('Who are you fighting? One at a time (for now)')\r\n return\r\n else:\r\n defender = defenders[0].name\r\n\r\n if attacker == defender:\r\n attacker_ship.gold -= 50\r\n if attacker_ship.gold < 0:\r\n attacker_ship.gold = 0\r\n attacker_ship.update()\r\n await ctx.send('A mutiny has started on {0}\\'s ship! The treasure hold has been ransacked! '\r\n '{1} gold was taken.'.format(defender, 50))\r\n return\r\n\r\n defender_ship = Ship.find_ship(defender)\r\n if not defender_ship:\r\n await ctx.send('{0} does not have a ship! There are no fights'\r\n ' on the high sea if there are no ships to fight'.format(defender))\r\n return\r\n\r\n # actually start fight\r\n em = discord.Embed(title='{0} has attacked {1} :rage: '.format(attacker, defender), colour=0xDDDD00)\r\n\r\n # calculate who wins based on their attack and defense plus random number\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n attacker_msg = ''\r\n defender_msg = ''\r\n\r\n while attacker_ship.hull > 0 and defender_ship.hull > 0:\r\n attack = random.randint(1, 100)\r\n attack += attacker_ship.cannons + attacker_ship.crew\r\n\r\n defense = random.randint(1, 100)\r\n defense += defender_ship.cannons + defender_ship.crew\r\n\r\n defender_ship.damage_hull(attack)\r\n attacker_ship.damage_hull(defense)\r\n\r\n attacker_msg += 'Fired a volley of **{}** cannonballs <a:cannon:554558216889958400> \\n'.format(attack)\r\n defender_msg += '<a:cannon_reversed:554722119905181735> Return fired a volley of **{}** cannonballs \\n'.format(defense)\r\n\r\n\r\n\r\n if attacker_ship.hull > defender_ship.hull: # attacker wins\r\n # base gold at 100, more gold earned for harder fights, less or easier ones\r\n gold = 100 + (defender_ship.level() - attacker_ship.level()) * 2\r\n gold = gold if gold > 0 else 0\r\n attacker_ship.gold += gold\r\n attacker_ship.win += 1\r\n defender_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n\r\n em.add_field(name='{} is the winner! :crossed_swords:'.format(attacker),\r\n value='<a:treasure_chest:554730061463289857> They earned **{}** gold for their coffers.'.format(gold), inline=False)\r\n\r\n else: # defender wins\r\n defender_ship.win += 1\r\n attacker_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n em.add_field(name='{} is the winner! 
:shield:'.format(defender),\r\n value=' <a:armor:554559559545520128> Their ship survives to fight another day.', inline=False)\r\n\r\n await ctx.send(embed=em)", "def give(self):\n if self.location.character:\n item = input(f\"What do you want to give to {self.location.character.name}?\\n>\")\n if item in self.backpack:\n if self.location.character.give(item):\n if isinstance(self.location.character, Friend):\n loot = self.location.character.possession\n self.backpack[loot.name] = loot\n self.location.character.treat = None\n self.location.character.possession = None\n del self.backpack[item]\n return f\"{self.location.character.name} accepted your gift, and gave you {loot}\"\n if isinstance(self.location.character, Enemy):\n name = self.location.character.name\n self.location.character = None\n del self.backpack[item]\n return f\"You fend off {name} with {item}\"\n else:\n return f\"It does not accept {item}\"\n else:\n return f\"{self.location.character.name} does not like {item}\"\n else:\n return \"You don't have this\"\n else:\n return \"There is no one here\"", "def fight(who_fight=None):\r\n global monsters_defeated\r\n \r\n if isinstance(who_fight,helpful.Being):\r\n ###specific monster\r\n enemy = who_fight\r\n\r\n elif isinstance(who_fight,list):\r\n ###list of categories\r\n enemy = items_lists.random_monster(random.choice(who_fight))\r\n\r\n else:\r\n ###else picks a monster at random, not boss though\r\n enemy = items_lists.random_monster()\r\n \r\n\r\n\r\n # print 'fighting:\\n' + enemy.advanced_str()\r\n encountered = words.being_adj().capitalize() + ' ' + str(enemy)\r\n raw_input(str(player) + ' encounters a ' + encountered + '!\\n')\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n while choice == 'inventory':\r\n inspect_inventory()\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n if choice == 'yes':\r\n\r\n while enemy.get_health() > 0 and player.get_health() > 0:\r\n #player attacks\r\n item = helpful.pick_item(player.get_inventory(), 'What to use?')\r\n player.use(item)\r\n attack = item.get_damage()\r\n defend = item.get_health()\r\n\r\n if attack > 0:\r\n enemy.hit(item)\r\n raw_input('You dealt ' +str(attack) + ' damage!')\r\n elif defend > 0:\r\n raw_input('You gained ' + str(defend) + ' HP!')\r\n else:\r\n raw_input('That was pretty dumb.\\n')\r\n \r\n if enemy.get_health() > 0: #if the enemy is still alive\r\n\r\n ###enemy attacks, using random item in enemy's inventory\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n raw_input(str(enemy).capitalize() + ' used ' + str(enemy_choice) + '!\\n')\r\n raw_input('You lost ' + str(enemy_choice.get_damage()) + ' health!\\n')\r\n \r\n player.set_health(max(0,player.get_health())) #make health nonnegative\r\n enemy.set_health(max(0,enemy.get_health()))\r\n\r\n print('Player Health: ' + str(player.get_health()) + '\\n')\r\n raw_input(str(enemy) + ' Health: ' + str(enemy.get_health()) + '\\n')\r\n \r\n if enemy.get_health() == 0:\r\n winner = str(player)\r\n raw_input('You looted the following items:\\n' + enemy.get_inv_string())\r\n player.grab_items(enemy.get_inventory())\r\n result = 'win'\r\n monsters_defeated += 1\r\n\r\n if player.get_health() == 0:\r\n winner = str(enemy)\r\n result = 'death'\r\n\r\n print(winner + ' wins!\\n')\r\n\r\n elif choice == 'no':\r\n\r\n ouch = random.randrange(0,2)\r\n if enter_two == config.confus(config.config2):\r\n ouch = 0\r\n global cheated\r\n cheated = True\r\n print 
'<yolo>'\r\n if ouch:\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n print 'You got away, but were hit by the ' + \\\r\n str(enemy) +\"'s \" + str(enemy_choice) +'!' + '\\n'\r\n raw_input('You sustained ' + str(enemy_choice.get_damage()) +' damage.\\n')\r\n if player.get_health() <= 0:\r\n return 'death'\r\n else:\r\n raw_input('You got away safely!\\n\\nThat was close!\\n')\r\n result = 'lose'\r\n\r\n return result", "def fighter(mob):\r\n\tglobal player\r\n\trestore()\r\n\top = op_set(mob)\r\n\tt = None\r\n\tplayer = engine('you', fo)\r\n#\tprint fo\r\n\twhile mhp > 0 and php > 0:\r\n\t\tt = turn(t, op)\r\n\t\tprompt()\r\n\tif mhp <= 0 and php > 0:\r\n\t\treturn 'winner'\r\n\telif php <= 0 and mhp > 0:\r\n\t\treturn 'game_death'\r\n\telse:\r\n\t\tprint \"You both seemed to have died...\"\r\n\t\treturn 'game_death'", "def attack(self):\n\t if self.damage == 0:\n\t\treturn None\n\t elif self.name == \"die\":\n\t roll = random.randint(1,20)\n\t if roll == 1:\n\t return 0\n\t else:\n\t return 1\n\t elif self.damage == 1 or self.damage == 2:\n\t\treturn self.damage\n\t elif self.damage == 3:\n\t\treturn random.randint(3,5)\n\t elif self.damage == -4:\n\t return 4\n\t elif self.damage == 10:\n\t\trandomInt = random.randint(1,4)\n\t\tif randomInt == 1:\n\t\t return 10\n\t\telse:\n\t\t return 0\n\t else:\n\t return self.damage", "def alphabet_war(fight):", "def test():\r\n\tcharacter1=character(3)\r\n\t#i=0\r\n\t#for i in range(0, len(character1.attribute_list)):\r\n\t#\tcharacter1.attribute_list[i]=str(character1.attribute_list[i])\t\r\n\t#attribute_string=\", \".join(character1.attribute_list) #old ability stringer code\r\n\t#print (attribute_string)\r\n\tprint (\"%s, level %s %s %s %s, %s hit points\" %(character1.name, character1.level, character1.sex, character1.race, character1.cclass, character1.hp))\r\n\tprint (\"STR: %s DEX: %s CON: %s INT: %s WIS: %s CHA: %s\" %(character1.strength, character1.dexterity, character1.constitution, character1.intelligence, character1.wisdom, character1.charisma))\r\n\tproflist=\", \".join(character1.proficiencies)\r\n\tprint (\"Proficiencies: %s\" %(proflist))\r\n\tprint (character1.quirk)\r\n\tprint (\"%s, %s, %s\" %(character1.weapon, character1.armor, character1.trinket))\r\n\r\n\t\"\"\"\t\r\ndef main():\r\n\t\"\"\"\r\n\t#old main program\r\n\t\"\"\"\r\n\ttry:\r\n\t\tcharsave=stellagama.savefile()\r\n\t\tfile_name=charsave+\".txt\"\r\n\t\toutp = open(file_name,\"w\")\r\n\t\tfor i in range (1,6):\r\n\t\t\toutp.write(\"Character \"+str(i)+'\\r\\n')\r\n\t\t\toutp.write(\"========\"+'\\r\\n')\r\n\t\t\toutp.write(\"Strength: \"+str(stellagama.dice(3,6))+'\\r\\n')\r\n\t\t\toutp.write(\"Dexterity: \"+str(stellagama.dice(3,6))+'\\r\\n')\r\n\t\t\toutp.write(\"Constitution: \"+str(stellagama.dice(3,6))+'\\r\\n')\r\n\t\t\toutp.write(\"Intelligence: \"+str(stellagama.dice(3,6))+'\\r\\n')\r\n\t\t\toutp.write(\"Wisdom: \"+str(stellagama.dice(3,6))+'\\r\\n')\r\n\t\t\toutp.write(\"Charisma: \"+str(stellagama.dice(3,6))+'\\r\\n')\r\n\t\t\toutp.write(\"Gold: \"+str(10*stellagama.dice(3,6))+'\\r\\n')\r\n\t\t\toutp.write(\"\"+'\\r\\n')\r\n\t\t\ti=+1\r\n\tfinally: #added to make sure the file is always closed no matter what\r\n\t\toutp.close() #close file\r\n\"\"\"", "def get_character_health(character: dict):\r\n print(\"Your health is: %d\" % character['HP'])", "def checkhealth(currentstrength, currenthunger):\n global HUNGER\n global STRENGTH\n flash = False\n grizzly_text = \"\"\n\n if currentstrength <= 0:\n if FIGHT:\n if 
GRIZZLY_BEAR:\n grizzly_text = \"grizzly \"\n printmessage(\"The %sbear has killed you.\" % grizzly_text, 7, MAGENTA, 2)\n else:\n printmessage(\"You have died from severe exhaustion.\", 5, RED, 2)\n die('tooweak')\n\n for i in range(0, 5): \n strengthrange = (79, 59, 39, 19, 0)\n if currentstrength in range(strengthrange[i], strengthrange[i] + 20):\n STRENGTH = STRENGTH_TEXT[i]\n if currentstrength > 99:\n STRENGTH = STRENGTH_TEXT[0]\n if currentstrength <= 19: \n flash = True\n update_strength(flash)\n flash = False # Make sure flash isnt incorrectly set for hunger too\n\n if currenthunger <= 0:\n printmessage(\"You have died from malnutrition.\", 5, RED, 2)\n die('starved')\n\n for i in range(0, 5): \n hungerrange = (79, 59, 39, 19, 0)\n if currenthunger in range(hungerrange[i], hungerrange[i] + 20): \n HUNGER = HUNGER_TEXT[i]\n if currenthunger > 99:\n HUNGER = HUNGER_TEXT[0]\n if currenthunger <= 19: \n flash = True\n update_hunger(flash)", "def parse_fighter(self, response):\n info = {}\n\n fighter_name = response.xpath(\"//h2[@class='b-content__title']\" +\n \"/span[@class='b-content__title-highlight']/text()\").extract_first().strip()\n info[\"name\"] = fighter_name\n\n record = response.xpath(\"//h2[@class='b-content__title']\" +\n \"/span[@class='b-content__title-record']/text()\").extract_first().strip()\n\n # split recort into win, lose, and draw, and nc if applicable\n record = record.split(\": \")[1]\n w_l_d = record.split(\"-\")\n\n info[\"win\"] = w_l_d[0]\n info[\"lose\"] = w_l_d[1]\n\n if w_l_d[2].find(\"(\") == -1:\n info[\"draw\"] = w_l_d[2]\n info[\"nc\"] = \"0\"\n else:\n info[\"draw\"] = w_l_d[2][:w_l_d[2].find(\"(\") - 1]\n # no contest\n info[\"nc\"] = w_l_d[2][w_l_d[2].find(\"(\") + 1: ].split()[0]\n\n for sel in response.xpath(\"//div[@class='b-list__info-box \" +\n \"b-list__info-box_style_small-width js-guide']\" +\n \"/ul[@class='b-list__box-list']/li\"):\n item = sel.xpath(\".//i/text()\").extract_first()\n\n # remove character :\n item = item.strip().lower()[:-1]\n\n if item:\n item_value_sel = sel.xpath(\"text()\")\n \n # be careful of \"\\n\"\n if item_value_sel:\n item_value = item_value_sel.extract()[-1].strip()\n\n # use empty string for missing information\n info[item] = item_value if item_value != \"--\" else \"N/A\" \n else:\n info[item] = \"N/A\"\n\n # for debug\n #print(info)\n\n # career statistics\n stat = {}\n for sel in response.xpath(\"//div[@class='b-list__info-box-left clearfix']/div\" +\n \"/ul[@class='b-list__box-list b-list__box-list_margin-top']/li\"):\n item = sel.xpath(\".//i/text()\").extract_first()\n\n # remove character :\n item = item.strip()[:-1]\n\n # modify variable names\n item = item.replace(\".\", \"\").replace(\" \", \"_\")\n\n if item:\n item_value_sel = sel.xpath(\"text()\")\n \n # be careful of \"\\n\"\n if item_value_sel:\n item_value = item_value_sel.extract()[-1].strip()\n stat[item] = item_value\n else:\n stat[item] = \"N/A\"\n # for debug\n # print(info)\n # print(stat)\n\n # use loader to populate items\n loader = ItemLoader(item=UfcFighterItem(), response=response)\n\n for d in[info, stat]:\n for k, v in d.items():\n loader.add_value(k, v)\n \n loader.add_value(\"last_updated\", datetime.now())\n \n # yield each matach as an item\n ufc_fighter_item = loader.load_item()\n yield ufc_fighter_item", "def get_damage():\n\n return character['Damage']", "def fight(self, combat_item):\r\n print(self.name + \" doesn't want to fight with you\")\r\n return True", "def _health(self) -> str:\n bad = []\n if self.leech:\n 
bad.append(\"leech\")\n if self.zero:\n bad.append(\"last quality was zero\")\n\n if not bad:\n return \"OK\"\n else:\n return \" & \".join(bad)", "def fight(self, combat_item):\r\n if combat_item == self.weakness:\r\n print(\"You fend \" + self.name + \" off with the \" + combat_item )\r\n Character.victory_count +=1\r\n return True\r\n else:\r\n print(self.name + \" crushes you, puny adventurer\")\r\n return False", "def battle(first, second):\n\n print(get_catchphrase(first))\n print(get_catchphrase(second))\n\n if get_damage(second) > get_damage(first):\n return second\n else:\n return first", "def monster_check():\n player = get_locations()['player']\n monster = get_locations()['monster']\n if player == monster:\n if STATUS['weapon'] == 'armed':\n print(\"You killed the monster with the sword!\")\n play_again()\n else:\n if STATUS['hp'] > 0:\n STATUS['hp'] -= 5\n return \"The monster caught you! You barely manage to escape...\"\n elif STATUS['hp'] <= 0:\n print(\"The monster catachs you in its claws. Its not pretty.\")\n play_again()\n else:\n return \"Nothing in this room. Its around here somehwere though. \"", "def character():\n character_name = input(\"Enter a name for your adventurer: \")\n character_hp = 10\n return character_name, character_hp", "def on_attack(self, target, friendly):\n # Get buff from Dread Admiral Eliza\n if self.race == 'pirate' or self.race == 'all':\n eliza_buff_atk, eliza_buff_dfs = friendly.friendly_eliza_buff\n for each in friendly.minions:\n each.get_buff(eliza_buff_atk, eliza_buff_dfs)\n\n # If divine shield, not getting hurt\n if not self.divine_shield:\n self.hurt = True\n if not target.divine_shield:\n target.hurt = True", "def get_hp():\n\n return character['HP']", "def _fight_action(self, fight_action: FightActionModel) -> None:\n\n move_effects = fight_action.get_effects()\n\n fight_action.defender.hp = fight_action.defender.hp + move_effects.hp\n if fight_action.defender.hp < 0:\n fight_action.defender.hp = 0\n elif fight_action.defender.hp > fight_action.defender.stats[StatEnum.HP]:\n fight_action.defender.hp = fight_action.defender.stats[StatEnum.HP]\n\n for staged_stat, value in move_effects.staged_stats.items():\n if value > 0:\n fight_action.attacker.staged_stats[staged_stat] = min(6, fight_action.attacker.staged_stats[\n staged_stat] + value)\n elif value < 0:\n fight_action.defender.staged_stats[staged_stat] = max(-6, fight_action.defender.staged_stats[\n staged_stat] + value)\n\n fight_action.move.current_pp = fight_action.move.current_pp - 1 if fight_action.move.current_pp > 0 else 0", "def fight(self):\r\n\t\tif self.death():\r\n\t\t\treturn 0\r\n\t\tif self.ctime < 1:\r\n\t\t\tself.ctime += 0.05\r\n\t\telse:\r\n\t\t\tself.ctime = 0\r\n\t\t\tself.hit()", "def __str__(self):\n return f'Character name: {self.name}\\nhealth: {self.health}\\n' \\\n f'strength: {self.strength}\\nchance dodge: ' \\\n f'{round(self.chance_dodge, 2)}\\nchance critical:' \\\n f' {round(self.chance_critical, 2)} '", "def warriorBattle2():\n print(\"As the ogre charges at you, you jump to get out of the way.\")\n print(\"The ogre catches you mid air, and slams you in to the wall.\")\n print(f\"Your health is now {hero_data[0] - 100}\")\n print(\"You are dead.\")", "def fight(self, location):\n\n monster = location.monster\n\n while monster.hp > 0 and self.player.hp > 0:\n if monster.attackhit():\n print(\"The {} hits you!\".format(monster.name))\n self.player.hp -= monster.attackdamage()\n else:\n print(\"The {} misses!\".format(monster.name))\n\n if 
self.player.hp < 0:\n break\n\n print()\n print(\"You have {} HP and the monster has {}\".format(\n self.player.hp, monster.hp))\n\n input(\"input anything to attack\")\n if self.player.attackhit():\n print(\"You hit the {}!\".format(monster.name))\n monster.hp -= self.player.attackdamage()\n else:\n print(\"You miss!\")\n\n print()\n\n if monster.hp < 0:\n print(\"Congratulations! You have killed the {}! \".format(\n monster.name))\n location.remove_monster()", "async def battle(ctx):\n return await battle(ctx)", "def attack(self, target, friendly):\n self.on_attack(target, friendly)\n dmg = self.on_deal_dmg(target, friendly)\n self.death_remove(friendly)\n # May remove other minions in special cases\n # ... \n\n return dmg", "def fight(fighters):\n return {}", "def opponent_one_attack(opponent_one, opponent_two):\n dex_check_roll = roll_die(1, 20) # Roll a 1d20 to for Dexterity check\n if dex_check_roll > opponent_two['Dexterity']: # If greater than enemy Dexterity stat, do damage\n damage = class_hp(opponent_one['Class']) # Roll corresponding class die to determine damage\n print('You rolled a', dex_check_roll, 'and passed the Dexterity check. You did', damage, 'damage')\n opponent_two['HP'] -= damage # Replace opponent's HP value with the difference\n if opponent_two['HP'] <= 0: # If the HP is less than or equal to 0, then print death message\n print('Enemy has died')\n return opponent_two['HP'] # Return HP so combat_round function knows whether combat has ended\n else:\n print('Enemy now has', opponent_two['HP'], 'HP') # If enemy didn't die, then print remaining HP\n else:\n print('You rolled a', dex_check_roll, 'and failed the Dexterity check') # Prints failed dexterity check roll", "def attack(self, weapon: int, defender):\n attk = self._attack(weapon, defender)\n\n return f\"{str(defender)} has died.\" if attk else defender.current_health", "def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"" ]
[ "0.6515021", "0.6429588", "0.6201165", "0.60823286", "0.57243544", "0.5717124", "0.56975776", "0.5687571", "0.5674974", "0.56749326", "0.5654082", "0.5598205", "0.5558623", "0.5543056", "0.5525446", "0.55131817", "0.5504393", "0.55029356", "0.5498872", "0.5492188", "0.5482174", "0.5479372", "0.5464628", "0.54452586", "0.5443629", "0.54434294", "0.54423714", "0.5419666", "0.5407823", "0.53929716" ]
0.8229072
0
Description Checks if an item in current location exists Puts item in backpack Assigns None value to item attribute of current location Returns "You took item_name. And put it in your backpack." string Returns "There is nothing to take." string
def take(self): if self.location.item: self.backpack[self.location.item.name] = self.location.item item_name = self.location.item.name self.location.item = None return f"You took {item_name}. And put it in your backpack." else: return "There is nothing to take."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _item_exists(self, location):\n \"Does nothing\"", "def do_store(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('%s does not exist in your inventory, the ground, africa or your pockets, what a shame.' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n \r\n try:\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n \r\n # broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.\r\n if item != None:\r\n print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n worldRooms[location][STORAGE].append(item)\r\n except KeyError:\r\n return(\"Don't even think about it buster brown.\")\r\n \r\n #item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n #if item != None:\r\n # print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n # inventory.remove(item) # remove from inventory\r\n # worldRooms[location][STORAGE].append(item) # add to the container\r", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def poplar(self, item_to_be_popped):\n if self.check_inventory(item_to_be_popped): # Basic check to see if it's in the list\n als_lament = item_to_be_popped# ;P\n for an_item in self.bag_of_holding: # here we are extracting an the index of the object in the list\n if an_item.name == item_to_be_popped:\n index = self.bag_of_holding.index(an_item)\n to_be_returned = self.bag_of_holding[index]\n # and here is where the majic happens and the item is removed from the list.\n self.bag_of_holding.remove(self.bag_of_holding[index])\n else:\n # for testing porpoises if the item is not in dah bag, remove later.\n print(\" {} was not found in bag of holding.\".format(item_to_be_popped))\n return None\n return to_be_returned", "def do_put(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('You want to put \"%s\" in what?!' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n if item != None:\r\n print('You put %s. in the container.' 
% (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][ITEMINV].append(item) # add to the container\r", "def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False", "def take(self, item_name):\n # Delete item from the current room's inventory\n item = self.current_room.inventory.remove(item_name)\n\n # Add item to player's inventory\n if item is not None:\n self.player.add(item)\n print(f\"{item_name} taken.\")\n else:\n print(\"No such item.\")", "def LookOn(play, item):\r\n\tspk(\"You start perusing the items on %s\" % item.name)\r\n\tif item.items != []:\r\n\t\tlookoner(play, item)\r\n\telse:\r\n\t\tspk(\"Nothing\")", "def get(self, command):\n\n for item in self.location.inventory:\n if item.name == command[1]:\n self.inventory.append(item)\n self.location.inventory.remove(item)\n print(\"You picked up a\", item.name)\n return\n print(command[1] + \" is not here!\")", "def is_final_item(item_id):\n return \"into\" not in items[\"data\"][str(item_id)]", "def pickup(self, item: Item) -> bool:\n if len(self.bag) >= 5:\n return False\n\n if self.__room._take(item):\n self.__bag.append(item)\n return True\n\n raise Exception(f\"{item} was not found in {self.room}\")", "def LookIn(play, item):\r\n\tspk(\"You look in %s and find\" % item.name)\r\n\tif item.items != []:\r\n\t\tlookiner(item)\r\n\telse:\r\n\t\tspk(\"Nothing\")", "def put_in(self, item):\n try:\n self.bag_of_holding.append(item)\n print(\"You have added {} to your inventory.\".format(item))\n except:\n print('Error in Inventory method: put_in')", "def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass", "def pickUpItem(self, app, newItem: Stack):\n\n if newItem.isEmpty(): return\n\n # Prioritize existing stacks of the item first\n for (i, slot) in enumerate(self.inventory):\n stack = slot.stack\n if stack.isInfinite() and stack.item == newItem.item:\n # It just stacks into an infinite slot, so no change\n return\n elif newItem.isInfinite() and stack.item == newItem.item:\n # ditto\n return \n elif stack.amount > 0 and stack.item == newItem.item:\n self.inventory[i].stack.amount += newItem.amount\n return\n\n # If that fails, then just add the item to the next open space\n for (i, slot) in enumerate(self.inventory):\n if slot.isEmpty():\n self.inventory[i].stack = newItem\n return\n \n # TODO: Full inventory??\n 1 / 0", "def grab(self):\n if len(self.location.contents) == 0:\n print('Hate to break it to you, but there\\'s nothing to grab.')\n elif random() >= .75:\n item = self.location.contents[\n randrange(len(self.location.contents))]\n self.inventory.append(item)\n self.location.remove(item)\n print('Nice one, you actually managed to grab the {}! 
'\n 'I\\'m not even angry, I\\'m impressed.'.format(item))\n else:\n print('Well, at least you flailed in an impressive fashion.')", "def test_add_item(self):\n self.inv.add_item(self.item_helmet)\n str_inventory = self.inv.pretty\n str_item = self.item_helmet.pretty\n\n self.rebuild_instance()\n str_unequipped = self.inv.unequipped[0].pretty\n\n assert str_inventory == self.inv.pretty\n assert str_item == str_unequipped", "def checkforitems(curpos):\n if DARK and not HAS_FLASHLIGHT:\n printmessage(\"But you can't see a thing!\", 5, MAGENTA, 2) # was 2\n return\n\n if ITEM_LIST[curpos] != int(len(ITEMTYPES) - 2): # if the item at curpos isnt 'None'\n printmessage(\"You found a %s!\" % ITEMTYPES[ITEM_LIST[curpos]], 5, MAGENTA, 0)\n add_score(50)\n additemtoinventory(ITEM_LIST[curpos]) # funtion removes item from map\n pause_for_keypress()\n else:\n printmessage(\"You look around, and find nothing\", 5, CYAN, 2)", "def put_in_quiet(self, item):\n try:\n self.bag_of_holding.append(item)\n except:\n print('Error in Inventory method: put_in')", "def _item_not_found(item):\n if _is_element_present(PROMPT_BOX[\"Heading\"]):\n if \"not on file\" in _get_text(PROMPT_BOX[\"Heading\"]):\n return click_message_box_key(\"OK\", verify=False)\n return False", "def check_backpack(self):\n if self.backpack:\n list_of_items =\"\"\n for item in self.backpack:\n list_of_items += \">\" + self.backpack[item].name + \"\\n\" + \\\n self.backpack[item].description + \"\\n\" + \\\n self.backpack[item].usage + \"\\n\"\n return list_of_items\n else:\n return \"You have nothing in your backpack.\"", "def pick_up_item_from_ground(life,uid):\n\t#TODO: Misleading function name.\n\t_item = items.get_item_from_uid(uid)\n\t_id = add_item_to_inventory(life,_item)\n\t\n\tif _id:\n\t\treturn _id\n\n\traise Exception('Item \\'%s\\' does not exist at (%s,%s,%s).'\n\t\t% (item,life['pos'][0],life['pos'][1],life['pos'][2]))", "def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")", "def _is_missing(self, item):\n pass", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "def add_item(self, item_name):\n if not self.has_item(item_name):\n self.item_list.append(item_name)", "def has_item(item: Item):\n async def _wrapper(ctx):\n if not (res := 0 < await ctx.db.get(\"items\", ctx.author, item.id)):\n name = (f\"an \" if any(item.name.startswith(vowel) for vowel in \"aeiou\") else \"a \") + f\"**{item}**\"\n await ctx.send(f\"You need to own {name} in order to use this command.\" + (\n f\" You can go buy one from the shop! 
(`{ctx.clean_prefix}shop`)\" if item.buyable else \"\"\n ))\n return res\n\n return discord.ext.commands.check(_wrapper)", "def item_change_location(item,location,call):\n mms_id, holding_id, pid = item.find(\".//mms_id\").text, item.find(\".//holding_id\").text, item.find(\".//pid\").text\n item.find(\".//item_data/location\").text = location\n # On nettoie la cote présente au niveau de l'exemplaire\n item.find(\".//item_data/alternative_call_number\").text = ''\n item.find(\".//item_data/alternative_call_number_type\").text = ''\n # On ne renvoie pas les infos de la holdings\n holding_data = item.find(\".//holding_data\")\n item.remove(holding_data)\n # Si un autre exemplaire lié à la même notice a déjà été traité\n if mms_id in processed_record_dict:\n # Si la localisation était la même que celle de l'exemplaire déjà traité\n if location_code in processed_record_dict[mms_id]:\n # Si les cotes sont différentes alors on créé la cote sous l'exemplaire\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write(\"{}\\n\".format(barcode))\n item.find(\".//item_data/alternative_call_number\").text = call\n return mms_id, holding_id, pid", "def pickup_item(self, ):\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['ether']:\n self.pos_item['ether'] = (0, 0 * sprite_size)\n self.item_count += 1\n self.sound_item.play()\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['tube']:\n self.pos_item['tube'] = (0, 1 * sprite_size)\n self.item_count += 1\n self.sound_item.play()\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['needle']:\n self.pos_item['needle'] = (0, 2 * sprite_size)\n self.item_count += 1\n self.sound_item.play()", "def give(self):\n if self.location.character:\n item = input(f\"What do you want to give to {self.location.character.name}?\\n>\")\n if item in self.backpack:\n if self.location.character.give(item):\n if isinstance(self.location.character, Friend):\n loot = self.location.character.possession\n self.backpack[loot.name] = loot\n self.location.character.treat = None\n self.location.character.possession = None\n del self.backpack[item]\n return f\"{self.location.character.name} accepted your gift, and gave you {loot}\"\n if isinstance(self.location.character, Enemy):\n name = self.location.character.name\n self.location.character = None\n del self.backpack[item]\n return f\"You fend off {name} with {item}\"\n else:\n return f\"It does not accept {item}\"\n else:\n return f\"{self.location.character.name} does not like {item}\"\n else:\n return \"You don't have this\"\n else:\n return \"There is no one here\"" ]
[ "0.7072527", "0.6410307", "0.6302232", "0.6248276", "0.60911554", "0.5968652", "0.58929795", "0.587979", "0.5840509", "0.5822827", "0.57909214", "0.57850164", "0.5784702", "0.57736444", "0.57595176", "0.5756287", "0.5720788", "0.5718883", "0.5711406", "0.5669959", "0.5650222", "0.5648523", "0.5646641", "0.56371003", "0.55613375", "0.55594313", "0.5535422", "0.55300266", "0.55258304", "0.54951096" ]
0.7623877
0
Description Builds string of all item names, descriptions and usages of items in backpack dictionary Returns built string Returns "You have nothing in your backpack." string if backpack is empty
def check_backpack(self): if self.backpack: list_of_items ="" for item in self.backpack: list_of_items += ">" + self.backpack[item].name + "\n" + \ self.backpack[item].description + "\n" + \ self.backpack[item].usage + "\n" return list_of_items else: return "You have nothing in your backpack."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_description():\r\n return{\"I'll never yield!\":\"Grants a shield.\",\r\n \"Stay still!\":\"Affected unit cannot act in their turn.\"\r\n }", "def __str__(self):\n reprStr = 'Help Mario build Iron Man suit!'+'\\n' +'To make the ' + self._name + ',you need:'+'\\n'\n for part in self._supplies:\n reprStr = reprStr + str(part.getCount()) + ' ' + part.getData() + '\\n'\n return reprStr", "def descr(self):\n return self.recipe.get(\"descr\", \"no description\")", "def full_description(self):\n des = describe_dut(self.dut) if self.dut else ''\n if self.build:\n des += ' with ' + self.build\n if self.result_id:\n des += ' BVT result ID ' + str(self.result_id)\n return (self.description if self.description \n else 'unknown test') + ' on ' + des", "def description():", "def Description(self) -> str:", "def Description(self) -> str:", "def get_package_description(item: str) -> Union[str, None]:\n description = remove_prefix(item, PackageInfoPrefix.DESCRIPTION)\n return remove_suffix(description, \"\\n\")", "def __str__(self):\n return \"Bag({})\".format(self.items)", "def get_description(self):\n return \"-\".join(\n map(str, (self.release, self.chromosome, self.start, self.reference, self.alternative))\n )", "def take(self):\n if self.location.item:\n self.backpack[self.location.item.name] = self.location.item\n item_name = self.location.item.name\n self.location.item = None\n return f\"You took {item_name}. And put it in your backpack.\"\n else:\n return \"There is nothing to take.\"", "def build_description(self):\n return self._build_description", "def toString(self):\n # TODO complete this method\n # No. 1\n description = \"Stock Name: \" + self.name + \"\\tBarcode: \" + self.barcode + \\\n \"\\tQty remaining: \" + str(self.quantity) + \"\\tSell by: \" + str(self.sellbydate)\n\n return description", "def run(self):\n logging.debug('List Available Recipes')\n if self.short:\n print(' '.join(pakit.recipe.RDB.names(desc=False)))\n return\n\n available = ['Program Description']\n available.extend(pakit.recipe.RDB.names(desc=True))\n\n msg = 'Available Recipes:'\n msg += PREFIX + PREFIX.join(available)\n print(msg)\n return msg", "def create_item_description(self, gameStateObj):\n surf = Engine.create_surface((98, 56 + 80), transparent=True)\n\n width, height = (96, 56) # ??\n item = gameStateObj.activeMenu.getSelection()\n \n real_surf = MenuFunctions.CreateBaseMenuSurf((width, height), 'BaseMenuBackgroundOpaque')\n BGSurf = Engine.create_surface((real_surf.get_width() + 2, real_surf.get_height() + 4), transparent=True, convert=True)\n BGSurf.blit(real_surf, (2, 4))\n BGSurf.blit(GC.IMAGESDICT['SmallGem'], (0, 0))\n # Now make translucent\n BGSurf = Image_Modification.flickerImageTranslucent(BGSurf, 10)\n\n if item.weapon and self.canWield(item):\n top = 4\n left = 2\n GC.FONT['text_white'].blit('Affin', BGSurf, (width//2 - GC.FONT['text_white'].size('Affin')[0] + left, 4 + top))\n GC.FONT['text_white'].blit('Atk', BGSurf, (5 + left, 20 + top))\n GC.FONT['text_white'].blit('AS', BGSurf, (width//2 + 5 + left, 20 + top))\n GC.FONT['text_white'].blit('Hit', BGSurf, (5 + left, 36 + top))\n GC.FONT['text_white'].blit('Avo', BGSurf, (width//2 + 5 + left, 36 + top))\n \n dam = str(self.damage(gameStateObj, item))\n acc = str(self.accuracy(gameStateObj, item))\n avo = str(self.avoid(gameStateObj, item))\n atkspd = str(self.attackspeed(item))\n AtkWidth = GC.FONT['text_blue'].size(dam)[0]\n HitWidth = GC.FONT['text_blue'].size(acc)[0]\n AvoidWidth = GC.FONT['text_blue'].size(avo)[0]\n ASWidth = 
GC.FONT['text_blue'].size(atkspd)[0] \n GC.FONT['text_blue'].blit(dam, BGSurf, (width//2 - 4 - AtkWidth + left, 20 + top))\n GC.FONT['text_blue'].blit(atkspd, BGSurf, (width - 8 - ASWidth + left, 20 + top))\n GC.FONT['text_blue'].blit(acc, BGSurf, (width//2 - 4 - HitWidth + left, 36 + top))\n GC.FONT['text_blue'].blit(avo, BGSurf, (width - 8 - AvoidWidth + left, 36 + top))\n\n item.drawType(BGSurf, width//2 + 8 + left, 3 + top)\n\n else: # assumes every non-weapon has a description\n if item.desc:\n words_in_item_desc = item.desc\n else:\n words_in_item_desc = \"Cannot wield.\"\n lines = TextChunk.line_wrap(TextChunk.line_chunk(words_in_item_desc), width - 8, GC.FONT['text_white'])\n\n for index, line in enumerate(lines):\n GC.FONT['text_white'].blit(line, BGSurf, (4 + 2, 4+index*16 + 4))\n\n surf.blit(BGSurf, (0, 76))\n\n if gameStateObj.cursor.position[0] > GC.TILEX//2 + gameStateObj.cameraOffset.get_x():\n rightflag = True\n else:\n rightflag = False\n\n if not self.generic_flag:\n BigPortraitSurf = self.bigportrait\n # If on the left, mirror the character portrait\n if not rightflag:\n BigPortraitSurf = Engine.flip_horiz(BigPortraitSurf)\n surf.blit(BigPortraitSurf, (2, 0))\n\n return surf", "def description(self):\n return ' '.join(self._description)", "def __str__(self):\n\n descr = \"You are in the \" + self.name + \"\\n\"\n for key in self.exits:\n descr += \"You can go \" + key + \" to the \" + self.exits[key].name + \"\\n\"\n for item in self.inventory:\n descr += \"There is a \" + item.name + \" here.\\n\"\n for item in self.objects:\n descr += item.name + \" is here.\"\n return descr", "def __str__(self):\n # These are required tags so we should have generated an\n # error beforehand and this shouldn't raise a ``KeyError``\n s = [(\"Album Title\", self[\"TITLE\"]), (\"Album Artist\", self[\"ARTIST\"]),\n (\"Year\", self[\"DATE_RECORDED\"]), (\"Genre\", self[\"GENRE\"])]\n s = OrderedDict(s)\n\n def add_optional(key):\n nonlocal s\n if key in self:\n text = key.replace('_', ' ').split(' ')\n text = ' '.join([x.capitalize() for x in text])\n s[text] = self[key]\n\n add_optional(\"LABEL\")\n add_optional(\"ISSUE_DATE\")\n add_optional(\"ORIGINAL_MEDIUM\")\n add_optional(\"VERSION\")\n add_optional(\"HD_FORMAT\")\n add_optional(\"DISC_NAME\")\n add_optional(\"PHASE_NAME\")\n if self.discs > 1:\n s[\"Disc\"] = self[\"PART_NUMBER\"]\n s[\"Discs\"] = self.discs\n if self.channels != \"2.0\":\n s[\"Channels\"] = self.channels\n # Now we have to deal with the formatted output. 
First we need\n # the maximum length of the keys to properly align the output\n # Note that the keys used will have a space appended, so we add 1\n max_len = max(len(x[0]) for x in s)+1\n\n # Output for an entry in ``s`` of (\"Year\", \"2016\") with a ``max_len`` of 10\n # would be: '= Year .....: 2016'\n def line(k, v):\n return f\"{k.ljust(max_len, '.')}: {v}\"\n\n s = [line(*x) for x in s.items()]\n # Now we can reuse ``max_len`` to mean the longest fully formatted line\n # We want to add '= ' to the left side and ' =' to the right side to\n # form a border\n max_len = max(len(x) for x in s)\n s = [f'= {x:{max_len}} =' for x in s]\n max_len += 4\n s = [\" ALBUM INFORMATION \".center(max_len, \"=\")] + s + [\"=\" * max_len]\n return \"\\n\".join(s)", "def toString(self):\n #TODO complete this method\n description = \"Stock Name: \" + self.name + \"\\tBarcode: \" + self.barcode + \\\n \"\\tQty remaining: \" + str(self.quantity)\n\n return description", "def print_inventory(self):\n print(\"Backpack:\")\n # Loop for each item in the players inventory\n for item in self.inventory:\n print('* ' + str(item))\n # Assigns the best weapon\n best_weapon = self.most_powerful_weapon()\n # print statement telling the best weapon in inventory\n print(\"Your best weapon is your {}\".format(best_weapon))", "def description(self) -> Optional[str]:\n return self._itempage.descriptions.get(\"en\", None)", "def get_description(self):", "def __str__(self) -> str:\r\n return f\"{self.name}, HP: {self.health}, items: {self.inventoryPack}\"", "def description(self):\n return f\"{self.name} has {len(self.playlist)} songs.\"", "def descString(self):\n return \"\".join ([self.Name, \" (AR \", str(self.AR), \", Max DEX \"\\\n , str(self.MaxDEXMod), \") - \", str(self.Value), \" gp\"])", "def get_description(self) -> str:\n pass", "def get_description(self):\n return COMPONENT_LIST[self.index][1]", "def getDescription(self):\n return self.base.get(\"description\", [])", "def description(self):", "def look(self, item_to_be_described):\n for item in self.bag_of_holding:\n if item_to_be_described == item.name:\n print('{}'.format(item.description))" ]
[ "0.61309785", "0.5916461", "0.57825965", "0.5776227", "0.5756864", "0.5594356", "0.5594356", "0.5589774", "0.55727327", "0.5532035", "0.55096483", "0.55088323", "0.54915565", "0.54891926", "0.5487656", "0.5467071", "0.54418087", "0.5440473", "0.5426421", "0.54158795", "0.54072875", "0.54058653", "0.53983086", "0.53792167", "0.5335939", "0.5334696", "0.5321127", "0.53202796", "0.5319431", "0.5307322" ]
0.68072677
0
Description Asks user to input item name to equip\n Checks if item name exists in backpack dictionary\n Returns "You don't have this" string if item name not found in backpack\n Checks if item type is equippable\n Puts item in equipped dictionary\n If item type is "Weapon" changes strength attribute of player to weapons strength attribute\n Returns "You have equipped item_name on item_type item slot" string\n Returns "You can not equip this" string otherwise
def equip(self): item_name = input("What item do you want to equip?\n>") if item_name in self.backpack: item = self.backpack[item_name] else: return "You don't have this" if item.type in self.equipped: self.equipped[item.type] = item if item.type == "Weapon": self.strength = item.strength return f"You have equipped {item.name} on {item.type} item slot" else: return "You can not equip this"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def equip(self, command):\n\n if len(command) > 1:\n if not self.weapon:\n for item in self.inventory:\n if item.name == command[1]:\n if command[1] == 'knife' or command[1] == 'rock' or command[1] == 'stick' or command[1] == 'lamp':\n self.inventory.remove(item)\n self.weapon.append(item)\n print(\"You equipped a \" + item.name)\n return\n else:\n print(\"You can't equip that\")\n else:\n print('You cannot equip two items \\nYou must unequip the ' + self.weapon[0].name + ' first.')\n else:\n print(\"Equip what?\")", "def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"", "def iteminfo():\n itemcode = input(\"Enter item code: \")\n if itemcode in FULLINVENTORY:\n printdict = FULLINVENTORY[itemcode]\n for key, value in printdict.items():\n print(\"{}:{}\".format(key, value))\n else:\n print(\"Item not found in inventory\")", "def item_info():\n item_code = get_input(\"Enter item code: \")\n if item_code in FULL_INVENTORY:\n print_dict = FULL_INVENTORY[item_code]\n output = \"\"\n for key, value in print_dict.items():\n output += (\"{}:{}{}\".format(key, value, \"\\n\"))\n else:\n output = \"Item not found in inventory\"\n print(output)\n return output", "def give(self):\n if self.location.character:\n item = input(f\"What do you want to give to {self.location.character.name}?\\n>\")\n if item in self.backpack:\n if self.location.character.give(item):\n if isinstance(self.location.character, Friend):\n loot = self.location.character.possession\n self.backpack[loot.name] = loot\n self.location.character.treat = None\n self.location.character.possession = None\n del self.backpack[item]\n return f\"{self.location.character.name} accepted your gift, and gave you {loot}\"\n if isinstance(self.location.character, Enemy):\n name = self.location.character.name\n self.location.character = None\n del self.backpack[item]\n return f\"You fend off {name} with {item}\"\n else:\n return f\"It does not accept {item}\"\n else:\n return f\"{self.location.character.name} does not like {item}\"\n else:\n return \"You don't have this\"\n else:\n return \"There is no one here\"", "def inspect_inventory(sell=False):\r\n choice = 'poop'\r\n\r\n if sell:\r\n while choice != 'done':\r\n choices = list(player.get_inventory())\r\n choices += ['done']\r\n choice = helpful.pick_item(choices,'Sell something?','done')\r\n # if choice == 'done':\r\n if str(choice) == 'mythical kumquat':\r\n raw_input(\"You can't sell your \" + str(choice) + \"!\\n\")\r\n elif choice == 'done':\r\n return\r\n else:\r\n cost = choice.get_cost()\r\n question = 'Sell your ' + str(choice) + ' for $' + str(cost) + '?'\r\n sell_yn = helpful.pick_item(['yes','no'],question)\r\n if sell_yn == 'yes':\r\n cost = choice.get_cost()\r\n player.gain_money(cost)\r\n player.drop(choice)\r\n raw_input('You sold your ' + str(choice) + '. 
' + \\\r\n \"That's $\" + str(cost) + ' more in your pocket.\\n')\r\n\r\n else: #if not selling\r\n while choice != 'done':\r\n choices = list(player.get_inventory())\r\n choices += ['done']\r\n intro = 'Type item name/number for more info...\\n\\nInventory:' \r\n choice = helpful.pick_item(choices,intro,'done')\r\n if choice == 'done':\r\n return\r\n raw_input(choice.advanced_str())\r\n if choice.get_health() > 0:\r\n use_yn = helpful.pick_item(['yes','no'],'Use this item?')\r\n if use_yn == 'yes':\r\n player.use(choice)", "def unequip(self, command):\n\n if len(command) > 1:\n for item in self.weapon:\n if command[1] == item.name:\n if command[1] == 'knife' or command[1] == 'stick' or command[1] == 'rock':\n self.weapon.remove(item)\n self.inventory.append(item)\n print(\"You unequipped a \" + item.name)\n return\n else:\n print(\"You don't have anything equipped\")\n else:\n print(\"Unequip what?\")", "def check_equipped(self):\n equipped_str = \"\"\n for body_part, item in self.equipped.items():\n if item is None:\n equipped_str += f\"On {body_part} slot you have equipped nothing\"\n else:\n equipped_str += f\"On {body_part} slot you have equipped {item}\"\n return equipped_str", "def equip(self, item, actor):\n if (item.slot not in self.EqSlots.keys()):\n # Not an equipment.\n return 1\n\n old_item = self.EqSlots.get(item.slot)\n\n # Ok, equip and remove from list.\n self.EqSlots[item.slot] = item\n self.inventory_remove(item)\n item.give_bonus(actor)\n\n if (old_item is not None):\n # Was not empty - remove (any) old equipment bonus and add to inventory\n old_item.remove_bonus(actor)\n self.inventory_add(old_item)\n return 0", "async def equip(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n item = ' '.join(args)\n out = ch.equip_item(ctx.user_object, item.lower())\n await ctx.send(out)", "def do_eat(self, arg):\r\n itemToEat = arg.lower()\r\n\r\n if itemToEat == '':\r\n print('Eat what? Type \"inventory\" or \"inv\" to see whats in your inventory.')\r\n return\r\n\r\n cantEat = False\r\n\r\n for item in getAllItemsMatchingDesc(itemToEat, inventory):\r\n if worldItems[item].get(EDIBLE, False) == False:\r\n cantEat = True\r\n continue # there may be other items named this that you can eat, so we continue checking\r\n # NOTE - If you wanted to implement hunger levels, here is where\r\n # you would add code that changes the player's hunger level.\r\n print('You eat %s may your bowls forever question your terrible choices.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n return\r\n\r\n if cantEat:\r\n print('I dont think the \"%s\" would like you to do that...' % (worldItems[item][SHORTDESC]))\r\n else:\r\n print('You do not have \"%s\". Type \"inventory\" or \"inv\" to see what in your inventory.' % (itemToEat))", "def do_store(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('%s does not exist in your inventory, the ground, africa or your pockets, what a shame.' 
% (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n \r\n try:\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n \r\n # broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.\r\n if item != None:\r\n print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n worldRooms[location][STORAGE].append(item)\r\n except KeyError:\r\n return(\"Don't even think about it buster brown.\")\r\n \r\n #item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n #if item != None:\r\n # print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n # inventory.remove(item) # remove from inventory\r\n # worldRooms[location][STORAGE].append(item) # add to the container\r", "def bum(backpack):\n loot = [[\"vodka\", \"food\", 1]]\n loot2 = [[\"gold coin\", \"other\", 1]]\n print(\"\\nBum says: \")\n if \"gold coin\" in backpack:\n if backpack['gold coin'][0] >= 1:\n vodka_sell = input(\n \"-I see gold in your pocket!\\nDo u wanna change 1 gold coin for 1 vodka?\\n(write yes to accept or no for reject)\\n\")\n if vodka_sell == \"yes\":\n try:\n vodka_ask = int(input(\"-How much vodka u need my friend?\\n\"))\n if vodka_ask <= backpack[\"gold coin\"][0]:\n print(\"GLUP \")\n inve.remove_item(backpack, loot2, vodka_ask) # removing coins from backpack\n inve.add_to_inventory(backpack, loot, vodka_ask) # adding vodka\n enter()\n else: # handling situation when u have no gold coins\n print(\"-U dont have coins for this...god's drink, f... off! \")\n enter()\n except ValueError: # handling bugs with writing some other stuff then int\n print(\"(U need to write a number) \")\n enter()\n elif vodka_sell == \"no\":\n print(\"-Bye, come to papa again :0 \")\n enter()\n else:\n print(\"-I dont know what you talking about \")\n enter()\n else:\n print(\"-You must have at least 1 gold coin to buy vodka! \")\n enter()\n else:\n print(\"-U dont have coins for this...god's drink, f... off! 
\")\n enter()\n return backpack", "def interact_with(arguments, player):\n inputted_item = \" \".join(arguments)\n current_loc = world.tile_exists(player.location_x, player.location_y)\n\n inventory_names = []\n for item in player.inventory:\n for name in item.name:\n inventory_names.append(name.lower())\n room_names = []\n for item in current_loc.items:\n for name in item.name:\n room_names.append(name.lower())\n\n # If it's in player inventory\n if inputted_item in inventory_names:\n for i, item in enumerate(player.inventory):\n if inputted_item in [name.lower() for name in item.name]:\n player.inventory[i].interact(player)\n return\n # If it's in the room\n elif inputted_item in room_names:\n for i, item in enumerate(current_loc.items):\n if inputted_item in [name.lower() for name in item.name]:\n current_loc.items[i].interact(player)\n return\n # If it's not in inventory or room\n else: #TODO: POSSIBLE ERROR - WHAT IF THERE'S AN IDENTICALLY NAMED ITEM IN THE INVENTORY AND ROOM?\n print(\"Can't do that\")\n return", "def test_equip_helmet(self):\n inv_str = self.inv.pretty\n item_str = self.item_helmet.pretty\n ids_to_unequip = self.inv.equip(self.item_helmet)\n\n self.rebuild_instance()\n inv_str2 = self.inv.pretty\n helmet = self.inv.head.pretty\n\n assert inv_str.replace(\n \"head=None\", \"head='<HeadArmour(id=2)>'\").replace(\n \"equipped=[]\", \"equipped='[<HeadArmour(id=2)>]'\").replace(\n \"unequipped='[<HeadArmour(id=2)>]'\", \"unequipped=[]\") == inv_str2\n\n assert item_str.replace(\n \"equipped=False\", \"equipped=True\").replace(\n \"unequipped_position=0\", \"unequipped_position=None\") == helmet", "def use_on(arguments, player):\n\n for i, arg in enumerate(arguments):\n if arg == \"on\":\n index_of_on = i\n break\n item_1 = \" \".join(arguments[:index_of_on]) # String of item_1 in name format\n item_2 = \" \".join(arguments[index_of_on + 1:]) # String of item_2 in name format\n\n # Gathering and finding item names\n inventory_names = [] # All possible names for items in inventory\n room_names = [] # All possible names for items in the room\n for item in player.inventory:\n inventory_names += item.name\n for item in world.tile_exists(player.location_x, player.location_y).items:\n room_names += item.name\n\n # Making sure first item is in player's inventory\n if item_1 not in [item.lower() for item in inventory_names]:\n print(\"'\" + item_1 + \"' not in your inventory.\")\n return\n\n # Gets actual item_1 object\n else:\n for i, item in enumerate(player.inventory):\n if item_1 in [name.lower() for name in item.name]:\n item_1_index = i\n break\n\n # Making sure second item is in player's inventory or in the room\n if item_2 not in [item.lower() for item in inventory_names]:\n if item_2 not in [item.lower() for item in room_names]:\n print(\"'\" + item_2 + \"' not in your inventory or anywhere nearby.\")\n return\n\n # Getting the actual item_2 object (only reaches here if it is in room or inventory)\n # WHAT IF THERE'S AN IDENTICALLY NAMED ITEM IN THE INVENTORY AND ROOM?\n for i, item in enumerate(player.inventory):\n if item_2 in [name.lower() for name in item.name]:\n item_2_index = i\n item_2_location = \"inventory\"\n break\n for i, item in enumerate(world.tile_exists(player.location_x, player.location_y).items):\n if item_2 in [name.lower() for name in item.name]:\n item_2_index = i\n item_2_location = \"room\"\n break\n\n # Calling the associated \"use\" function with the correct objects\n if item_2_location == \"inventory\":\n 
player.inventory[item_1_index].use(player.inventory[item_2_index], player)\n elif item_2_location == \"room\":\n player.inventory[item_1_index].use(\n world.tile_exists(player.location_x, player.location_y).items[item_2_index], player)\n return", "def _equip_item(life, item_id):\n\t_limbs = get_all_limbs(life['body'])\n\t_hand = can_hold_item(life)\n\titem = get_inventory_item(life, item_id)\n\t\n\tif not _hand:\n\t\tif 'player' in life:\n\t\t\tgfx.message('You don\\'t have a free hand!')\n\t\treturn False\n\t\n\tremove_item_in_storage(life, item_id)\n\t_hand['holding'].append(item_id)\n\t\n\t#logging.debug('%s equips a %s.' % (life['name'][0],item['name']))\n\t\n\treturn True", "def choose_inventory() -> list:\r\n print(\"What weapon would you like to start with? Enter the corresponding number\\n(1) Blaster Pistol\\n\"\r\n \"(2) Blaster Rifle\\n(3) Assault Cannon\\n(4) Sniper Rifle\\n\")\r\n item_list = [\"Blaster Pistol\", \"Blaster Rifle\", \"Assault Cannon\", \"Sniper Rifle\"]\r\n user_input = str(input())\r\n if user_input == \"1\":\r\n return [item_list[0]]\r\n elif user_input == \"2\":\r\n return [item_list[1]]\r\n elif user_input == \"3\":\r\n return [item_list[2]]\r\n elif user_input == \"4\":\r\n return [item_list[3]]\r\n else:\r\n print(\"Please enter a valid item number\")\r\n choose_inventory()", "def choose_item():\n print_items()\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == 'blueprint':\n blueprint = ViewMap()\n blueprint.print_map()\n print(\"Type 'back' to go to main menu.\")\n else:\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n else:\n print(\"Type 'back' to go to main menu.\")", "def equip(self, item_name: str, quantity: int) -> None:\n raise_if_false(quantity >= 0, f\"Quantity [{quantity}] can't be negative\")\n raise_if_false(item_name in self._player_data.inventory, f\"Can't equip [{item_name}], not in inventory\")\n\n quantity_to_equip = min(quantity, self._player_data.inventory[item_name])\n if item_name in self._player_data.equipped_items:\n self._player_data.equipped_items[item_name] += quantity_to_equip\n else:\n self._player_data.equipped_items[item_name] = quantity_to_equip\n\n self.remove_from_inventory(item_name, quantity_to_equip)", "def trader(backpack):\n loot = [[\"gold coin\", \"other\", 1]]\n loot2 = [[\"corn\", \"food\", 1]]\n print(\"\\nTrader says: \")\n if \"corn\" in backpack:\n x = input(\"-Hey! 
So you want sell some corn mate?\\n(write yes or no): \")\n x = x.lower()\n if x == \"yes\":\n try:\n remove_corn = int(input(\"-How much u wanna sell?: \"))\n if remove_corn > backpack[\"corn\"][0]:\n print(\"-You dont have that much corn in ur backpack \")\n enter()\n else:\n print(\"-Thanks for corn :) \")\n inve.remove_item(backpack, loot2, remove_corn)\n inve.add_to_inventory(backpack, loot, remove_corn)\n enter()\n except ValueError:\n print(\"(U need to write a number): \")\n enter()\n elif x == \"no\":\n print(\"-Come to me when u wanna sell corn \")\n enter()\n else:\n print(\"(Your answer need to be yes or no) \")\n enter()\n else:\n print(\"-You dont have any corn, come to me when u get some \")\n enter()\n return backpack", "def do_put(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('You want to put \"%s\" in what?!' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n if item != None:\r\n print('You put %s. in the container.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][ITEMINV].append(item) # add to the container\r", "def add_new_item():\n #global FULL_INVENTORY\n item_code = get_input(\"Enter item code: \")\n item_desc = get_input(\"Enter item description: \")\n item_rental_price = get_input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n item_price = market_prices.get_latest_price(item_code)\n new_inventory_item = inventory_class.Inventory(item_code, item_desc,\n item_price, item_rental_price)\n is_furniture = get_input(\"Is this item a piece of furniture? (Y/N): \")\n if is_furniture.lower() == \"y\":\n item_material = get_input(\"Enter item material: \")\n item_size = get_input(\"Enter item size (S,M,L,XL): \")\n new_item = furniture_class.Furniture(new_inventory_item, item_material, item_size)\n else:\n is_electrical_appliance = get_input(\"Is this item an electric appliance? 
(Y/N): \")\n if is_electrical_appliance.lower() == \"y\":\n item_brand = get_input(\"Enter item brand: \")\n item_voltage = get_input(\"Enter item voltage: \")\n new_item = elec_appliances_class.ElecAppliances(new_inventory_item,\n item_brand, item_voltage)\n else:\n new_item = new_inventory_item\n FULL_INVENTORY[item_code] = new_item.return_as_dictionary()\n print(\"New inventory item added\")\n return new_item.return_as_dictionary", "def handle_item(curs):\n # inventory_char = {'1':[]}\n weapon_list = curs.execute(\n f\"\"\"SELECT name, item_ptr_id\n FROM\n (SELECT * FROM charactercreator_character_inventory as cii\n LEFT JOIN armory_item as ai\n ON cii.item_id = ai.item_id) as a\n LEFT JOIN armory_weapon as aw\n ON a.item_id=aw.item_ptr_id\n WHERE character_id=5;\n \"\"\")\n inventory_char = [weapon[0] for weapon in weapon_list if weapon[1] != None]\n # for weapon in weapon_list:\n # if weapon[1] != None:\n # inventory_char['1'].append(weapon[0])\n return inventory_char", "def handle_items():\n check50.exists(\"inventory.py\")\n # Take keys check\n check = check50.run(run_command)\n moves = [\"IN\", \"TAKE keys\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS taken.\")\n\n # Drop keys check then look for dropped keys check\n check = check50.run(run_command)\n moves = [\"IN\", \"TAKE keys\", \"OUT\", \"DROP keys\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS dropped.\")", "def box():\r\n raw_input(\"There's a note on a wooden box:\\n\\n\" + \\\r\n '\"Take an item, leave an item...\"\\n')\r\n choices = (\r\n ['box','done'],\r\n ['leave something in the box','ignore']\r\n )\r\n\r\n choice = helpful.pick_item(choices,'Do it.')\r\n\r\n if choice == 'box':\r\n\r\n gotem = pickle.load(open(\"box.txt\", \"rb\"))\r\n # gotem = helpful.Item('test_item')\r\n\r\n item = 'mythical kumquat'\r\n question = 'What to leave in the box?'\r\n\r\n while str(item) == 'mythical kumquat':\r\n item = helpful.pick_item(player.get_inventory(),question)\r\n question = 'No, really.'\r\n\r\n pickle.dump(item, open(\"box.txt\", \"wb\"))\r\n player.drop(item)\r\n player.grab(gotem)\r\n\r\n raw_input('You trade your ' + str(item) + ' for the item in the box:\\n')\r\n print gotem.advanced_str()", "def equip_item(life, item_id):\n\titem = get_inventory_item(life, item_id)\n\t\n\tif 'CAN_WEAR' in item['flags']:\n\t\tif not _equip_clothing(life, item_id):\n\t\t\treturn False\n\t\t\n\t\t_held = is_holding(life, item_id)\n\t\tif _held:\t\t\t\n\t\t\t#TODO: Don't breathe this!\n\t\t\t_held['holding'].remove(item_id)\n\t\t\n\telif 'CANNOT_HOLD' in item['flags']:\n\t\tlogging.error('Cannot hold item type: %s' % item['type'])\n\t\n\telse:\n\t\t_equip_item(life, item_id)\n\t\t\n\t\titems.process_event(item, 'equip')\n\t\n\tlife['speed_max'] = get_max_speed(life)\n\t\n\tif life['speed'] > life['speed_max']:\n\t\tlife['speed'] = life['speed_max']\n\t\n\tcreate_and_update_self_snapshot(life)\n\t\n\treturn True", "def do_use(self, arg):\r\n itemToUse = arg.lower()\r\n \r\n if itemToUse == '':\r\n print('Use what? 
Type \"inv\" to see the items in your invetory.')\r\n return\r\n \r\n cantUse = False\r\n \r\n #look up the item the player describes\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n if itemToUse not in invDescWords:\r\n print('You do not have that item to use it')\r\n return\r\n \r\n for item in getAllItemsMatchingDesc(itemToUse, inventory):\r\n if worldItems[item].get(USEABLE, True) == False:\r\n cantUse = True\r\n continue\r\n print('%s' % (worldItems[item][USEDESCTRUE]))\r\n #print('You use %s' % (worldItems[item][SHORTDESC]))\r\n #inventory.remove(item) \r\n return\r\n \r\n if cantUse:\r\n print('You cannot use \"%s\".' % (itemToUse))\r\n else:\r\n print('You do not have that item to use.')", "def inventory(game):\n\n # Offset for displaying list on-screen\n x, y = 6, 2\n # Currently selected item\n selection = 0\n # Max number of items shown at once\n max_items = 10\n # Number of items scrolled through so far\n scrolled = 0\n # Offset for messages\n x_msg, y_msg = 2, max_items + 4\n\n game.window.clear()\n while True:\n # Draw selection cursor\n game.window.addstr(y + selection - scrolled, x - 4, CURSOR)\n\n # Get items between current scroll amount and max_items\n items = list(enumerate(game.player.items))[scrolled:scrolled+max_items]\n\n # Print each item in inventory\n for i, item in items:\n # If more than 1, put the quantity\n if item.quantity > 1:\n formatted = '{}: {} x {}\\n'.format(i, item.quantity, item.name)\n else:\n formatted = '{}: {}\\n'.format(i, item.name)\n\n game.window.addstr(i + y - scrolled, x, formatted)\n\n # If equipped, put a little star next to the item\n if item in game.player.equipment.values():\n game.window.addstr(i + y - scrolled, x - 2, '*')\n\n key = game.window.getkey()\n\n if key == 'k' or key == 'KEY_UP':\n if selection > 0:\n selection -= 1\n\n # If the user tries to go above the screen, scroll up by one\n if selection < scrolled:\n scrolled -= 1\n\n game.window.clear()\n\n if key == 'j' or key == 'KEY_DOWN':\n if selection < len(game.player.items) - 1:\n selection += 1\n\n # If the user tries to go below the screen, scroll down by one\n if selection > scrolled + max_items - 1:\n scrolled += 1\n\n game.window.clear()\n\n if key == 'e':\n # Equip the selected item\n if game.player.items[selection].equippable:\n game.player.equip(game.player.items[selection])\n game.window.clear()\n else:\n game.window.addstr(y_msg, x_msg, \"Cannot equip non-equippable item\")\n\n if key == 'c':\n # Eat the selected item\n if game.player.items[selection].kind == 'food':\n heal = game.player.items[selection].stats['hp']\n game.player.eat(game.player.items[selection])\n\n # Put selection cursor back to an item\n selection -= 1\n game.window.clear()\n\n game.window.addstr(y_msg, x_msg, \"Healed for {} hp\".format(heal))\n else:\n game.window.addstr(y_msg, x_msg, \"Cannot eat non-food item\")\n\n if key == 'l':\n # Print the item name and description\n item = game.player.items[int(selection)]\n game.window.addstr(y_msg, x_msg, '{}\\n\\n{}'.format(item.name, item.desc))\n\n if key == 'q':\n break\n\n if key == '?':\n help_inventory(game)\n continue", "def addnewitem():\n\n itemcode = input(\"Enter item code: \")\n itemdescription = input(\"Enter item description: \")\n itemrentalprice = input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n itemprice = get_latest_price(itemcode)\n\n isfurniture = input(\"Is this item a piece of furniture? 
(Y/N): \")\n if isfurniture.lower() == \"y\":\n add_furniture(itemcode, itemdescription, itemprice, itemrentalprice)\n else:\n iselectricappliance = input(\"Is this item an electric appliance?\"\n \" (Y/N): \")\n if iselectricappliance.lower() == \"y\":\n add_appliance(itemcode, itemdescription, itemprice, itemrentalprice)\n add_non_furniture_nor_appliance(itemcode, itemdescription, itemprice,\n itemrentalprice)\n print(\"New inventory item added\")" ]
[ "0.711861", "0.67356426", "0.6632231", "0.65054053", "0.648374", "0.64348775", "0.6431", "0.64257723", "0.6394632", "0.63630646", "0.634077", "0.6334305", "0.63015056", "0.62882954", "0.6286444", "0.62789226", "0.62363315", "0.620253", "0.61553633", "0.6134195", "0.6131085", "0.60529727", "0.6037612", "0.6031057", "0.6021364", "0.59800905", "0.5974737", "0.5961837", "0.59556574", "0.595435" ]
0.85885817
0
Description
Asks user to input item name to be given.
Checks if character exists in the current location,
    Returns "There is no one here" string if no character exists.
Checks if item name exists in backpack dictionary,
    Returns "You don't have this" string if item is not found.
Checks return value of give() method from character in current location,
    If True, checks class of character object
        If Friend
            Puts possession attribute value to backpack
            Sets treat and possession attribute values of character in current location to None
            Deletes item given from backpack dictionary
            Returns "character_name accepted your gift, and gave you item_name" string
        If Enemy
            Sets character attribute value of current location to None
            Deletes item given from backpack dictionary
            Returns "You fend off character_name with item_name" string
    If False
        Returns "character_name does not like item_name" string
def give(self): if self.location.character: item = input(f"What do you want to give to {self.location.character.name}?\n>") if item in self.backpack: if self.location.character.give(item): if isinstance(self.location.character, Friend): loot = self.location.character.possession self.backpack[loot.name] = loot self.location.character.treat = None self.location.character.possession = None del self.backpack[item] return f"{self.location.character.name} accepted your gift, and gave you {loot}" if isinstance(self.location.character, Enemy): name = self.location.character.name self.location.character = None del self.backpack[item] return f"You fend off {name} with {item}" else: return f"It does not accept {item}" else: return f"{self.location.character.name} does not like {item}" else: return "You don't have this" else: return "There is no one here"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def equip(self):\n item_name = input(\"What item do you want to equip?\\n>\")\n if item_name in self.backpack:\n item = self.backpack[item_name]\n else:\n return \"You don't have this\"\n if item.type in self.equipped:\n self.equipped[item.type] = item\n if item.type == \"Weapon\":\n self.strength = item.strength\n return f\"You have equipped {item.name} on {item.type} item slot\"\n else:\n return \"You can not equip this\"", "def do_eat(self, arg):\r\n itemToEat = arg.lower()\r\n\r\n if itemToEat == '':\r\n print('Eat what? Type \"inventory\" or \"inv\" to see whats in your inventory.')\r\n return\r\n\r\n cantEat = False\r\n\r\n for item in getAllItemsMatchingDesc(itemToEat, inventory):\r\n if worldItems[item].get(EDIBLE, False) == False:\r\n cantEat = True\r\n continue # there may be other items named this that you can eat, so we continue checking\r\n # NOTE - If you wanted to implement hunger levels, here is where\r\n # you would add code that changes the player's hunger level.\r\n print('You eat %s may your bowls forever question your terrible choices.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n return\r\n\r\n if cantEat:\r\n print('I dont think the \"%s\" would like you to do that...' % (worldItems[item][SHORTDESC]))\r\n else:\r\n print('You do not have \"%s\". Type \"inventory\" or \"inv\" to see what in your inventory.' % (itemToEat))", "def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"", "def interact_with(arguments, player):\n inputted_item = \" \".join(arguments)\n current_loc = world.tile_exists(player.location_x, player.location_y)\n\n inventory_names = []\n for item in player.inventory:\n for name in item.name:\n inventory_names.append(name.lower())\n room_names = []\n for item in current_loc.items:\n for name in item.name:\n room_names.append(name.lower())\n\n # If it's in player inventory\n if inputted_item in inventory_names:\n for i, item in enumerate(player.inventory):\n if inputted_item in [name.lower() for name in item.name]:\n player.inventory[i].interact(player)\n return\n # If it's in the room\n elif inputted_item in room_names:\n for i, item in enumerate(current_loc.items):\n if inputted_item in [name.lower() for name in item.name]:\n current_loc.items[i].interact(player)\n return\n # If it's not in inventory or room\n else: #TODO: POSSIBLE ERROR - WHAT IF THERE'S AN IDENTICALLY NAMED ITEM IN THE INVENTORY AND ROOM?\n print(\"Can't do that\")\n return", "def do_take(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToTake = arg.lower()\r\n\r\n if itemToTake == '':\r\n print('Take what? 
Type \"look\" the items on the ground here.')\r\n return\r\n \r\n\r\n cantTake = False\r\n\r\n # get the item name that the player's command describes\r\n for item in getAllItemsMatchingDesc(itemToTake, worldRooms[location][GROUND]):\r\n if worldItems[item].get(TAKEABLE, True) == False:\r\n cantTake = True\r\n continue # there may be other items named this that you can take, so we continue checking\r\n print(\"Taken.\")\r\n worldRooms[location][GROUND].remove(item) # remove from the ground\r\n inventory.append(item) # add to inventory\r\n return\r\n \r\n # something funny\r\n if itemToTake == 'chest':\r\n print(bcolors.start + \"Your feeble arms buckle under the weight of the enormous chest, nice try you theiving git.\" + bcolors.end)\r\n return\r\n \r\n \r\n if cantTake:\r\n print('You cannot take \"%s\".' % (itemToTake))\r\n else:\r\n print('That is not in or around the area, maybe it was your imagination?')", "def lookiner(item):\r\n\tfrom menus import menu\r\n\tl = []\r\n\tfor n in item.items:\r\n\t\tl.append(n.name)\r\n\t\ti = menu(item.items[0].name, l)\r\n\tif i != None:\r\n\t\tf = item.items[i]\r\n\t\tf.add_options(item)\r\n\t\tr = f.option_list()\r\n\t\to = menu(r[0], r)\r\n\t\tif o == None:\r\n\t\t\tspk(\"You stop looking at the items in %s\" % item.name)\r\n\t\telif r[o] == 'take':\r\n\t\t\tspk(\"you remove %s from %s\" % (f.name, item.name))\r\n\t\t\ttake(item, f)\r\n\t\telse:\r\n\t\t\teval(f.options.get(r[o]))(play, f)\r\n\telse:\r\n\t\tspk(\"You stop looking at the items in %s\" % item.name)", "def checkforitems(curpos):\n if DARK and not HAS_FLASHLIGHT:\n printmessage(\"But you can't see a thing!\", 5, MAGENTA, 2) # was 2\n return\n\n if ITEM_LIST[curpos] != int(len(ITEMTYPES) - 2): # if the item at curpos isnt 'None'\n printmessage(\"You found a %s!\" % ITEMTYPES[ITEM_LIST[curpos]], 5, MAGENTA, 0)\n add_score(50)\n additemtoinventory(ITEM_LIST[curpos]) # funtion removes item from map\n pause_for_keypress()\n else:\n printmessage(\"You look around, and find nothing\", 5, CYAN, 2)", "def use_on(arguments, player):\n\n for i, arg in enumerate(arguments):\n if arg == \"on\":\n index_of_on = i\n break\n item_1 = \" \".join(arguments[:index_of_on]) # String of item_1 in name format\n item_2 = \" \".join(arguments[index_of_on + 1:]) # String of item_2 in name format\n\n # Gathering and finding item names\n inventory_names = [] # All possible names for items in inventory\n room_names = [] # All possible names for items in the room\n for item in player.inventory:\n inventory_names += item.name\n for item in world.tile_exists(player.location_x, player.location_y).items:\n room_names += item.name\n\n # Making sure first item is in player's inventory\n if item_1 not in [item.lower() for item in inventory_names]:\n print(\"'\" + item_1 + \"' not in your inventory.\")\n return\n\n # Gets actual item_1 object\n else:\n for i, item in enumerate(player.inventory):\n if item_1 in [name.lower() for name in item.name]:\n item_1_index = i\n break\n\n # Making sure second item is in player's inventory or in the room\n if item_2 not in [item.lower() for item in inventory_names]:\n if item_2 not in [item.lower() for item in room_names]:\n print(\"'\" + item_2 + \"' not in your inventory or anywhere nearby.\")\n return\n\n # Getting the actual item_2 object (only reaches here if it is in room or inventory)\n # WHAT IF THERE'S AN IDENTICALLY NAMED ITEM IN THE INVENTORY AND ROOM?\n for i, item in enumerate(player.inventory):\n if item_2 in [name.lower() for name in item.name]:\n item_2_index = i\n 
item_2_location = \"inventory\"\n break\n for i, item in enumerate(world.tile_exists(player.location_x, player.location_y).items):\n if item_2 in [name.lower() for name in item.name]:\n item_2_index = i\n item_2_location = \"room\"\n break\n\n # Calling the associated \"use\" function with the correct objects\n if item_2_location == \"inventory\":\n player.inventory[item_1_index].use(player.inventory[item_2_index], player)\n elif item_2_location == \"room\":\n player.inventory[item_1_index].use(\n world.tile_exists(player.location_x, player.location_y).items[item_2_index], player)\n return", "def do_store(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('%s does not exist in your inventory, the ground, africa or your pockets, what a shame.' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n \r\n try:\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n \r\n # broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.\r\n if item != None:\r\n print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n worldRooms[location][STORAGE].append(item)\r\n except KeyError:\r\n return(\"Don't even think about it buster brown.\")\r\n \r\n #item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n #if item != None:\r\n # print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n # inventory.remove(item) # remove from inventory\r\n # worldRooms[location][STORAGE].append(item) # add to the container\r", "def iteminfo():\n itemcode = input(\"Enter item code: \")\n if itemcode in FULLINVENTORY:\n printdict = FULLINVENTORY[itemcode]\n for key, value in printdict.items():\n print(\"{}:{}\".format(key, value))\n else:\n print(\"Item not found in inventory\")", "def item_info():\n item_code = get_input(\"Enter item code: \")\n if item_code in FULL_INVENTORY:\n print_dict = FULL_INVENTORY[item_code]\n output = \"\"\n for key, value in print_dict.items():\n output += (\"{}:{}{}\".format(key, value, \"\\n\"))\n else:\n output = \"Item not found in inventory\"\n print(output)\n return output", "def do_use(self, arg):\r\n itemToUse = arg.lower()\r\n \r\n if itemToUse == '':\r\n print('Use what? Type \"inv\" to see the items in your invetory.')\r\n return\r\n \r\n cantUse = False\r\n \r\n #look up the item the player describes\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n if itemToUse not in invDescWords:\r\n print('You do not have that item to use it')\r\n return\r\n \r\n for item in getAllItemsMatchingDesc(itemToUse, inventory):\r\n if worldItems[item].get(USEABLE, True) == False:\r\n cantUse = True\r\n continue\r\n print('%s' % (worldItems[item][USEDESCTRUE]))\r\n #print('You use %s' % (worldItems[item][SHORTDESC]))\r\n #inventory.remove(item) \r\n return\r\n \r\n if cantUse:\r\n print('You cannot use \"%s\".' 
% (itemToUse))\r\n else:\r\n print('You do not have that item to use.')", "def take(self):\n if self.location.item:\n self.backpack[self.location.item.name] = self.location.item\n item_name = self.location.item.name\n self.location.item = None\n return f\"You took {item_name}. And put it in your backpack.\"\n else:\n return \"There is nothing to take.\"", "def has_item(item: Item):\n async def _wrapper(ctx):\n if not (res := 0 < await ctx.db.get(\"items\", ctx.author, item.id)):\n name = (f\"an \" if any(item.name.startswith(vowel) for vowel in \"aeiou\") else \"a \") + f\"**{item}**\"\n await ctx.send(f\"You need to own {name} in order to use this command.\" + (\n f\" You can go buy one from the shop! (`{ctx.clean_prefix}shop`)\" if item.buyable else \"\"\n ))\n return res\n\n return discord.ext.commands.check(_wrapper)", "def choose_item():\n print_items()\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == 'blueprint':\n blueprint = ViewMap()\n blueprint.print_map()\n print(\"Type 'back' to go to main menu.\")\n else:\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n else:\n print(\"Type 'back' to go to main menu.\")", "def beer():\r\n global cheated\r\n\r\n if enter_four == config.confus(config.config4):\r\n player.grab(helpful.Item('SixPack',10,0,0,6))\r\n cheated = True\r\n print '<achievement unlocked>\\n'\r\n\r\n if player.get_money() >= 17:\r\n\r\n player.set_health(100)\r\n player.lose_money(17)\r\n\r\n raw_input('You take out your money.\\n')\r\n raw_input(bartender_name + ' chuckles.\\n')\r\n raw_input('\"I guess we have this stuff, if you really need a drink.\"\\n')\r\n\r\n raw_input(\"The 'beer' healed you!\\n\")\r\n raw_input('It also cost $17.\\n')\r\n \r\n else:\r\n print bartender_name + ' chuckles and looks pointedly at his empty tip jar.\\n'\r\n raw_input('\"' +\"We're out of beer.\" + '\"\\n')\r\n raw_input('\"Nice try.\"\\n')", "def trader(backpack):\n loot = [[\"gold coin\", \"other\", 1]]\n loot2 = [[\"corn\", \"food\", 1]]\n print(\"\\nTrader says: \")\n if \"corn\" in backpack:\n x = input(\"-Hey! So you want sell some corn mate?\\n(write yes or no): \")\n x = x.lower()\n if x == \"yes\":\n try:\n remove_corn = int(input(\"-How much u wanna sell?: \"))\n if remove_corn > backpack[\"corn\"][0]:\n print(\"-You dont have that much corn in ur backpack \")\n enter()\n else:\n print(\"-Thanks for corn :) \")\n inve.remove_item(backpack, loot2, remove_corn)\n inve.add_to_inventory(backpack, loot, remove_corn)\n enter()\n except ValueError:\n print(\"(U need to write a number): \")\n enter()\n elif x == \"no\":\n print(\"-Come to me when u wanna sell corn \")\n enter()\n else:\n print(\"(Your answer need to be yes or no) \")\n enter()\n else:\n print(\"-You dont have any corn, come to me when u get some \")\n enter()\n return backpack", "def weapon_check():\n if get_locations()['player'] == get_locations()['weapon']:\n STATUS['weapon'] = 'armed'\n STATUS['locations']['weapon'] = None\n print(\"You found the weapon! Now go and kill the monster!\")", "def use(self):\n print(\"Type 'back' to go back.\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"filled kettle\":\n print(\"You turn off the fire and find a burnt note with \"\n \"letters 'gjkh'. 
It looks like a password of some kind.\")\n break\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "def bum(backpack):\n loot = [[\"vodka\", \"food\", 1]]\n loot2 = [[\"gold coin\", \"other\", 1]]\n print(\"\\nBum says: \")\n if \"gold coin\" in backpack:\n if backpack['gold coin'][0] >= 1:\n vodka_sell = input(\n \"-I see gold in your pocket!\\nDo u wanna change 1 gold coin for 1 vodka?\\n(write yes to accept or no for reject)\\n\")\n if vodka_sell == \"yes\":\n try:\n vodka_ask = int(input(\"-How much vodka u need my friend?\\n\"))\n if vodka_ask <= backpack[\"gold coin\"][0]:\n print(\"GLUP \")\n inve.remove_item(backpack, loot2, vodka_ask) # removing coins from backpack\n inve.add_to_inventory(backpack, loot, vodka_ask) # adding vodka\n enter()\n else: # handling situation when u have no gold coins\n print(\"-U dont have coins for this...god's drink, f... off! \")\n enter()\n except ValueError: # handling bugs with writing some other stuff then int\n print(\"(U need to write a number) \")\n enter()\n elif vodka_sell == \"no\":\n print(\"-Bye, come to papa again :0 \")\n enter()\n else:\n print(\"-I dont know what you talking about \")\n enter()\n else:\n print(\"-You must have at least 1 gold coin to buy vodka! \")\n enter()\n else:\n print(\"-U dont have coins for this...god's drink, f... off! \")\n enter()\n return backpack", "def handle_item(curs):\n # inventory_char = {'1':[]}\n weapon_list = curs.execute(\n f\"\"\"SELECT name, item_ptr_id\n FROM\n (SELECT * FROM charactercreator_character_inventory as cii\n LEFT JOIN armory_item as ai\n ON cii.item_id = ai.item_id) as a\n LEFT JOIN armory_weapon as aw\n ON a.item_id=aw.item_ptr_id\n WHERE character_id=5;\n \"\"\")\n inventory_char = [weapon[0] for weapon in weapon_list if weapon[1] != None]\n # for weapon in weapon_list:\n # if weapon[1] != None:\n # inventory_char['1'].append(weapon[0])\n return inventory_char", "def func(self):\n char = self.character\n clothing = char.search(self.args, candidates=char.contents)\n if not clothing:\n return\n if not clothing.db.worn:\n char.msg(\"You're not wearing that!\")\n return\n if clothing.db.covered_by:\n char.msg(\"You have to take off %s first.\" % clothing.db.covered_by.name)\n return\n clothing.remove(char)", "def attack_choice(self, user_choice):\n\n if 'A' in user_choice:\n return 'What is the name of your item?'\n\n elif 'B' in user_choice:\n # IDEA: Should there be limit on flee?\n if randint(1, 4) == 3:\n return False\n else:\n return \"Well looks like your escape attempt failed.\"\n else:\n return \"Please choose either 'A' or 'B'\"", "def look_at_in(from_char, key_container, key_item):\n # look at something on character\n for character in from_char.room.characters:\n if key_container in from_char.remember.get_remember(character).lower():\n look_at_in_equipment(from_char, character, key_item)\n return\n # look at something in inventory\n item = from_char.inventory.get_item('shortdesc', key_container)\n if item:\n look_at_in_container(from_char, item, key_item)\n return\n # look at something in equipment\n item = from_char.equipment.get_item('shortdesc', key_container)\n if item:\n look_at_in_container(from_char, item, key_item)\n return\n # look at something in room\n item = from_char.room.container.get_item('roomdesc', key_container)\n if item :\n look_at_in_container(from_char, item, key_item)\n return\n # nothing found\n info(from_char.player,\n \"Aucun personnage ni objet ne correspond au mot clé « {} ».\"\n 
.format(key_container))", "def use(self):\n print_items()\n while True:\n print(\"Type 'back' to go back.\")\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"little key\":\n print(\"You open the cabinet door.\")\n print(\"In it, there is a golden key.\")\n gk = GoldenKey('golden key')\n gk.take()\n break\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "def valid_input(choices):\n while True:\n print_pause(\"\\nYou may:\")\n for choice in choices:\n print_pause(choice)\n valid_input = input(\"\\nWhat would you like to do?\\n\")\n if valid_input == \"inventory\":\n print_pause(\"You are currently carrying:\")\n for item in inventory:\n print_pause(item)\n elif valid_input == \"quit\":\n confirm = input(\"Are you sure you want to quit? \")\n if confirm == \"yes\" or confirm == \"y\":\n print(\"Thanks for playing!\\n\")\n sys.exit()\n else:\n for key in choices.keys():\n if valid_input.lower() in choices[key]:\n key = key.replace('\\033[1;32m', '').replace('\\x1b[0m', '')\n# print(f\"returning: {key}\")\n return key\n print_pause(\"I'm sorry - I don't understand that. Please select\"\n \" one of the following choices.\")", "def LookIn(play, item):\r\n\tspk(\"You look in %s and find\" % item.name)\r\n\tif item.items != []:\r\n\t\tlookiner(item)\r\n\telse:\r\n\t\tspk(\"Nothing\")", "def find_items():\n check50.exists(\"item.py\")\n try:\n check50.run(run_command).stdin(\"in\").stdout(room_3_items)\n except check50.Failure as error:\n raise check50.Failure(\"Could not find items upon first entering room.\\n\" +\n \" Remember to seperate multiple items by a single newline\\n\" +\n f\" {error}\")\n # Check for look command\n try:\n check = check50.run(run_command)\n moves = [\"IN\", \"OUT\", \"IN\", \"LOOK\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS: a set of keys\")\n except check50.Failure as error:\n raise check50.Failure(f\"Could not find items when using LOOK.\\n {error}\")", "async def item(self, ctx, raid: Raid):\n\n def check_author(m):\n return m.author == ctx.author\n\n if raid:\n # Raid Found, ask user to start entering items\n await ctx.send(RAID_FOUND.format(raid_id=raid.id,\n raid_event_name=raid.event_name,\n raid_date=raid.date))\n item_log = ''\n while True:\n # Wait for item entry: <Character> <DKP> <Item Name>\n try:\n msg = await ctx.bot.wait_for('message', check=check_author, timeout=60)\n except asyncio.TimeoutError:\n break\n\n response = msg.content.replace(\"<\", \"\").replace(\">\", \"\")\n\n if \"done\" in response.lower():\n break\n\n if \"cancel\" in response.lower():\n return None\n\n parts = response.split()\n if len(parts) < 3:\n await ctx.send(f'The following response `{msg.content}` was not valid. Please try again.')\n continue\n\n character_part = parts[0]\n item_value_part = parts[1]\n item_name_part = parts[2:]\n\n # Validate the character\n character = [c for c in self.characters if c.name.lower() == character_part.lower()]\n if not character:\n await ctx.send(f'The following character `{character_part}` was not valid. Please try again.')\n continue\n character = character[0]\n\n # Validate the item value\n if not item_value_part.isnumeric():\n await ctx.send(f'The following dkp of `{item_value_part}` is not a number. 
Please try again.')\n continue\n item_value = int(item_value_part)\n\n # TODO validate item_name\n item_name = ' '.join(item_name_part).capitalize()\n\n raid_item = eqdkp.create_raid_item(item_date=raid.date,\n item_name=item_name,\n item_raid_id=raid.id,\n item_value=item_value,\n item_buyers=[character.id])\n if raid_item:\n await ctx.send(\n f\"`{item_name} was successfully charged to {character.name} for {item_value} dkp. \"\n f\"Continue with the next item, or type done.`\")\n item_log += f\"> {item_name.ljust(30)}{character.name.ljust(20)}{str(item_value).rjust(5)} DKP\\n\"\n\n else:\n await ctx.send(f\"`ERROR: {item_name} failed to get entered. Please try again`\")\n\n # Find and edit the raid log in #dkp-entry-log channel\n if len(item_log) > 0:\n async with ctx.typing():\n channel = ctx.bot.dkp_entry_log_channel\n messages = await channel.history(limit=50).flatten()\n messages = [m for m in messages if f\"Raid Entry Log [{raid.id}]\" in m.content]\n if messages:\n message = messages[0]\n items_purchased = f\"\"\"\\n\\n* Items Purchased\\n{item_log}```\"\"\"\n content = message.content[:-3] + items_purchased\n await message.edit(content=content)\n return await ctx.send(f'All done! #{channel.name} has been edited.')\n else:\n return await ctx.send(\n f\"`ERROR: I wasn't able to edit #{channel.name}. Please do so manually.`\")", "def get_take(self, item):\n item = ' '.join(item)\n if self.finished_places == 13:\n if item == 'meaning of life':\n print('you win')\n return False\n return super(North, self).get_take(item)\n # if item is meaning of life -- win" ]
[ "0.6434494", "0.6398872", "0.63884974", "0.62861234", "0.60229456", "0.5971386", "0.5952871", "0.5883416", "0.5823682", "0.58227223", "0.5797818", "0.57711345", "0.57433754", "0.5726849", "0.5726679", "0.5707468", "0.5656824", "0.56306726", "0.5624708", "0.5619921", "0.5597098", "0.5593726", "0.55936605", "0.5551877", "0.5544006", "0.5535301", "0.55349416", "0.5524602", "0.552196", "0.5503652" ]
0.73106873
0
Description
Asks user to input an item name to be used
Checks if it exists in self.backpack
    Checks if item type is Food
        Adds value of heal_amount attribute of the item to self.health value
        Reduces amount of item charges, deletes item from backpack if charges number become 0
        Returns "You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored" string
    Returns "You cant eat this" string if item.type is not Food
Returns "You dont have this" string if item does not exist in self.backpack
def use(self): return_string = '' item = input(f"What do you want to use?\n>") if item in self.backpack: if self.backpack[item].type is "Food": if (self.health + self.backpack[item].heal_amount) > standard_health: self.health = standard_health else: self.health += self.backpack[item].heal_amount self.backpack[item].charges -= 1 return_string = f"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored" if self.backpack[item].charges == 0: del self.backpack[item] return return_string else: return "You cant eat this" else: return "You dont have this"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def equip(self):\n item_name = input(\"What item do you want to equip?\\n>\")\n if item_name in self.backpack:\n item = self.backpack[item_name]\n else:\n return \"You don't have this\"\n if item.type in self.equipped:\n self.equipped[item.type] = item\n if item.type == \"Weapon\":\n self.strength = item.strength\n return f\"You have equipped {item.name} on {item.type} item slot\"\n else:\n return \"You can not equip this\"", "def give(self):\n if self.location.character:\n item = input(f\"What do you want to give to {self.location.character.name}?\\n>\")\n if item in self.backpack:\n if self.location.character.give(item):\n if isinstance(self.location.character, Friend):\n loot = self.location.character.possession\n self.backpack[loot.name] = loot\n self.location.character.treat = None\n self.location.character.possession = None\n del self.backpack[item]\n return f\"{self.location.character.name} accepted your gift, and gave you {loot}\"\n if isinstance(self.location.character, Enemy):\n name = self.location.character.name\n self.location.character = None\n del self.backpack[item]\n return f\"You fend off {name} with {item}\"\n else:\n return f\"It does not accept {item}\"\n else:\n return f\"{self.location.character.name} does not like {item}\"\n else:\n return \"You don't have this\"\n else:\n return \"There is no one here\"", "def heal(self):\n # Creates a list of consumables from the players inventory\n consumables = [item for item in self.inventory\n if isinstance(item, wp.Consumable)]\n # If there are no consumables then tells player he has not healing item\n if not consumables:\n print(\"You don't have any items to heal you!\")\n return\n elif self.hp == 100:\n print('Your Full HP!')\n return\n # Shows an item that can heal you\n for i, item in enumerate(consumables, 1):\n print(\"Choose an item to use to heal: \")\n print(\"{}. {}\".format(i, item))\n\n valid = False\n while not valid:\n print(\"type the number associated with the item to use otherw\\\nise type q to not use\")\n # Gets user input of what item they want to use to heal\n choice = input(\"\")\n # Checks to see if user typed in q\n if choice == 'q':\n # Deny the heal of that particular item/cancel the heal\n break\n # Any other option\n else:\n # Uses the item and heals the player and then removes the\n # item from the players inventory\n try:\n to_eat = consumables[int(choice) - 1]\n self.hp = min(100, self.hp + to_eat.healing_value)\n self.inventory.remove(to_eat)\n print(\"Current HP: {}\".format(self.hp))\n valid = True\n except (ValueError, IndexError):\n print(\"Invalid choice, try again.\")", "def bum(backpack):\n loot = [[\"vodka\", \"food\", 1]]\n loot2 = [[\"gold coin\", \"other\", 1]]\n print(\"\\nBum says: \")\n if \"gold coin\" in backpack:\n if backpack['gold coin'][0] >= 1:\n vodka_sell = input(\n \"-I see gold in your pocket!\\nDo u wanna change 1 gold coin for 1 vodka?\\n(write yes to accept or no for reject)\\n\")\n if vodka_sell == \"yes\":\n try:\n vodka_ask = int(input(\"-How much vodka u need my friend?\\n\"))\n if vodka_ask <= backpack[\"gold coin\"][0]:\n print(\"GLUP \")\n inve.remove_item(backpack, loot2, vodka_ask) # removing coins from backpack\n inve.add_to_inventory(backpack, loot, vodka_ask) # adding vodka\n enter()\n else: # handling situation when u have no gold coins\n print(\"-U dont have coins for this...god's drink, f... off! 
\")\n enter()\n except ValueError: # handling bugs with writing some other stuff then int\n print(\"(U need to write a number) \")\n enter()\n elif vodka_sell == \"no\":\n print(\"-Bye, come to papa again :0 \")\n enter()\n else:\n print(\"-I dont know what you talking about \")\n enter()\n else:\n print(\"-You must have at least 1 gold coin to buy vodka! \")\n enter()\n else:\n print(\"-U dont have coins for this...god's drink, f... off! \")\n enter()\n return backpack", "def trader(backpack):\n loot = [[\"gold coin\", \"other\", 1]]\n loot2 = [[\"corn\", \"food\", 1]]\n print(\"\\nTrader says: \")\n if \"corn\" in backpack:\n x = input(\"-Hey! So you want sell some corn mate?\\n(write yes or no): \")\n x = x.lower()\n if x == \"yes\":\n try:\n remove_corn = int(input(\"-How much u wanna sell?: \"))\n if remove_corn > backpack[\"corn\"][0]:\n print(\"-You dont have that much corn in ur backpack \")\n enter()\n else:\n print(\"-Thanks for corn :) \")\n inve.remove_item(backpack, loot2, remove_corn)\n inve.add_to_inventory(backpack, loot, remove_corn)\n enter()\n except ValueError:\n print(\"(U need to write a number): \")\n enter()\n elif x == \"no\":\n print(\"-Come to me when u wanna sell corn \")\n enter()\n else:\n print(\"(Your answer need to be yes or no) \")\n enter()\n else:\n print(\"-You dont have any corn, come to me when u get some \")\n enter()\n return backpack", "def eat(self, command):\n \n if len(command) > 1:\n if self.inventory:\n for item in self.inventory:\n if item.name == command[1] and item.name != 'stick' and item.name != 'rock' and item.name != 'lamp' and item.name != 'stick':\n self.health += item.food\n if item.name == 'body':\n print(\"That's just gross..\")\n elif item.name == 'thing':\n print('It tasted like bacon..')\n elif item.name == 'plunger':\n print(\"+5 health, for effort..\")\n else:\n print('You consumed a ' + item.name)\n self.inventory.remove(item) \n print('Your health is now ' + str(self.health))\n else:\n print(\"You have no consumables in your inventory\")\n else:\n print('Consume what?')", "def take(self):\n if self.location.item:\n self.backpack[self.location.item.name] = self.location.item\n item_name = self.location.item.name\n self.location.item = None\n return f\"You took {item_name}. And put it in your backpack.\"\n else:\n return \"There is nothing to take.\"", "def do_eat(self, arg):\r\n itemToEat = arg.lower()\r\n\r\n if itemToEat == '':\r\n print('Eat what? Type \"inventory\" or \"inv\" to see whats in your inventory.')\r\n return\r\n\r\n cantEat = False\r\n\r\n for item in getAllItemsMatchingDesc(itemToEat, inventory):\r\n if worldItems[item].get(EDIBLE, False) == False:\r\n cantEat = True\r\n continue # there may be other items named this that you can eat, so we continue checking\r\n # NOTE - If you wanted to implement hunger levels, here is where\r\n # you would add code that changes the player's hunger level.\r\n print('You eat %s may your bowls forever question your terrible choices.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n return\r\n\r\n if cantEat:\r\n print('I dont think the \"%s\" would like you to do that...' % (worldItems[item][SHORTDESC]))\r\n else:\r\n print('You do not have \"%s\". Type \"inventory\" or \"inv\" to see what in your inventory.' 
% (itemToEat))", "def healPokemon(user):\n\n display_pokemon(user.pokemon_party)\n\n #Ask which pokemon needs healing?\n pokemon_to_heal = user_selection(len(user.pokemon_party),f\"Which Pokemon do you want to heal?(1-{len(user.pokemon_party)}):\")\n\n #Ask which item to use\n db.execute(\"SELECT * FROM hp_restoring_items\")\n medicine = db.fetchall()\n\n for count, item in enumerate(medicine):\n print(f\"{count+1}) {item[1]}\")\n \n medicine_to_use = user_selection(len(medicine), f\"Which Healing Item do youy want to use?(1-{len(medicine)})\")\n print(medicine[medicine_to_use-1][1])\n print(user.heal_pokemon(pokemon_to_heal-1, medicine[medicine_to_use-1][1], medicine[medicine_to_use-1][3]))", "def add_to_player_health(self):\n\n if self.health == 100:\n return print(dedent(\"You are at full health you do not need nourishment.\"))\n\n else:\n\n valid_food = items.find_item(map.user, desired_type=\"food\")\n\n if valid_food:\n food = map.user.get_player_item_val(valid_food.name, map.user)\n\n if food:\n\n if food == 1:\n\n dropping = player_inventory.drop(validated=True)\n\n elif food > 1:\n self.health += valid_food.health_add\n food -= 1\n\n if self.health > 100:\n self.health = 100\n\n return print(dedent(\"Your health is now {}\".format(self.health)))", "def do_store(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('%s does not exist in your inventory, the ground, africa or your pockets, what a shame.' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n \r\n try:\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n \r\n # broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.\r\n if item != None:\r\n print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n worldRooms[location][STORAGE].append(item)\r\n except KeyError:\r\n return(\"Don't even think about it buster brown.\")\r\n \r\n #item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n #if item != None:\r\n # print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n # inventory.remove(item) # remove from inventory\r\n # worldRooms[location][STORAGE].append(item) # add to the container\r", "def buy_animal_food(self):\n if self.location == \"Shop\":\n response = input(\"How many bananas do you want to buy?\")\n while response not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\n response = input(\"Please specify the number of beers\")\n money = self.money - int(response)\n if money >= 0:\n self.food_bag['Bananas'] += int(response)\n self.money = money\n print(f'You now got {response} more bananas than before, be careful not to drop one - you might slip!')\n else:\n print(\"You idiot apparently spent all your money on beers! 
You can\\'t buy animal food anymore - \"\n \"better go gambling!\")\n else:\n print('Your are not at the Shop right now, hard to find animal food here.')", "def beer():\r\n global cheated\r\n\r\n if enter_four == config.confus(config.config4):\r\n player.grab(helpful.Item('SixPack',10,0,0,6))\r\n cheated = True\r\n print '<achievement unlocked>\\n'\r\n\r\n if player.get_money() >= 17:\r\n\r\n player.set_health(100)\r\n player.lose_money(17)\r\n\r\n raw_input('You take out your money.\\n')\r\n raw_input(bartender_name + ' chuckles.\\n')\r\n raw_input('\"I guess we have this stuff, if you really need a drink.\"\\n')\r\n\r\n raw_input(\"The 'beer' healed you!\\n\")\r\n raw_input('It also cost $17.\\n')\r\n \r\n else:\r\n print bartender_name + ' chuckles and looks pointedly at his empty tip jar.\\n'\r\n raw_input('\"' +\"We're out of beer.\" + '\"\\n')\r\n raw_input('\"Nice try.\"\\n')", "def item_info():\n item_code = get_input(\"Enter item code: \")\n if item_code in FULL_INVENTORY:\n print_dict = FULL_INVENTORY[item_code]\n output = \"\"\n for key, value in print_dict.items():\n output += (\"{}:{}{}\".format(key, value, \"\\n\"))\n else:\n output = \"Item not found in inventory\"\n print(output)\n return output", "def add_to_inventory(self, newItem):\n\n if len(self.player_inventory) >= 8:\n print(\"\"\"You already have the maximum of 7 items in your inventory,\n looks like you will need to get rid of an item to get {}\"\"\".format(newItem.name))\n\n print(\"Would you like to get rid of an item to add the {} to your inventory?\".format(newItem.name))\n\n if 'yes' in choice:\n dropping = player_inventory.drop()\n print(dedent('Okay, {} was removed from your inventory.'.format(item_name)))\n\n elif 'no' in choice:\n print(dedent('Okay redirecting you back to shop.'))\n return False\n\n else:\n print(dedent('Seems like you did not make a valid choice, aborting ...'))\n return False\n\n else:\n\n if newItem.type == \"food\":\n self.player_inventory[newItem.name] = newItem.health_addition\n elif newItem.type == \"weapon\":\n self.player_inventory[newItem.name] = newItem.quality\n\n print(dedent(\"\"\"\n ##############################################\n Nice, the {} has been added to your inventory!\n \"\"\".format(newItem.name)))", "def add_new_item():\n #global FULL_INVENTORY\n item_code = get_input(\"Enter item code: \")\n item_desc = get_input(\"Enter item description: \")\n item_rental_price = get_input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n item_price = market_prices.get_latest_price(item_code)\n new_inventory_item = inventory_class.Inventory(item_code, item_desc,\n item_price, item_rental_price)\n is_furniture = get_input(\"Is this item a piece of furniture? (Y/N): \")\n if is_furniture.lower() == \"y\":\n item_material = get_input(\"Enter item material: \")\n item_size = get_input(\"Enter item size (S,M,L,XL): \")\n new_item = furniture_class.Furniture(new_inventory_item, item_material, item_size)\n else:\n is_electrical_appliance = get_input(\"Is this item an electric appliance? 
(Y/N): \")\n if is_electrical_appliance.lower() == \"y\":\n item_brand = get_input(\"Enter item brand: \")\n item_voltage = get_input(\"Enter item voltage: \")\n new_item = elec_appliances_class.ElecAppliances(new_inventory_item,\n item_brand, item_voltage)\n else:\n new_item = new_inventory_item\n FULL_INVENTORY[item_code] = new_item.return_as_dictionary()\n print(\"New inventory item added\")\n return new_item.return_as_dictionary", "def addnewitem():\n\n itemcode = input(\"Enter item code: \")\n itemdescription = input(\"Enter item description: \")\n itemrentalprice = input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n itemprice = get_latest_price(itemcode)\n\n isfurniture = input(\"Is this item a piece of furniture? (Y/N): \")\n if isfurniture.lower() == \"y\":\n add_furniture(itemcode, itemdescription, itemprice, itemrentalprice)\n else:\n iselectricappliance = input(\"Is this item an electric appliance?\"\n \" (Y/N): \")\n if iselectricappliance.lower() == \"y\":\n add_appliance(itemcode, itemdescription, itemprice, itemrentalprice)\n add_non_furniture_nor_appliance(itemcode, itemdescription, itemprice,\n itemrentalprice)\n print(\"New inventory item added\")", "async def item(self, ctx, raid: Raid):\n\n def check_author(m):\n return m.author == ctx.author\n\n if raid:\n # Raid Found, ask user to start entering items\n await ctx.send(RAID_FOUND.format(raid_id=raid.id,\n raid_event_name=raid.event_name,\n raid_date=raid.date))\n item_log = ''\n while True:\n # Wait for item entry: <Character> <DKP> <Item Name>\n try:\n msg = await ctx.bot.wait_for('message', check=check_author, timeout=60)\n except asyncio.TimeoutError:\n break\n\n response = msg.content.replace(\"<\", \"\").replace(\">\", \"\")\n\n if \"done\" in response.lower():\n break\n\n if \"cancel\" in response.lower():\n return None\n\n parts = response.split()\n if len(parts) < 3:\n await ctx.send(f'The following response `{msg.content}` was not valid. Please try again.')\n continue\n\n character_part = parts[0]\n item_value_part = parts[1]\n item_name_part = parts[2:]\n\n # Validate the character\n character = [c for c in self.characters if c.name.lower() == character_part.lower()]\n if not character:\n await ctx.send(f'The following character `{character_part}` was not valid. Please try again.')\n continue\n character = character[0]\n\n # Validate the item value\n if not item_value_part.isnumeric():\n await ctx.send(f'The following dkp of `{item_value_part}` is not a number. Please try again.')\n continue\n item_value = int(item_value_part)\n\n # TODO validate item_name\n item_name = ' '.join(item_name_part).capitalize()\n\n raid_item = eqdkp.create_raid_item(item_date=raid.date,\n item_name=item_name,\n item_raid_id=raid.id,\n item_value=item_value,\n item_buyers=[character.id])\n if raid_item:\n await ctx.send(\n f\"`{item_name} was successfully charged to {character.name} for {item_value} dkp. \"\n f\"Continue with the next item, or type done.`\")\n item_log += f\"> {item_name.ljust(30)}{character.name.ljust(20)}{str(item_value).rjust(5)} DKP\\n\"\n\n else:\n await ctx.send(f\"`ERROR: {item_name} failed to get entered. 
Please try again`\")\n\n # Find and edit the raid log in #dkp-entry-log channel\n if len(item_log) > 0:\n async with ctx.typing():\n channel = ctx.bot.dkp_entry_log_channel\n messages = await channel.history(limit=50).flatten()\n messages = [m for m in messages if f\"Raid Entry Log [{raid.id}]\" in m.content]\n if messages:\n message = messages[0]\n items_purchased = f\"\"\"\\n\\n* Items Purchased\\n{item_log}```\"\"\"\n content = message.content[:-3] + items_purchased\n await message.edit(content=content)\n return await ctx.send(f'All done! #{channel.name} has been edited.')\n else:\n return await ctx.send(\n f\"`ERROR: I wasn't able to edit #{channel.name}. Please do so manually.`\")", "def do_take(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToTake = arg.lower()\r\n\r\n if itemToTake == '':\r\n print('Take what? Type \"look\" the items on the ground here.')\r\n return\r\n \r\n\r\n cantTake = False\r\n\r\n # get the item name that the player's command describes\r\n for item in getAllItemsMatchingDesc(itemToTake, worldRooms[location][GROUND]):\r\n if worldItems[item].get(TAKEABLE, True) == False:\r\n cantTake = True\r\n continue # there may be other items named this that you can take, so we continue checking\r\n print(\"Taken.\")\r\n worldRooms[location][GROUND].remove(item) # remove from the ground\r\n inventory.append(item) # add to inventory\r\n return\r\n \r\n # something funny\r\n if itemToTake == 'chest':\r\n print(bcolors.start + \"Your feeble arms buckle under the weight of the enormous chest, nice try you theiving git.\" + bcolors.end)\r\n return\r\n \r\n \r\n if cantTake:\r\n print('You cannot take \"%s\".' % (itemToTake))\r\n else:\r\n print('That is not in or around the area, maybe it was your imagination?')", "def fight(self, combat_item):\r\n print(self.name + \" doesn't want to fight with you\")\r\n return True", "def inspect_inventory(sell=False):\r\n choice = 'poop'\r\n\r\n if sell:\r\n while choice != 'done':\r\n choices = list(player.get_inventory())\r\n choices += ['done']\r\n choice = helpful.pick_item(choices,'Sell something?','done')\r\n # if choice == 'done':\r\n if str(choice) == 'mythical kumquat':\r\n raw_input(\"You can't sell your \" + str(choice) + \"!\\n\")\r\n elif choice == 'done':\r\n return\r\n else:\r\n cost = choice.get_cost()\r\n question = 'Sell your ' + str(choice) + ' for $' + str(cost) + '?'\r\n sell_yn = helpful.pick_item(['yes','no'],question)\r\n if sell_yn == 'yes':\r\n cost = choice.get_cost()\r\n player.gain_money(cost)\r\n player.drop(choice)\r\n raw_input('You sold your ' + str(choice) + '. 
' + \\\r\n \"That's $\" + str(cost) + ' more in your pocket.\\n')\r\n\r\n else: #if not selling\r\n while choice != 'done':\r\n choices = list(player.get_inventory())\r\n choices += ['done']\r\n intro = 'Type item name/number for more info...\\n\\nInventory:' \r\n choice = helpful.pick_item(choices,intro,'done')\r\n if choice == 'done':\r\n return\r\n raw_input(choice.advanced_str())\r\n if choice.get_health() > 0:\r\n use_yn = helpful.pick_item(['yes','no'],'Use this item?')\r\n if use_yn == 'yes':\r\n player.use(choice)", "def choice1_end():\n print(\"You see a flash of light in the forest.\")\n print(\"Do you want to risk the forest to go see what it was?(yes or no)\")\n iron_dagger = input()\n if iron_dagger == \"yes\":\n print(\"You find an iron dagger!\")\n inventory.append(\"iron dagger\")\n print(inventory)\n elif iron_dagger == \"no\":\n print(\"You continue on\")\n else:\n iron_dagger", "def iteminfo():\n itemcode = input(\"Enter item code: \")\n if itemcode in FULLINVENTORY:\n printdict = FULLINVENTORY[itemcode]\n for key, value in printdict.items():\n print(\"{}:{}\".format(key, value))\n else:\n print(\"Item not found in inventory\")", "async def buybait(self, ctx:commands.Context, quantity:int, *bait_type:str):\r\n\r\n if not await self.IsSpecialized(ctx.guild, ctx.channel.id, SHOP_CHANNEL):\r\n await ctx.send('Cannot buy bait here\\nUse `add shop` to turn this channel into a shop')\r\n return\r\n\r\n bait_type = ' '.join(bait_type)\r\n if not bait_type in fishing_bait:\r\n await ctx.send(f'{bait_type} is not a valid form of bait')\r\n\r\n bulk_mod = await self.GetModifier(ctx.guild, 'bulk_purchase_mod')\r\n bulk_requirement = await self.GetSetting(ctx.guild, 'bulk_minimum')\r\n total = int(bait_prices[bait_type] * quantity * await self.GetModifier(ctx.guild, \"bait_price\") * (1 if quantity < bulk_requirement else 1 - bulk_mod))\r\n\r\n if not bank.can_spend(ctx.message.author, total):\r\n await ctx.send(f'You don\\'t have enough {await bank.get_currency_name(ctx.guild)}')\r\n return\r\n\r\n msg = await ctx.send(f'Are you sure you want to buy {bait_type} x{quantity} ({total} {await bank.get_currency_name(ctx.guild)})'\r\n + (f'\\n*-{100 * bulk_mod}% for buying in bulk*' if quantity >= bulk_requirement else ''))\r\n\r\n start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\r\n pred = ReactionPredicate.yes_or_no(msg, ctx.author)\r\n try:\r\n await ctx.bot.wait_for(\"reaction_add\", check=pred, timeout=15)\r\n except asyncio.TimeoutError:\r\n await msg.clear_reactions()\r\n return\r\n\r\n if pred.result is True:\r\n member_bait = await self.config.member(ctx.message.author).bait()\r\n member_bait[bait_type] += quantity\r\n await self.config.member(ctx.message.author).bait.set(member_bait)\r\n await msg.edit(content=f'{quantity} {bait_type} bought for {total} {await bank.get_currency_name(ctx.guild)}')\r\n await bank.withdraw_credits(ctx.message.author, total)\r\n else:\r\n await msg.edit(content='Sale cancelled')\r\n\r\n await msg.clear_reactions()", "def take(self, item_name):\n # Delete item from the current room's inventory\n item = self.current_room.inventory.remove(item_name)\n\n # Add item to player's inventory\n if item is not None:\n self.player.add(item)\n print(f\"{item_name} taken.\")\n else:\n print(\"No such item.\")", "def handle_input(self, event: EventType):\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n if self.world.inventory.items[self.world.inventory.current_item]:\n # Activate flamethrower\n if 
self.world.inventory.items[self.world.inventory.current_item].item_type == InventoryItem.FLAMETHROWER:\n self.world.inventory.items[self.world.inventory.current_item].activated = True\n\n # Throw away pizza\n if self.world.inventory.items[self.world.inventory.current_item].item_type == InventoryItem.PIZZA:\n self.world.destination.set_mission_to_go_to_doominos()\n self.world.inventory.remove_item(InventoryItem.PIZZA)\n self.throw_pizza(pygame.mouse.get_pos())\n self.world.score.decrement_score(10)\n\n # Drink a Klok\n if self.world.inventory.items[self.world.inventory.current_item] and self.world.inventory.items[self.world.inventory.current_item].item_type == InventoryItem.KLOK:\n self.health = Constant.PLAYER_HEALTH\n self.world.inventory.remove_item(InventoryItem.KLOK)\n #print(self.world.inventory.items)\n\n # Knife\n if self.world.inventory.items[self.world.inventory.current_item] and self.world.inventory.items[self.world.inventory.current_item].item_type == InventoryItem.KNIFE:\n already_activated = self.world.inventory.items[self.world.inventory.current_item].activated\n self.world.inventory.items[self.world.inventory.current_item].activated = True\n if already_activated != self.world.inventory.items[self.world.inventory.current_item].activated:\n self.world.audio_manager.play_sfx(SFX.KNIFE_SWISH)\n\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if self.world.inventory.items[self.world.inventory.current_item]:\n # Deactivate flamethrower\n if self.world.inventory.items[self.world.inventory.current_item].item_type == InventoryItem.FLAMETHROWER:\n self.world.inventory.items[self.world.inventory.current_item].activated = False\n if self.world.inventory.items[self.world.inventory.current_item].item_type == InventoryItem.PIZZA:\n pass\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_f:\n pass\n # self.world.inventory.items[0].toggle()\n else:\n self.held_keys[event.key] = True\n\n elif event.type == pygame.KEYUP:\n self.held_keys[event.key] = False", "def getItemFromAisle(self):\n category_items = {\"0\": None}\n choice = None\n\n # While not exit\n while choice != \"0\": \n self.cart.refreshCartDF()\n self.updateAisleData()\n \n # Add items from a category into a dictionary to refer to.\n for i, item in enumerate(self.aisle_data.values):\n category_items[f\"{i+1}\"] = [item[0], item[1], int(item[2])] #[Item, price, stock]\n clear()\n\n \"\"\"\n 0) Don't add item to cart\n\n Items Price In stock\n 1) Chicken $5.20 14\n \"\"\"\n print(print_banner(self.name, self.aisle_name))\n print(\"The items on the shelves stare back at you...\")\n print(\"0) Don't add item to cart\\n\")\n print(\" Items Price In stock\")\n for i, item in enumerate(self.aisle_data.values):\n # option_num) Item, price, stock\n print(f\"{i+1}) {item[0]}{get_spaces(12-len(item[0]))} ${item[1]}{get_spaces(7-len(str(item[1])))} {int(item[2])}\") \n\n choice = input(\"\\nAdd an item to cart?\\n\")\n clear()\n print(print_banner(self.name, self.aisle_name))\n if choice == \"\":\n print(\"Please enter something!\")\n elif choice == \"0\":\n break\n elif choice in category_items: # Item chosen to add to cart\n while True: # Check if valid number added to cart\n clear()\n print(print_banner(self.name, self.aisle_name))\n print(f\"Selected item: \\033[1;33;40m{category_items[choice][0]} ({category_items[choice][2]})\\033[0;37;40m\\n\")\n amt = input(\"Number to add (0 to stop): \").strip()\n\n if amt == \"\" :\n print(\"Please enter an amount!\")\n enter_to_continue()\n continue\n elif 
amt.isnumeric():\n amt = int(amt)\n else:\n amt = -1\n if amt > category_items[choice][2]:\n print(\"That's too many!\")\n enter_to_continue()\n continue\n elif amt >= 0:\n break\n print(\"Invalid option!\")\n enter_to_continue()\n if amt == 0: # Don't add anything\n pass\n else:\n category_items[choice][2] -= amt\n self.cart.addItemToCart(category_items[choice][0], amt, category_items[choice][1]*amt, get_time())\n print(f\"Added {amt} {category_items[choice][0]} to cart\")\n enter_to_continue()\n else:\n print(\"Invalid option!\")\n enter_to_continue()", "def do_put(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('You want to put \"%s\" in what?!' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n if item != None:\r\n print('You put %s. in the container.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][ITEMINV].append(item) # add to the container\r", "def choose_inventory() -> list:\r\n print(\"What weapon would you like to start with? Enter the corresponding number\\n(1) Blaster Pistol\\n\"\r\n \"(2) Blaster Rifle\\n(3) Assault Cannon\\n(4) Sniper Rifle\\n\")\r\n item_list = [\"Blaster Pistol\", \"Blaster Rifle\", \"Assault Cannon\", \"Sniper Rifle\"]\r\n user_input = str(input())\r\n if user_input == \"1\":\r\n return [item_list[0]]\r\n elif user_input == \"2\":\r\n return [item_list[1]]\r\n elif user_input == \"3\":\r\n return [item_list[2]]\r\n elif user_input == \"4\":\r\n return [item_list[3]]\r\n else:\r\n print(\"Please enter a valid item number\")\r\n choose_inventory()", "def test_add_new_furniture(self):\n input_vars = ['4', 'Rug', '1', 'y', 'Berber', 's']\n inventory = {}\n with patch('builtins.input', side_effect=input_vars):\n main.add_new_item(inventory)\n self.assertEqual(inventory['4'],\n {\n 'product_code': '4',\n 'description': 'Rug',\n 'market_price': 24,\n 'rental_price': '1',\n 'material': 'Berber',\n 'size': 's'\n })" ]
[ "0.7249221", "0.6922872", "0.6793059", "0.6642906", "0.65641373", "0.62358886", "0.6145876", "0.6104031", "0.5942103", "0.59400934", "0.59339803", "0.5925581", "0.58787465", "0.58516383", "0.5764986", "0.56911844", "0.56813616", "0.56804574", "0.5635747", "0.5626358", "0.56182164", "0.5571842", "0.55590165", "0.5556056", "0.5552724", "0.55490094", "0.55422294", "0.5539743", "0.55287594", "0.5510868" ]
0.80603635
0
Can the QUALITY flags be parsed correctly?
def test_quality_flag_decoding_tess(): flags = list(TessQualityFlags.STRINGS.items()) for key, value in flags: assert TessQualityFlags.decode(key)[0] == value # Can we recover combinations of flags? assert TessQualityFlags.decode(flags[5][0] + flags[7][0]) == [flags[5][1], flags[7][1]] assert TessQualityFlags.decode(flags[3][0] + flags[4][0] + flags[5][0]) \ == [flags[3][1], flags[4][1], flags[5][1]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_quality_filter_illumina_qual(self):\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=0.75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/0\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes (old barcode in header format)\r\n header = \"HWI-6X_9267:1:1:4:1699#ACCACCC/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual fails filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (3,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual passes filter if filter turned off\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=False)\r\n self.assertEqual(actual, (0,\r\n 
\"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # good qual passes filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))", "def test_quality_flag_decoding_kepler():\n flags = list(KeplerQualityFlags.STRINGS.items())\n for key, value in flags:\n assert KeplerQualityFlags.decode(key)[0] == value\n # Can we recover combinations of flags?\n assert KeplerQualityFlags.decode(flags[5][0] + flags[7][0]) == [flags[5][1], flags[7][1]]\n assert KeplerQualityFlags.decode(flags[3][0] + flags[4][0] + flags[5][0]) \\\n == [flags[3][1], flags[4][1], flags[5][1]]", "def calculate_measureland_qualifier_flag_overall(row):\n\n mqf_tuple = (row['measureland_qualifier_flag_speed'],\n row['measureland_qualifier_flag_distance'],\n row['measureland_qualifier_flag_acceleration'],\n row['measureland_qualifier_flag_visual'])\n\n if mqf_tuple.count(3) >= 1:\n return 3 # probably bad value\n elif mqf_tuple.count(1) == len(mqf_tuple):\n return 1 # good value\n elif (mqf_tuple.count(9) >= 1) and (mqf_tuple.count(1) == (len(mqf_tuple) - mqf_tuple.count(9))):\n return 2 # probably good value\n elif (mqf_tuple.count(2) >= 1) and (mqf_tuple.count(1) == (len(mqf_tuple) - mqf_tuple.count(2))):\n return 2 # probably good value\n else:\n return 2 # values that have passed the quality check are likely to be of good quality according to the criteria used, so assign as probably good value", "def __parseQuantifiers(self):\n line = self.__nextLine()\n \n while line[0] in (\"e\", \"a\"): \n parts = line.split()\n if len(parts) > 2:\n typ = parts[0]\n if self.__lastQType == None:\n self.__lastQType = typ\n \n elif self.__lastQType == typ:\n self.__log(\"Not changing quantifiers\", \"SEVERE\")\n return False \n else:\n self.__lastQType = typ\n if parts[-1] == \"0\":\n variables = [Variable(x) for x in parts[1:-1] ]\n q = QuantifierList(typ, variables)\n self.__quantifierList.append(q)\n else:\n self.__log(\"Quantifier line not terminated with 0\",\"SEVERE\")\n return False\n else:\n self.__log(\"Quantifier line too short: %s\" % line, \"SEVERE\")\n return False\n \n line = self.__nextLine()\n self.__pushBackLine()\n if self.__lastQType == \"e\":\n return True\n else:\n self.__log(\"Not ending with e quantifier\")\n return False", "def test_MinimalQualParser(self):\r\n scores = ['>x', '5 10 5', '12',\r\n '>y', '30 40',\r\n '>a', '5 10 5', '12',\r\n '>b', '30 40']\r\n gen = list(MinimalQualParser(scores))\r\n self.assertItemsEqual(gen[0][1], [5, 10, 5, 12])\r\n self.assertItemsEqual(gen[1][1], [30, 40])\r\n self.assertItemsEqual(gen[2][1], [5, 10, 5, 12])\r\n self.assertItemsEqual(gen[3][1], [30, 40])", "def quality(value: str) -> str:\n if \"HDTV\" in value:\n return \"HDTV\"\n else:\n return \"SD\"", "def parse_qual_score(infile, value_cast_f=int):\r\n id_to_qual = dict([rec for rec in 
MinimalQualParser(infile, value_cast_f)])\r\n return id_to_qual", "def test_quality_filter_sequence_pass(self):\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))", "def quality(self):\n try:\n qid = int((self.tool_metadata or {}).get(\"quality\", 0))\n except:\n qid = 0\n\n # We might be able to get the quality strings from the item's tags\n internal_name, name = \"normal\", \"Normal\"\n if self.tags:\n tags = {x.get('category'): x for x in self.tags}\n if 'Quality' in tags:\n internal_name, name = tags['Quality'].get('internal_name'), tags['Quality'].get('name')\n\n return qid, internal_name, name", "def is_quantifier(s):\n return s == \"A\" or s == \"E\"", "def qual(args):\n from jcvi.formats.sizes import Sizes\n\n p = OptionParser(qual.__doc__)\n p.add_option(\n \"--qv\", default=31, type=\"int\", help=\"Dummy qv score for extended bases\"\n )\n p.set_outfile()\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n sizes = Sizes(fastafile)\n qvchar = str(opts.qv)\n fw = must_open(opts.outfile, \"w\")\n total = 0\n for s, slen in sizes.iter_sizes():\n print(\">\" + s, file=fw)\n print(\" \".join([qvchar] * slen), file=fw)\n total += 1\n fw.close()\n logging.debug(\"Written {0} records in `{1}`.\".format(total, opts.outfile))", "def quality_matcher(fasta, full_fastq, filt_fastq, trunclen):\n with open(fasta, \"r\") as fasta, open(full_fastq, \"r\") as fastq, open(filt_fastq, \"w\") as new_fastq:\n #make lists of the fasta and fastq files, where every successive value is a successive line\n #purpose of -1: to avoid the \"\\n\" newline character at the end of the lines\n fastq_list = [line[:-1] for line in fastq]\n fasta_list = [line[:-1] for line in fasta]\n #iterate through the sequence ids in the fasta file\n for fasta_index, fasta_id in enumerate(fasta_list):\n if fasta_id[0] == \">\":\n #get the list index of the matching sequence id in the metagenomic fastq file\n fastq_index = fastq_list.index(\"@{}\".format(fasta_id[1:]))\n #print and write a new fastq entry with the quality scores string truncated to the same length as the sequence from the fasta file\n print(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))\n new_fastq.write(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))", "def parse_quality_for_video (self, video):\n quality = '720'\n if video['videoQuality']['hasHD']:\n quality = '1080'\n if video['videoQuality']['hasUltraHD']:\n quality = '4000'\n return quality", "def test_make_qual(self):\r\n qual_fp = os.path.join(self.sff_dir, 'test.qual')\r\n qual_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.qual')\r\n make_qual(self.sff_fp, qual_fp)\r\n 
make_qual(self.sff_gz_fp, qual_gz_fp)\r\n self.assertEqual(open(qual_fp).read(), qual_txt)\r\n self.assertEqual(open(qual_gz_fp).read(), qual_txt)", "def testQualityDictinary(self):\n for qual in ['bq', 'hq', 'uq']:\n for res in ['1080', '720', '480']:\n try:\n int(self.quality[qual][res])\n except ValueError:\n self.assertNotEqual(\n self.quality[qual][res],\n self.config.quality[qual][res]\n )\n self.assertEqual(\n tools.QUALITY_DEFAULT,\n self.config.quality[qual][res]\n )\n else:\n self.assertEqual(\n int(self.quality[qual][res]),\n self.config.quality[qual][res]\n )", "def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),", "def _get_queries(args):\n if args.mode == '2DSEQ':\n queries = [\"@type=='2dseq'\", \"@is_spectroscopy==True\", \"@is_complex==True\"]\n elif args.mode == 'FID':\n queries = [\"@type=='fid'\", \"@is_spectroscopy==True\"]\n return queries + args.query", "def _quality_severity_type():\n return {\n 'name' : 'quality_severity_type',\n 'is_open' : False,\n 'doc' : None,\n 'members' : [\n ('cosmetic', None),\n ('minor', None),\n ('major', None),\n ],\n }", "def M_ver(seq):\r\n num_AB = 0\r\n num_AnotB = 0\r\n num_BnotA = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n if np.array_equal(item, Quantifier.AnotB):\r\n num_AnotB += 1\r\n if np.array_equal(item, Quantifier.BnotA):\r\n num_BnotA += 1\r\n return (Quantifier.T if num_AB + num_AnotB > num_AB + num_BnotA\r\n else Quantifier.F)", "def __parse_arg_num(content: str, is_optional: bool, is_flag: bool) -> (ArgNum, int):\n match = __QUANTIFIER_REGEX.match(content)\n\n n = None\n offset = 0\n\n if match is None:\n if is_flag:\n quantifier = Quantifier.FLAG\n elif is_optional:\n quantifier = Quantifier.OPTIONAL\n else:\n quantifier = Quantifier.N\n n = 1\n else:\n body = match.string[:match.end()]\n\n if body == \"...\":\n quantifier = Quantifier.ANY if is_optional else Quantifier.AT_LEAST_ONE\n offset = 3\n elif body == \"*\":\n quantifier = Quantifier.ANY\n offset = 1\n elif body[0] == \"{\":\n try:\n n = int(match.group(1))\n except ValueError as err:\n raise PatternError(f\"bad quantifier: {err}\")\n\n if n == 0:\n quantifier = Quantifier.FLAG\n n = None\n else:\n quantifier = Quantifier.N\n\n offset = match.end()\n else:\n raise PatternError(f\"unknown quantifier found: '{match.string[:match.end()]}\")\n\n if is_optional and (quantifier == Quantifier.N and n != 1):\n raise PatternError(\"optional argument values must only have a Quantifier of 1\")\n\n return ArgNum(quantifier, n), offset", "def read_flags():\n return flag_args", "def representsQualifier(self, *args):\n return _libsbml.ASTBasePlugin_representsQualifier(self, *args)", "def test_sample_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def _is_memory_usage_qualified(self) -> bool:\n\n def f(level) -> bool:\n return \"mixed\" in level or \"string\" in level or \"unicode\" in level\n\n return any(f(level) for level in self._inferred_type_levels)", "def is_quantifier(self):\n 
return RSTR_ROLE in self.args", "def test_avp_flags(self):\n self._compare_avp(\n avp.UnknownAVP(0, b''),\n memoryview(b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x08'),\n )\n\n avp_val = avp.UnknownAVP(0, b'', flags=avp.FLAG_MANDATORY)\n self._compare_avp(\n avp_val,\n b'\\x00\\x00\\x00\\x00@\\x00\\x00\\x08',\n )\n self.assertFalse(avp_val.vendor_specific)\n self.assertTrue(avp_val.mandatory)\n self.assertFalse(avp_val.protected)\n\n avp_val = avp.UnknownAVP(0, b'', flags=avp.FLAG_PROTECTED)\n self._compare_avp(\n avp_val,\n b'\\x00\\x00\\x00\\x00 \\x00\\x00\\x08',\n )\n self.assertFalse(avp_val.vendor_specific)\n self.assertFalse(avp_val.mandatory)\n self.assertTrue(avp_val.protected)\n\n avp_val = avp.UnknownAVP(\n 0, b'', flags=avp.FLAG_VENDOR,\n vendor=avp.VendorId.TGPP,\n )\n self._compare_avp(\n avp_val,\n b'\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x0c\\x00\\x00(\\xaf',\n )\n self.assertTrue(avp_val.vendor_specific)\n self.assertFalse(avp_val.mandatory)\n self.assertFalse(avp_val.protected)\n\n avp_val = avp.UnknownAVP(\n 0, b'', flags=avp.FLAG_VENDOR\n | avp.FLAG_MANDATORY,\n vendor=avp.VendorId.TGPP,\n )\n self._compare_avp(\n avp_val,\n b'\\x00\\x00\\x00\\x00\\xc0\\x00\\x00\\x0c\\x00\\x00(\\xaf',\n )\n self.assertTrue(avp_val.vendor_specific)\n self.assertTrue(avp_val.mandatory)\n self.assertFalse(avp_val.protected)\n\n avp_val = avp.UnknownAVP(\n 0, b'', flags=avp.FLAG_VENDOR\n | avp.FLAG_MANDATORY\n | avp.FLAG_PROTECTED,\n vendor=avp.VendorId.TGPP,\n )\n self._compare_avp(\n avp_val,\n b'\\x00\\x00\\x00\\x00\\xe0\\x00\\x00\\x0c\\x00\\x00(\\xaf',\n )\n self.assertTrue(avp_val.vendor_specific)\n self.assertTrue(avp_val.mandatory)\n self.assertTrue(avp_val.protected)", "async def qualifier(client, event):\n return HATA_JAM_2_QUALIFIER", "def read_qual_score_filter(seq, qual, max_run_length, threshold):\r\n last_good_slice_end_pos = 0\r\n bad_run_length = 0\r\n for i in range(len(seq)):\r\n if qual[i] <= threshold:\r\n bad_run_length += 1\r\n else:\r\n bad_run_length = 0\r\n last_good_slice_end_pos = i + 1\r\n\r\n if bad_run_length > max_run_length:\r\n return seq[:last_good_slice_end_pos],\\\r\n qual[:last_good_slice_end_pos]\r\n\r\n # There were no runs that were too bad for too long\r\n return seq, qual", "def _CheckDataQuality(self, size_info=None, track_string_literals=True):\n size_info = size_info or self._size_infos[0]\n data_quality.CheckDataQuality(size_info, track_string_literals)", "def test_sample_one_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')" ]
[ "0.6236157", "0.591436", "0.562498", "0.5375986", "0.5312736", "0.528509", "0.5248987", "0.52123636", "0.51925087", "0.5133117", "0.5107898", "0.5096849", "0.50931865", "0.5090493", "0.5027097", "0.49722502", "0.49395365", "0.49296382", "0.49054652", "0.48811412", "0.4877389", "0.48755467", "0.48607868", "0.48602065", "0.4856489", "0.4847994", "0.4839237", "0.4811275", "0.48097938", "0.47830382" ]
0.59472173
1
Can we create a quality mask using KeplerQualityFlags?
def test_quality_mask(): quality = np.array([0, 0, 1]) assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=0)) assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=None)) assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask='none')) assert (KeplerQualityFlags.create_quality_mask(quality, bitmask=1)).sum() == 2 assert (KeplerQualityFlags.create_quality_mask(quality, bitmask='hardest')).sum() == 2 # Do we see a ValueError if an invalid bitmask is passed? with pytest.raises(ValueError) as err: KeplerQualityFlags.create_quality_mask(quality, bitmask='invalidoption') assert "not supported" in err.value.args[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_quality_flag_decoding_kepler():\n flags = list(KeplerQualityFlags.STRINGS.items())\n for key, value in flags:\n assert KeplerQualityFlags.decode(key)[0] == value\n # Can we recover combinations of flags?\n assert KeplerQualityFlags.decode(flags[5][0] + flags[7][0]) == [flags[5][1], flags[7][1]]\n assert KeplerQualityFlags.decode(flags[3][0] + flags[4][0] + flags[5][0]) \\\n == [flags[3][1], flags[4][1], flags[5][1]]", "def test_quality_flag_decoding_tess():\n flags = list(TessQualityFlags.STRINGS.items())\n for key, value in flags:\n assert TessQualityFlags.decode(key)[0] == value\n # Can we recover combinations of flags?\n assert TessQualityFlags.decode(flags[5][0] + flags[7][0]) == [flags[5][1], flags[7][1]]\n assert TessQualityFlags.decode(flags[3][0] + flags[4][0] + flags[5][0]) \\\n == [flags[3][1], flags[4][1], flags[5][1]]", "def gen_img_settings_quality(l):\n \n lhalf = 0.5*l\n \n ### sphere radius\n \n sphere_radius = 0.7\n #sphere_rgbcolor = [0.25,0.65,0.65]\n \n ### RESOLUTION\n \n img_widthpx = 1024\n img_heightpx = 1024\n\n ### includes and defaults\n\n povray_includes = [\"colors.inc\", \"textures.inc\", \"shapes.inc\"]\n povray_defaults = [vapory.Finish( 'ambient', 0.1,\n\t \t\t\t 'diffuse', 0.65,\n\t\t \t\t 'specular', 0.5,\n\t\t\t \t 'shininess', 0.53,\n\t\t\t\t 'opacity', 1.0)]\n\n\n ### light sources\n\n sun1 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', 'White')\n sun2 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', [0.7, 0.7, 0.7])\n\n ### background\n\n background = vapory.Background('color', [1,1,1])\n\n ### camera\n\n #povray_cam = vapory.Camera('angle', 75, 'location', [-15 , 15.0+0.5,15.0-0.25],'look_at', [0.25 , 15.0+0.5, 15.0-0.25])\n povray_cam = vapory.Camera('location', [lhalf, lhalf, -1.01*lhalf], 'look_at', [lhalf,lhalf,0], 'angle', 90)\n\n ### text\n # If desired include this in the povray_objects - array declared in the loop\n #text1 = vapory.Text( 'ttf', '\"timrom.ttf\"' ,'\"Division:\"', 0.01, 0.0, 'scale', [0.5,0.5,0.5],'rotate', [0,90,0], 'translate' , [0.0 , 15.0+2.75-1 , 15.0+1.5], vapory.Pigment('Black') ) \n\n ### render quality\n\n quality = 10\n \n return sphere_radius, img_widthpx, img_heightpx, povray_includes, povray_defaults, sun1, sun2, background, povray_cam, quality", "def set_quality(self):\n p = self.suitability + 1.15 * self.fono\n self.quality = np.exp(p) / (1 + np.exp(p))", "def __init__(self, resolution: int, mask: int = 0xFFFFFFFF, /):", "def audio_quality_key(option):\n return (\n AUDIO_RATING_DICT[option.media_type.audio_format],\n option.media_type.audio_bitrate\n )", "def quality(self, value: int):\n # TODO - Ensure that this is valid\n self._quality = value", "def quality(self):\n return self.plays * self.number", "def testQualityDictinary(self):\n for qual in ['bq', 'hq', 'uq']:\n for res in ['1080', '720', '480']:\n try:\n int(self.quality[qual][res])\n except ValueError:\n self.assertNotEqual(\n self.quality[qual][res],\n self.config.quality[qual][res]\n )\n self.assertEqual(\n tools.QUALITY_DEFAULT,\n self.config.quality[qual][res]\n )\n else:\n self.assertEqual(\n int(self.quality[qual][res]),\n self.config.quality[qual][res]\n )", "def compression_quality(self, quality):\n if not isinstance(quality, numbers.Integral):\n raise TypeError('compression quality must be a natural '\n 'number, not ' + repr(quality))\n r = library.MagickSetImageCompressionQuality(self.wand, quality)\n if not r:\n raise ValueError('Unable to set compression quality to ' +\n repr(quality))", "def 
enforce_quality_limits(self):\n if self.orig_quality <= 50:\n if self.quality >= 50:\n self.quality = 50", "def quality(value: str) -> str:\n if \"HDTV\" in value:\n return \"HDTV\"\n else:\n return \"SD\"", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def get_sample_mask(self):", "def test_quality_filter_illumina_qual(self):\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=0.75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/0\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes (old barcode in header format)\r\n header = \"HWI-6X_9267:1:1:4:1699#ACCACCC/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual fails filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (3,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual passes filter if filter turned off\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n 
\"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=False)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # good qual passes filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))", "def video_quality_key(option):\n return (\n VIDEO_RATING_DICT[option.media_type.file_type],\n product(option.media_type.resolution),\n option.media_type.video_bitrate,\n )", "def SetInterpolationQuality(*args, **kwargs):\n return _gdi_.GraphicsContext_SetInterpolationQuality(*args, **kwargs)", "def test_apply_flags():\n true_value = dqflags.pixel['HOT'] + dqflags.pixel['DO_NOT_USE']\n\n print(true_value)\n\n badmap = np.zeros((10, 10), dtype=np.int)\n true_map = np.zeros((10, 10), dtype=np.uint32)\n for i in range(10):\n badmap[i, i] = 1\n true_map[i, i] = true_value\n\n\n print(true_map)\n\n\n flag_names = ['HOT', 'DO_NOT_USE']\n pixmap = bpd.apply_flags(badmap, flag_names)\n\n\n print(pixmap)\n\n\n assert np.all(pixmap == true_map)", "def genQuality(self):\n return np.clip(np.random.normal(self.qavgs, self.qstdevs), 0, 40)", "def _cim_quality():\n return {\n 'type' : 'class',\n 'name' : 'cim_quality',\n 'base' : None,\n 'is_abstract' : False,\n 'is_entity' : True,\n 'doc' : 'The starting point for a quality record. It can contain any number of issues and reports. An issue is an open-ended description of some issue about a CIM instance. A record is a prescribed description of some specific quantitative measure that has been applied to a CIM instance.',\n 'properties' : [\n ('meta', 'shared.doc_meta_info', '1.1', None),\n ('reports', 'quality.report', '0.N', None),\n ],\n 'decodings' : [\n ('meta', 'self::cim:cIM_Quality'),\n ('reports', 'child::cim:report'),\n ]\n }", "def set_printer_quality(self, level):\n self._info(\"set_quality\")\n self.parent.printer.set_quality(level)", "def apply_quality(grid,metric):\n\n qualityFilter=vtk.vtkMeshQuality()\n qualityFilter.SetInputData(grid)\n\n metric_fun_name = 'SetTetQualityMeasureTo'+metric\n\n getattr(qualityFilter, metric_fun_name)()\n\n qualityFilter.Update()\n\n\n return qualityFilter", "def GachaCraftNodeExcelAddNodeQuality(builder, NodeQuality):\n return AddNodeQuality(builder, NodeQuality)", "def test_saturation_mixing_ratio_dimensions():\n p = 998. 
* units.mbar\n temp = 20 * units.celsius\n assert str(saturation_mixing_ratio(p, temp).units) == 'dimensionless'", "def _quality_severity_type():\n return {\n 'name' : 'quality_severity_type',\n 'is_open' : False,\n 'doc' : None,\n 'members' : [\n ('cosmetic', None),\n ('minor', None),\n ('major', None),\n ],\n }", "def wrap_quality_descriptor(self, overflow, blocked, substituted, not_topical, invalid):\n if not overflow in [0,1]:\n return \"ERROR: Overflow bit has to be 0 or 1.\"\n if not blocked in [0,1]:\n return \"ERROR: Blocked bit has to be 0 or 1.\"\n if not substituted in [0,1]:\n return \"ERROR: Substituted bit has to be 0 or 1.\"\n if not not_topical in [0,1]:\n return \"ERROR: Not topical bit has to be 0 or 1.\"\n if not invalid in [0,1]:\n return \"ERROR: Invalid bit has to be 0 or 1.\"\n bl = 16 if blocked == 1 else 0\n sb = 32 if substituted == 1 else 0\n nt = 64 if not_topical == 1 else 0\n iv = 128 if invalid == 1 else 0\n return struct.pack('<B', overflow + bl + sb + nt + iv)", "def post_hoc_mask(Q_neff_map, U_neff_map, Q_sigma_map, U_sigma_map, mask_filename,\n neff_thresh=10000, sigma_thresh=0.7):\n\n mask = np.ones(len(Q_neff_map))\n mask[Q_neff_map < neff_thresh] = 0\n mask[U_neff_map < neff_thresh] = 0\n frac = len(mask[mask == 0]) / len(mask)\n print(f'Fraction masked after neff = {frac}')\n \n assert 0 <= sigma_thresh <= 1\n\n if sigma_thresh != 1.0:\n \n bw = 2 * iqr(Q_sigma_map) / len(Q_sigma_map)**(1 / 3)\n num_bins = int((np.amax(Q_sigma_map) - np.amin(Q_sigma_map)) / bw)\n hist, bins = np.histogram(Q_sigma_map, bins=num_bins, normed=True)\n dx = bins[1] - bins[0]\n qcum = np.cumsum(hist) * dx\n\n qspline = interp1d(bins[1:], qcum, kind='cubic', fill_value='extrapolate')\n qsol = root_scalar(lambda x: qspline(x) - sigma_thresh, x0 = np.mean(Q_sigma_map), method='bisect',\n bracket=[np.amin(bins[1:]), np.amax(bins[1:])])\n\n bw = 2 * iqr(U_sigma_map) / len(U_sigma_map)**(1 / 3)\n num_bins = int((np.amax(U_sigma_map) - np.amin(U_sigma_map)) / bw)\n hist, bins = np.histogram(U_sigma_map, bins=num_bins, normed=True)\n dx = bins[1] - bins[0]\n ucum = np.cumsum(hist) * dx\n\n uspline = interp1d(bins[1:], ucum, kind='cubic', fill_value='extrapolate')\n usol = root_scalar(lambda x: uspline(x) - sigma_thresh, x0 = np.mean(U_sigma_map), method='bisect',\n bracket=[np.amin(bins[1:]), np.amax(bins[1:])])\n\n mask[Q_sigma_map > qsol.root] = 0\n mask[U_sigma_map > usol.root] = 0\n\n hp.write_map(mask_filename, mask, overwrite=True)\n\n return mask", "def test_sample_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def quality(self) -> int:\n return self._quality", "def test_scale_image_with_dq(self):\n outfile = self.filename.replace('test.fits', 'out_masked.fits')\n parsedq = DQParser(\n get_pkg_data_filename('data/dqflags_jwst.txt', package='stginga'))\n scale_image_with_dq(\n self.filename, outfile, 0.5, parsedq, kernel_width=5,\n sci_ext='SCI', dq_ext='DQ', bad_flag=self.bad_flag,\n ignore_edge_pixels=1)\n ans = [[0, 2, 5, 7, 9],\n [22, 23, 27, 30, 31],\n [45, 46, 37, 51, 54],\n [68, 71, 83, 75, 77],\n [90, 92, 95, 97, 99]]\n with fits.open(outfile) as pf:\n assert_allclose(pf[0].data, ans)" ]
[ "0.6650363", "0.5603464", "0.55853474", "0.55749416", "0.5567535", "0.5443825", "0.53447133", "0.53097266", "0.53067493", "0.52518195", "0.5240438", "0.518615", "0.5174121", "0.5170726", "0.51531535", "0.50274515", "0.50269926", "0.50269127", "0.50144744", "0.50123453", "0.5010792", "0.49925354", "0.49877977", "0.49793273", "0.49675903", "0.49406394", "0.4934996", "0.4931521", "0.49310726", "0.49231192" ]
0.7043028
0
True if element is \\s or \n
def is_sentence_end(mystem_element): word = mystem_element.get('text', '') return word == '\\s' or word == '\n'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_whitespace(char):\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False", "def _is_whitespace(char):\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False", "def _is_whitespace(char):\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True # pragma: no cover\n return False", "def isspace(self) -> bool:\n pass", "def _is_whitespace(char):\n if char == ' ' or char == '\\t' or char == '\\n' or char == '\\r':\n return True\n cat = unicodedata.category(char)\n if cat == 'Zs':\n return True\n return False", "def determine_if_whitespace(self):\n value = self.current.value\n\n if value == \"\\n\":\n self.is_space = True\n else:\n self.is_space = False\n if value == \"\" or regexes[\"whitespace\"].match(value):\n self.is_space = True", "def _has_newline(line) -> bool:\n if line and (\"\\r\" in line or \"\\n\" in line):\n return True\n return False", "def test_with_newline(self):\n self.assertEqual(escapespaces('Hi there\\n'),\n 'Hi&nbsp; there<br />')", "def has_whitespace(string):\n temp = string.split()\n return len(temp) > 1", "def has_whitespace(s):\n for c in string.whitespace:\n if c in s:\n return True\n return False", "def line_valid(line: str) -> bool:\n\n return line != ' ' and line != ''", "def isspace(a):\n return _vec_string(a, bool_, 'isspace')", "def isspace(self):\n return isspace(self)", "def single_line_paragraph(s: str) -> bool:\n return s.startswith('@') or s.strip() in ('\"\"\"', \"'''\")", "def ends_paragraph(s: str) -> bool:\n return not s.strip()", "def is_whitespace(self) -> bool:\n return all(seg.is_whitespace for seg in self.segments)", "def is_unicode(space, w_obj):\n return space.wrap(True)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def is_blank(line):\n return line.strip(\" \\t\") == \"\\n\"", "def test_assert_does_not_contain_newline(self):\n\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a newline (0x0a) character\")):\n api._assert_does_not_contain(\"There is a newline (\\n) in this string.\", \"\\n\", \"quote\")", "def eol(self):\n return self.pos == len(self.tokens)", "def is_void(line):\n\n for i in line:\n if i!=' ' and i!='\\t' and i!='\\n':\n return False\n return True", "def _is_blank_line(self):\n pattern = re.compile(r\"^(\\s)*$\")\n return pattern.search(self._line)", "def IsMultiline(self):\r\n\r\n return \"\\n\" in self.caption", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def has_whitespaces(string):\n if not isinstance(string, STRTYPE):\n print(str(string) + \" (\" + str(type(string)) + \") is not a string!\")\n return False\n has_ws = False\n for char in string:\n has_ws |= char.isspace()\n return has_ws", "def have_space_symbol(l):\r\n if \" \" in str(l):\r\n return 1\r\n else:\r\n return 0", 
"def is_word(mystem_element):\n word = mystem_element.get('text', '')\n if len(word.strip()) > 0:\n return True\n return False", "def is_blank(text):\n all([c.isspace() for c in text])", "def is_eof(line):\n return line == \"\"" ]
[ "0.6634618", "0.65383315", "0.65339756", "0.6481999", "0.63937706", "0.6269705", "0.6221383", "0.59168893", "0.5899913", "0.5853028", "0.5779206", "0.5760287", "0.57525533", "0.56327254", "0.56196755", "0.5581376", "0.5564806", "0.5559176", "0.5556629", "0.5535566", "0.5502369", "0.55013347", "0.5482385", "0.54755616", "0.5454494", "0.54386824", "0.54277176", "0.54090637", "0.5392859", "0.5391331" ]
0.683654
0
wrap for morpho_doc with local session
def morpho_doc2(doc_id): db.doc_apply(doc_id, morpho_doc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def document(self):\n ...", "def __init__(self, temboo_session):\n super(DownloadDocument, self).__init__(temboo_session, '/Library/Zoho/Writer/DownloadDocument')", "def __call__(self, doc):\n return doc", "def generate_docs(root_dir, session):\n ...", "def documento():\r\n\tpass", "def __init__(self, blip_data, context):\n super(OpBasedDocument, self).__init__(blip_data)\n self.__context = context", "def build_document(self):\n pass", "def new_document(self) -> nodes.document:\n document = super().new_document()\n document.__class__ = addnodes.document # replace the class with patched version\n\n # substitute transformer\n document.transformer = SphinxTransformer(document)\n document.transformer.set_environment(self.settings.env)\n\n # substitute reporter\n reporter = document.reporter\n document.reporter = LoggingReporter.from_reporter(reporter)\n\n return document", "def find_document(self):\n pass", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def session(self):", "def _document(self, node):\n doc = Document(self._text(node, 'id', ifnone=None))\n doc.metadata = self.infon_dict(node)\n doc.relations = self._get_relations(node)\n\n offset_mngr = self._offset_mngr()\n for passage in self._iterfind(node, 'passage'):\n type_, text, offset, infon, anno = self._section(passage,\n offset_mngr)\n section = doc.add_section(type_, text, offset, anno, entity_offset=0)\n section.metadata = infon\n section.relations = self._get_relations(passage)\n # Get infon elements and relations at sentence level.\n for sent, sent_node in zip(section,\n self._iterfind(passage, 'sentence')):\n sent.metadata = self.infon_dict(sent_node)\n sent.relations = self._get_relations(sent_node)\n\n doc.sanitize_relations()\n\n return doc", "def edit_document():", "def dummy(doc):\r\n return doc", "def setup_document(document_name=\"fSCAD-Preview\"):\n preview_doc = None\n saved_camera = None\n saved_units = None\n for document in app().documents:\n if document.name == document_name:\n preview_doc = document\n break\n if preview_doc is not None:\n preview_doc.activate()\n saved_camera = app().activeViewport.camera\n saved_units = design().fusionUnitsManager.distanceDisplayUnits\n preview_doc.close(False)\n\n preview_doc = app().documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType)\n preview_doc.name = document_name\n preview_doc.activate()\n if saved_camera is not None:\n is_smooth_transition_bak = saved_camera.isSmoothTransition\n saved_camera.isSmoothTransition = False\n app().activeViewport.camera = saved_camera\n saved_camera.isSmoothTransition = is_smooth_transition_bak\n app().activeViewport.camera = saved_camera\n if saved_units is not None:\n design().fusionUnitsManager.distanceDisplayUnits = saved_units\n design().designType = adsk.fusion.DesignTypes.DirectDesignType", "def _to_document(self, document):\n obj = self.document()\n obj._set_from_db(document)\n return obj", "def GetDocument(self, *args, **kwargs):\n pass", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def __init__(self, temboo_session):\n super(EntityOverview, self).__init__(temboo_session, '/Library/InfluenceExplorer/EntityOverview')", "def _create_infer_session(self):\r\n model_session = mslite.Model()\r\n model_session.build_from_file(self.model_file,\r\n self.model_type,\r\n self.context)\r\n return model_session", "def run_local_doc():\n\tcfg = settings.LocalConfig()\n\tapp = make_app(blueprints.developer_portal, 
settings.LocalConfig)\n\tapp.run(host = cfg.SERVERNAME, port = cfg.DOC_PORT, debug = True)", "def get_kml_document(kml_obj: fastkml.kml.KML) -> fastkml.Document:\n\t\n\treturn next(kml_obj.features())", "def __init__(self, session):\n self._session = session", "def _getForDocument (self):\n return self.__forDocument", "def morpho_doc(doc):\n doc_text = doc.stripped\n mystem_analyzer.start()\n # new_morpho = mystem_analyzer.analyze(doc_text)\n new_morpho = mystem_analyzer.analyze(doc_text.replace('\\n',''))\n\n morpho_list = []\n\n for element in new_morpho: # разрезаем\n\n if is_sentence_end(element):\n morpho_list.append(element)\n else:\n\n line = element.get('text', '')\n\n space_len = 0\n\n word_start = -1\n word_len = 0\n\n symbol_number = -1\n for symbol in line:\n\n symbol_number+=1\n\n if symbol == \"'\" or symbol == '\"' or symbol == '»' or symbol == '«':\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n # добавим кавычку\n new_element = {'text': symbol}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n elif symbol == \" \":\n\n if word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n space_len += 1\n\n else:\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n if word_start == -1:\n word_start = symbol_number\n word_len = 1\n else:\n word_len += 1\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n\n morpho_list.append(new_element)\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n for i in range(len(morpho_list) - 1): # переставляем\n if i > 0:\n if morpho_list[i - 1]['text'] == ' ' and morpho_list[i]['text'] == '\"' and morpho_list[i + 1]['text'] == '\\\\s':\n morpho_list[i], morpho_list[i + 1] = morpho_list[i + 1], morpho_list[i]\n\n sentence_index = 0\n word_index = 0\n start_offset = 0\n\n for element in morpho_list: # нумеруем\n if is_sentence_end(element):\n if word_index != 0:\n sentence_index += 1\n word_index = 0\n else:\n line = element.get('text', '')\n line_len = len(line)\n\n if(line[0]!=' '):\n element['start_offset'] = start_offset\n element['end_offset'] = start_offset + line_len - 1\n element['word_index'] = word_index\n element['sentence_index'] = sentence_index\n\n word_index += 1\n start_offset += line_len\n\n doc.morpho = morpho_list\n mystem_analyzer.close()", "def __init__(self, temboo_session):\n 
super(SearchByReviewer, self).__init__(temboo_session, '/Library/NYTimes/MovieReviews/SearchByReviewer')", "def get_document(self):\n return self.document", "def export_docs(fp, app_name):\n from otree.models import Session\n from otree.models import Participant\n from otree.views.admin import get_all_fields\n\n # generate doct_dict\n models_module = get_models_module(app_name)\n\n model_names = [\"Participant\", \"Player\", \"Group\", \"Subsession\", \"Session\"]\n line_break = '\\r\\n'\n\n def choices_readable(choices):\n lines = []\n for value, name in choices:\n # unicode() call is for lazy translation strings\n lines.append(u'{}: {}'.format(value, six.text_type(name)))\n return lines\n\n def generate_doc_dict():\n doc_dict = OrderedDict()\n\n data_types_readable = {\n 'PositiveIntegerField': 'positive integer',\n 'IntegerField': 'integer',\n 'BooleanField': 'boolean',\n 'CharField': 'text',\n 'TextField': 'text',\n 'FloatField': 'decimal',\n 'DecimalField': 'decimal',\n 'CurrencyField': 'currency'}\n\n for model_name in model_names:\n if model_name == 'Participant':\n Model = Participant\n elif model_name == 'Session':\n Model = Session\n else:\n Model = getattr(models_module, model_name)\n\n field_names = set(field.name for field in Model._meta.fields)\n\n members = get_all_fields(Model, for_export=True)\n doc_dict[model_name] = OrderedDict()\n\n for member_name in members:\n member = getattr(Model, member_name, None)\n doc_dict[model_name][member_name] = OrderedDict()\n if member_name == 'id':\n doc_dict[model_name][member_name]['type'] = [\n 'positive integer']\n doc_dict[model_name][member_name]['doc'] = ['Unique ID']\n elif member_name in field_names:\n member = Model._meta.get_field_by_name(member_name)[0]\n\n internal_type = member.get_internal_type()\n data_type = data_types_readable.get(\n internal_type, internal_type)\n\n doc_dict[model_name][member_name]['type'] = [data_type]\n\n # flag error if the model doesn't have a doc attribute,\n # which it should unless the field is a 3rd party field\n doc = getattr(member, 'doc', '[error]') or ''\n doc_dict[model_name][member_name]['doc'] = [\n line.strip() for line in doc.splitlines()\n if line.strip()]\n\n choices = getattr(member, 'choices', None)\n if choices:\n doc_dict[model_name][member_name]['choices'] = (\n choices_readable(choices))\n elif isinstance(member, collections.Callable):\n doc_dict[model_name][member_name]['doc'] = [\n inspect.getdoc(member)]\n return doc_dict\n\n def docs_as_string(doc_dict):\n\n first_line = '{}: Documentation'.format(app_name_format(app_name))\n second_line = '*' * len(first_line)\n\n lines = [\n first_line, second_line, '',\n 'Accessed: {}'.format(datetime.date.today().isoformat()), '']\n\n app_doc = getattr(models_module, 'doc', '')\n if app_doc:\n lines += [app_doc, '']\n\n for model_name in doc_dict:\n lines.append(model_name)\n\n for member in doc_dict[model_name]:\n lines.append('\\t{}'.format(member))\n for info_type in doc_dict[model_name][member]:\n lines.append('\\t\\t{}'.format(info_type))\n for info_line in doc_dict[model_name][member][info_type]:\n lines.append(u'{}{}'.format('\\t' * 3, info_line))\n\n output = u'\\n'.join(lines)\n return output.replace('\\n', line_break).replace('\\t', ' ')\n\n doc_dict = generate_doc_dict()\n doc = docs_as_string(doc_dict)\n fp.write(doc)", "def __init__(self):\n # Initialise class attributes (visibility ease)\n self.__corpus__ = None\n self.__pron_det_pos_words__ = None\n self.__triples_corpus__ = None\n self.__entities_in_doc__ = None\n 
self.__wvmodel__ = None\n \n # For purpose of parsing relation triplets later\n # Load pretrained embedding model\n #plog('Loading pretrained word embeddings. This will take some time to load...')\n #self.__wvmodel__ = api.load('fasttext-wiki-news-subwords-300')\n #plog('Pretrained word embeddings loaded!')" ]
[ "0.6169525", "0.6089714", "0.5754191", "0.5680773", "0.5625634", "0.5560101", "0.5555134", "0.5552288", "0.54824644", "0.54459333", "0.54459333", "0.54404557", "0.54239726", "0.5403701", "0.5398525", "0.5373541", "0.52517116", "0.52360654", "0.5235492", "0.52008027", "0.51755744", "0.51173806", "0.50972277", "0.5095104", "0.50725454", "0.5070352", "0.50659996", "0.5050931", "0.5047698", "0.504437" ]
0.6099428
1
morphological analysis for document
def morpho_doc(doc): doc_text = doc.stripped mystem_analyzer.start() # new_morpho = mystem_analyzer.analyze(doc_text) new_morpho = mystem_analyzer.analyze(doc_text.replace('\n','')) morpho_list = [] for element in new_morpho: # разрезаем if is_sentence_end(element): morpho_list.append(element) else: line = element.get('text', '') space_len = 0 word_start = -1 word_len = 0 symbol_number = -1 for symbol in line: symbol_number+=1 if symbol == "'" or symbol == '"' or symbol == '»' or symbol == '«': if space_len > 0: # добавим пробелы cur_space = ' ' * space_len new_element = {'text': cur_space} if 'analysis' in element: new_element['analysis'] = element['analysis'] morpho_list.append(new_element) space_len = 0 elif word_start > -1: # добавим слово cur_word = line[word_start:(word_start + word_len)] new_element = {'text': cur_word} if 'analysis' in element: new_element['analysis'] = element['analysis'] morpho_list.append(new_element) word_start = -1 word_len = 0 # добавим кавычку new_element = {'text': symbol} if 'analysis' in element: new_element['analysis'] = element['analysis'] morpho_list.append(new_element) elif symbol == " ": if word_start > -1: # добавим слово cur_word = line[word_start:(word_start + word_len)] new_element = {'text': cur_word} if 'analysis' in element: new_element['analysis'] = element['analysis'] morpho_list.append(new_element) word_start = -1 word_len = 0 space_len += 1 else: if space_len > 0: # добавим пробелы cur_space = ' ' * space_len new_element = {'text': cur_space} if 'analysis' in element: new_element['analysis'] = element['analysis'] morpho_list.append(new_element) space_len = 0 if word_start == -1: word_start = symbol_number word_len = 1 else: word_len += 1 if space_len > 0: # добавим пробелы cur_space = ' ' * space_len new_element = {'text': cur_space} if 'analysis' in element: new_element['analysis'] = element['analysis'] morpho_list.append(new_element) elif word_start > -1: # добавим слово cur_word = line[word_start:(word_start + word_len)] new_element = {'text': cur_word} if 'analysis' in element: new_element['analysis'] = element['analysis'] morpho_list.append(new_element) for i in range(len(morpho_list) - 1): # переставляем if i > 0: if morpho_list[i - 1]['text'] == ' ' and morpho_list[i]['text'] == '"' and morpho_list[i + 1]['text'] == '\\s': morpho_list[i], morpho_list[i + 1] = morpho_list[i + 1], morpho_list[i] sentence_index = 0 word_index = 0 start_offset = 0 for element in morpho_list: # нумеруем if is_sentence_end(element): if word_index != 0: sentence_index += 1 word_index = 0 else: line = element.get('text', '') line_len = len(line) if(line[0]!=' '): element['start_offset'] = start_offset element['end_offset'] = start_offset + line_len - 1 element['word_index'] = word_index element['sentence_index'] = sentence_index word_index += 1 start_offset += line_len doc.morpho = morpho_list mystem_analyzer.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, doc):\n # don't try to process null notes\n if not doc[1]:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n # odd notes may throw an error. Just continue rather than stopping the entire process\n try:\n sentences = self.sentence_tokenizer.segToSentenceSpans(doc[1])\n except KeyError:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n\n #context_doc = pyConTextGraph.ConTextDocument() # ConTextDoc not needed for simple usage\n\n doc_annots = list()\n\n for sentence in sentences:\n # run sentence tokenizer on input text, return the spans\n sentence_text = doc[1][sentence.begin:sentence.end]\n # process every sentence by adding markup\n markup = pyConTextGraph.ConTextMarkup()\n markup.setRawText(sentence_text)\n markup.cleanText()\n # apply targets and modifiers\n markup.markItems(self.targets, mode=\"target\")\n markup.markItems(self.modifiers, mode=\"modifier\")\n # address scope of modifiers to targets, remove inactive modifiers and self-modifying relationships\n markup.pruneMarks()\n markup.applyModifiers()\n markup.pruneSelfModifyingRelationships()\n markup.dropInactiveModifiers()\n\n marked_targets = markup.getMarkedTargets()\n for marked_target in marked_targets:\n modifiers = markup.getModifiers(marked_target)\n if not modifiers:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0]+'_unspecified', marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0], 'unspecified', marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n else:\n for modifier in modifiers:\n if marked_target.getSpan()[0] < modifier.getSpan()[0]:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+modifier.getSpan()[1])\n else:\n span = (sentence.begin+modifier.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0]+'_'+modifier.getCategory()[0], marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0], modifier.getCategory()[0], marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n\n #context_doc.addMarkup(markup)\n\n return doc_annots", "def urdu_morph_analyze(word,urdu_morfessor_model,urdu_script_check_re):\n\n def urdu_morphanalysis_needed(word):\n return urdu_script_check_re.match(word) \n\n m_list=[]\n if urdu_morphanalysis_needed(word): \n val=urdu_morfessor_model.viterbi_segment(word)\n m_list=val[0]\n else:\n m_list=[word]\n return m_list", "def __extract_morphological_information(self, mrph_object, is_feature, is_surface):\n assert isinstance(mrph_object, pyknp.Morpheme)\n assert isinstance(is_feature, bool)\n assert isinstance(is_surface, bool)\n\n surface = mrph_object.midasi\n word_stem = mrph_object.genkei\n\n tuple_pos = (mrph_object.hinsi, mrph_object.bunrui)\n\n misc_info = {\n 'katuyou1': mrph_object.katuyou1,\n 'katuyou2': mrph_object.katuyou2,\n 'imis': mrph_object.imis,\n 'repname': mrph_object.repname\n }\n\n token_object = TokenizedResult(\n node_obj=None,\n tuple_pos=tuple_pos,\n word_stem=word_stem,\n word_surface=surface,\n is_feature=is_feature,\n is_surface=is_surface,\n 
misc_info=misc_info\n )\n\n return token_object", "def get_morpho_synt_stats(self,s,t,i): \n put_feature_value_list(self.stats,\"pos_unigr\", t.pos)\n\n #Verbs\n \n if t.pos == \"VB\":\n\n if not self.stats[\"finite\"]:\n if (\"INF\" not in t.msd) and (\"SUP\" not in t.msd) and (\"PRF\" not in t.msd):\n # only modal verb as finite verb without VG not allowed\n if t.lemma: \n if t.lemma[0] in self.modal_verb_list: #få, ska sometimes non-modal use\n try:\n ch_deprel = [tt.deprel for tt in self.stats[\"heads\"][t.ref]]\n if \"VG\" in ch_deprel:\n put_feature_value_list(self.stats, \"finite\", 1.0)\n except KeyError:\n pass\n else:\n put_feature_value_list(self.stats, \"finite\", 1.0)\n else:\n put_feature_value_list(self.stats, \"finite\", 1.0)\n\n\n if t.deprel not in [\"VG\", \"SP\"]: #SP e.g. är öppen\n put_feature_value(self.stats, \"main_verb\", 1.0)\n if t.lemma:\n # check next word if verb (also non-modal use of those verbs) \n if t.lemma[0] in self.modal_verb_list:\n try:\n if s.nodes[i+1].deprel == \"VG\":\n put_feature_value_list(self.stats, \"modal_verb\", t.word)\n except IndexError: \n for w in s.nodes[i:]:\n if (w.pos == \"VB\" and w.deprel == \"VG\" \n and w.depheadid == t.ref):\n put_feature_value_list(self.stats, \"modal_verb\", t.word)\n \n if \"SFO\" in t.msd:\n if t.lemma[0][-1] == \"s\": # e.g. finns\n put_feature_value_list(self.stats, \"sverb\", t.word)\n else:\n put_feature_value(self.stats, \"passive\", 1.0)\n if t.msd[:6] == \"PC.PRF\":\n put_feature_value(self.stats, \"perf_pc\", 1.0)\n if t.msd[:6] == \"PC.PRS\":\n put_feature_value(self.stats, \"pres_pc\", 1.0)\n if \"PRT\" in t.msd:\n put_feature_value(self.stats, \"past_VB\", 1.0)\n elif \"PRS\" in t.msd:\n put_feature_value(self.stats, \"pres_VB\", 1.0)\n elif \"SUP\" in t.msd:\n put_feature_value(self.stats, \"sup_VB\", 1.0)\n if \"IMP\" in t.msd:\n put_feature_value(self.stats, \"imp_VB\", 1.0)\n if \"KON\" in t.msd:\n put_feature_value(self.stats, \"konj_VB\", 1.0)\n\n\n if t.word in [\"han\", \"hon\", \"det\", \"den\"]:\n put_feature_value(self.stats, \"PN_3SG\", 1.0)\n\n if t.pos == \"NN\" and (\"NEU\" in t.msd):\n put_feature_value(self.stats, \"neu_NN\", 1.0)\n\n # Relative strucutres (pronouns etc.)\n if t.pos in [\"HA\", \"HD\", \"HP\", \"HS\"]:\n if s.nodes[-1].word != \"?\": # to exclude interrogative use of those \n # (but indirect questions not handled...)\n put_feature_value(self.stats, \"rel_str\", 1.0)\n\n return self.stats", "def _morphophonemics_of(entry: _LexiconEntry) -> str:\n return entry[\"morphophonemics\"]", "def analyse(self):\n pass", "def applyMorphologicalCleaning(self, image):", "def analyse_document(dom, arguments):\n model = dom.getElementsByTagName(\"model\")[0]\n return analyse_model(model, arguments)", "def morpho_doc2(doc_id):\n db.doc_apply(doc_id, morpho_doc)", "def detect_document(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.document_text_detection(image=image)\n\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n #print('\\nBlock confidence: {}\\n'.format(block.confidence))\n for paragraph in block.paragraphs:\n for word in paragraph.words:\n word_text = ''.join([symbol.text for symbol in word.symbols])\n text.append(word_text.encode('utf-8'))\n #print(word_text)", "def process(self, doc: Doc) -> Doc:\n\n matches = self.get_matches(doc)\n\n terminations = 
get_spans(matches, \"termination\")\n boundaries = self._boundaries(doc, terminations)\n\n # Removes duplicate matches and pseudo-expressions in one statement\n matches = filter_spans(matches, label_to_remove=\"pseudo\")\n\n entities = list(self.get_spans(doc))\n ents = None\n\n for start, end in boundaries:\n\n ents, entities = consume_spans(\n entities,\n filter=lambda s: check_inclusion(s, start, end),\n second_chance=ents,\n )\n\n sub_matches, matches = consume_spans(\n matches, lambda s: start <= s.start < end\n )\n\n if self.on_ents_only and not ents:\n continue\n\n sub_preceding = get_spans(sub_matches, \"preceding\")\n sub_following = get_spans(sub_matches, \"following\")\n # Verbs preceding negated content\n sub_preceding += get_spans(sub_matches, \"verbs_preceding\")\n # Verbs following negated content\n sub_following += get_spans(sub_matches, \"verbs_following\")\n\n if not sub_preceding + sub_following:\n continue\n\n if not self.on_ents_only:\n for token in doc[start:end]:\n token._.negation = any(\n m.end <= token.i for m in sub_preceding\n ) or any(m.start > token.i for m in sub_following)\n\n for ent in ents:\n self.annotate_entity(\n ent=ent,\n sub_preceding=sub_preceding,\n sub_following=sub_following,\n )\n\n return doc", "def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc", "def inference(self):\n for m, doc in enumerate(self.docs):\n # Be careful followings are views\n # So self.hoge will be change, when changing variant\n zs_j = self.zs_m_j[m]\n zk_j = self.zk_m_j[m]\n n_m_zs = self.n_m_zs[m]\n n_m_zk = self.n_m_zk[m]\n for j, t in enumerate(doc):\n # discount for n-th word t with topic z\n zs = zs_j[j]\n zk = zk_j[j]\n n_m_zs[zs] -= 1\n n_m_zk[zs, zk] -= 1\n self.n_zk_t[zk, t] -= 1\n self.n_zk[zk] -= 1\n\n # sampling topic new_z for t\n \"\"\"\n n_s = n_m_zs + self.alphas # mth doc, S vec\n p_s = n_s / np.sum(n_s)\n n_k = n_m_zk + self.alphask # mth doc, SxK matrix\n p_k = n_k / n_s.reshape(len(n_s), 1)\n n_v = self.n_zk_t[:, t] + self.beta\n p_v = n_v / (self.n_zk + self.beta)\n\n p_zsk = p_s.reshape(len(p_s), 1) * p_k * p_v # SxK matrix\n \"\"\"\n\n p_zsk = (n_m_zk + self.alphask) * self.n_zk_t[:, t] \\\n / (np.sum(n_m_zs + self.alphas) * self.n_zk)\n\n p_zs = np.sum(p_zsk, axis=1) / np.sum(p_zsk)\n p_zk = np.sum(p_zsk, axis=0) / np.sum(p_zsk)\n\n new_zs = np.random.multinomial(1, p_zs).argmax()\n new_zk = np.random.multinomial(1, p_zk).argmax()\n\n # print(\"arg\", np.argmax(p_s), np.argmax(p_k, axis=1),\n # np.argmax(p_k, axis=0), np.argmax(p_zk))\n # print('probs', p_s, p_zs)\n # print('probk', p_k, p_zk)\n # print('old', zs, zk)\n # print('new', new_zs, new_zk)\n\n # set z the new topic and increment counters\n zs_j[j] = new_zs\n zk_j[j] = new_zk\n n_m_zs[new_zs] += 1\n n_m_zk[new_zs, new_zk] += 1\n self.n_zk_t[new_zk, t] += 1\n self.n_zk[new_zk] += 1", "def load_morphoit(self, path=None):\n path = path or self.path\n # f = open(path,'r')\n f = codecs.open(path, 'r', 'latin-1')\n lines = f.readlines()\n f.close()\n self.lemma_dict.clear()\n self.tag_dict.clear()\n self.suffix_dict.clear()\n self.word_tag_dict.clear()\n n_entries = 0\n for line in lines:\n line = line.strip()\n if 
line:\n # entry = line.split()\n entry = line.split(u'\\u0009') # entry has TAB delimited items\n n = len(entry)\n word = entry[0]\n if n > 1:\n lemma = entry[1]\n else:\n lemma = 'lemma?'\n if n > 2:\n tags = entry[2]\n if tags:\n splitted_tags = tags.split(u':')\n tag = splitted_tags[0]\n if tag == u'SMI': # smile ?\n continue\n else:\n tag = 'tag?'\n n_entries += 1\n if self.use_defaultdict:\n self.lemma_dict[lemma].append(word)\n self.tag_dict[tag] += 1\n self.word_tag_dict[word][tag] += 1\n else:\n words = self.lemma_dict.get(lemma, []); words.append(word); self.lemma_dict[lemma] = words\n self.tag_dict[tag] = self.tag_dict.get(tag, 0) + 1\n dict = self.word_tag_dict.get(word, {})\n \"\"\" only occurrence in corpus will increment the counter !\n dict[tag] = dict.get(tag, 0) + 1\n \"\"\"\n if dict.get(tag, None) is None:\n dict[tag] = 0\n self.word_tag_dict[word] = dict\n length = len(word)\n reversed = util.reverse(word)\n max = min(length, MAX_SUFFIX)\n for i in range(MIN_SUFFIX, max):\n suffix = reversed[:i]\n if self.use_defaultdict:\n self.suffix_dict[suffix][tag] += 1\n else: # defaultdicts cannot be pickled !\n dict = self.suffix_dict.get(suffix, {})\n dict[tag] = dict.get(tag, 0) + 1\n self.suffix_dict[suffix] = dict\n return len(lines), n_entries", "def lemmas_freq_doc(doc):\n lemmas = {}\n morpho = doc.morpho\n for i in morpho:\n # if this is a word\n if 'analysis' in i.keys():\n # if there is few lex\n if len(i['analysis']):\n for l in i.get('analysis', []):\n if l.get('lex', False):\n if (not l['lex'] in stop_lemmas) & (l.get('wt', 0) > 0):\n lemmas[l['lex']] = lemmas.get(l['lex'], 0) + l.get('wt', 1)\n else:\n # english word or number or smth like this\n word = i.get('text', '')\n # take word, don't take number\n if (len(word) > 0) and not word.isdigit():\n lemmas[word] = lemmas.get(word, 0) + 1\n doc.lemmas = lemmas", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def analyse ( self ) :\n \n ## get all B0 particles\n bs1 = self.gselect ( 'bs1' , \"[ Beauty => ( D_s+ ==> K- K+ pi+ ) K-]CC \")\n bs2 = self.gselect ( 'bs2' , \"[ Beauty -> ( D_s+ --> K- K+ pi+ ) K-]CC \")\n \n cnt = self.counter(\"#1 + photos \")\n cnt += bs1.size()\n \n cnt = self.counter(\"#2 - photos \")\n cnt += bs2.size()\n\n if len(bs1) != len(bs2) :\n self.Warning(\" FOUND!!!!\" , SUCCESS )\n for b in bs1:\n print ' With PHOTOS: ', b.decay() , b.barcode()\n for b in bs2:\n print ' Without PHOTOS: ', b.decay() , b.barcode()\n \n \n return SUCCESS # RETURN ", "def startmorph(self):\n cursor_pos = 0\n clrR = QColor(255, 0, 0, 255)\n clrB = QColor(0, 0, 0, 255)\n cursor = self.textEdit_morph.textCursor()\n\n if self.radioButton_image.isChecked():\n self.morph_state[0][0] = self.checkBox_morph.isChecked()\n else:\n self.morph_state[1][0] = self.checkBox_morph.isChecked()\n\n for element in self.textEdit_morph.toPlainText().split('\\n'):\n # split the full textEdit_morph up into newlines\n starter = element.split(' ')\n # split the newlines up into words\n if starter[0] in self.valid_ops:\n # we color it black, in case it previously has been colored red.\n # could color all black on each iteration and recolor all the reds. 
too bad!\n cursor.setPosition(cursor_pos)\n cursor.movePosition(20, 1, 1)\n self.textEdit_morph.setTextCursor(cursor)\n self.textEdit_morph.setTextColor(clrB)\n cursor_pos += len(element) + 1\n # print(cursor_pos)\n cursor.setPosition(0)\n # calls to setTextCursor are made to move the cursor to the actual position on screen\n self.textEdit_morph.setTextCursor(cursor)\n else:\n # maybe move next 7 lines into morp, and call it for this 1 and the previous 1?\n cursor.setPosition(cursor_pos)\n cursor.movePosition(20, 1, 1)\n self.textEdit_morph.setTextCursor(cursor)\n self.textEdit_morph.setTextColor(clrR)\n cursor_pos += len(element) + 1\n cursor.setPosition(0)\n self.textEdit_morph.setTextCursor(cursor)\n print(\"something is wrong!!\")\n self.checkBox_morph.setChecked(False)\n\n if self.radioButton_circle.isChecked():\n self.morph_state[1][1] = self.textEdit_morph.toPlainText()\n else:\n self.morph_state[0][1] = self.textEdit_morph.toPlainText()\n\n self.update_all_things()", "def _prepare_analysis_input(self, documents):\n subdoc_to_doc_map = {}\n wordtype_to_number = {}\n number_to_wordtype = []\n wordtypes = {}\n \n # prevent duplicating work\n if os.path.exists(self.wordtype_file):\n return\n \n try:\n # First find singletons\n if self.remove_singletons:\n word_type_count_threshold = max(1, int(math.log(documents.count(), 10)) - 2)\n temp_word_type_counts = {}\n for doc_index, doc in enumerate(documents):\n tokens = self.tokenize(doc.get_content())\n for token, token_start in tokens:\n temp_word_type_counts[token] = temp_word_type_counts.setdefault(token, 0) + 1\n for word_type, count in temp_word_type_counts.iteritems(): # add singletons to stopword list\n if count <= word_type_count_threshold:\n self._excluded_words[word_type] = True\n with io.open(self.excluded_words_file, 'w', encoding='utf-8') as ex_f:\n ex_f.write(unicode(json.dumps(self._excluded_words)))\n \n haltwords = dict(self.stopwords)\n haltwords.update(self._excluded_words)\n # Second find bigrams, iterate through documents and train.\n if self.find_bigrams:\n from import_tool.analysis.bigram_finder import BigramFinder\n bigram_finder = BigramFinder(stopwords=haltwords)\n for doc_index, doc in enumerate(documents):\n bigram_finder.train(doc_index, self.tokenize(doc.get_content()))\n bigram_finder.print()\n \n # Third, we're going to stem words\n if self.stem_words:\n from import_tool.analysis.stemmer import Stemmer\n stemmer = Stemmer(self._working_dir, self.base_dir)\n \n # for each document tokenize and map tokens to numbers to avoid regex problems before passing data to Mallet\n with io.open(self.mallet_input_file, 'w', encoding='utf-8') as w:\n with io.open(self.start_index_file, 'w', encoding='utf-8') as w2:\n count = 0\n subcount = 0\n for doc_index, doc in enumerate(documents):\n doc_content = unicode(doc.get_content())\n count += 1\n subdocuments = self.create_subdocuments(doc_index, doc_content)\n token_start_index_offset = 0 # needed to make sure the start index remains correct once the document is re-merged\n for subdoc_name, subdoc_content in subdocuments:\n if subcount > 0:\n w2.write(u'\\n')\n subcount += 1\n subdoc_to_doc_map[subdoc_name] = doc_index\n tokens = self.tokenize(subdoc_content)\n \n if self.find_bigrams:\n tokens = bigram_finder.combine(tokens, subdoc_content)\n \n token_numbers = []\n token_start_indices = []\n only_tokens = []\n tokens_temp = []\n for tok, tok_start in tokens:\n only_tokens.append(tok)\n tokens_temp.append([tok, tok_start + token_start_index_offset])\n tokens = tokens_temp\n 
tokens_temp = None\n if self.stem_words:\n stemmed_tokens = stemmer.stem(only_tokens)\n else:\n stemmed_tokens = only_tokens\n for tup, tok_stem in zip(tokens, stemmed_tokens):\n tok, tok_start = tup\n wordtypes[tok] = True\n wordtypes[tok_stem] = True\n try:\n tok_num = wordtype_to_number[tok_stem]\n except:\n tok_num = len(wordtype_to_number)\n number_to_wordtype.append(tok_stem)\n wordtype_to_number[tok_stem] = tok_num\n token_numbers.append(unicode(tok_num))\n token_start_indices.append([tok, tok_start])\n text = u' '.join(token_numbers)\n w.write(u'{0} all {1}\\n'.format(subdoc_name, text))\n w2.write(unicode(json.dumps(token_start_indices)))\n token_start_index_offset += len(subdoc_content)\n for tok, tok_start in tokens:\n try:\n assert doc_content[tok_start:tok_start+len(tok)].lower() == tok.lower()\n except:\n print(tok_start)\n print(len(tok))\n print('\"'+doc_content[tok_start:tok_start+len(tok)].lower()+'\"')\n print('\"'+tok.lower()+'\"')\n raise\n if not count:\n raise Exception('No files processed.')\n # record which subdocuments belong to which documents\n with io.open(self.subdoc_to_doc_map_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(subdoc_to_doc_map)))\n with io.open(self.wordtype_to_number_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtype_to_number)))\n with io.open(self.number_to_wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(number_to_wordtype)))\n with io.open(self.wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtypes)))\n except: # cleanup\n self._cleanup(self.mallet_input_file)\n self._cleanup(self.subdoc_to_doc_map_file)\n self._cleanup(self.wordtype_to_number_file)\n self._cleanup(self.number_to_wordtype_file)\n self._cleanup(self.wordtype_file)\n self._cleanup(self.excluded_words_file)\n raise", "def detect_text(img):\n \n with io.open(img, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n response = client.text_detection(image=image) # returns TextAnnotation\n df = pd.DataFrame(columns=['description'])\n texts = response.text_annotations\n for text in texts:\n df = df.append(\n dict(\n \n description= clean_text (text.description)\n ),\n ignore_index=True\n )\n \n porter = PorterStemmer()\n\n try:\n text= (df['description'][0])\n text = porter.stem(text)\n except IndexError:\n text = 'i am neutral'\n # print (analyze(text))\n \n \n # print(df['description'])\n print(text)\n if len (text.split())<3:\n text = 'i am neutral'\n\n sentiment_dict= analyze2(text) \n if sentiment_dict >= 0.008: \n Category.append('Positive') \n return('Positive') \n\n elif (sentiment_dict > - 0.008) & (sentiment_dict < 0.008): \n Category.append('Random')\n return('Random')\n\n elif (sentiment_dict <= -0.008):\n Category.append('Negative')\n return('Negative')", "def extract_features(data, stopwords=STOPWORDS):\n tags = set()\n docs = []\n for document in data:\n doc_data = dict()\n doc_data['pmid'] = document['sourceid']\n text = document['text']\n\n # Insert PubTator annotations inside abstract\n denotations = document['denotations']\n sorted_denotations = []\n for denotation in denotations:\n begin = denotation['span']['begin']\n end = denotation['span']['end']\n obj = denotation['obj']\n for c in punctuation:\n obj = obj.replace(c, '')\n tags.add(obj)\n doc_data[obj] = doc_data.get(obj,0)+1\n sorted_denotations.append([begin,end,obj])\n sorted_denotations.sort()\n sorted_denotations.reverse()\n for begin, end, obj in 
sorted_denotations:\n text = text[:begin] + obj + ' ' + text[end:]\n\n doc_data['text'] = clean_text(text, stopwords)\n docs.append(doc_data)\n\n return docs", "def process(self):\n kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))\n # np.ones((5, 5), np.uint8)\n # self.output_image = cv.morphologyEx(self.output_image, cv.MORPH_OPEN, kernel, iterations=1)\n self.output_image = cv.morphologyEx(self.output_image, cv.MORPH_GRADIENT, kernel, iterations=1)\n self.output_image = cv.morphologyEx(self.output_image, cv.MORPH_CLOSE, kernel, iterations=3)\n return self.output_image", "def emo_detect(text_list,language='English',method ='dimensional',output='data_frame',resolution = 'words_in_text', folder='',max_len=500000):\n \n # Dictionary containing file names of affect disctionaries:\n file_dict = {'english':{'dimensional':('english_anew_dict',),'discrete':('english_nstar_liwc_dict','english_star_liwc_dict')},\\\n 'german':{'dimensional':('german_anew_dict',),'discrete':('german_nstar_liwc_dict','german_star_liwc_dict')},\\\n 'chinese':{'discrete':('chinese_nstar_liwc_dict','chinese_star_liwc_dict')}}\n # Dictionary containing column names of data frames:\n colname_dict = {'english':{'dimensional':['PosVal', 'NegVal','Arousal','Dominance', 'PosCount','DetectCount','TokenCount','ValSq'],'discrete':['function', 'pronoun', 'ppron', 'i', 'we', 'you', 'shehe', 'they', 'ipron', 'article', 'prep', 'auxverb', 'adverb', 'conj', 'negate', 'verb', 'adj', 'compare', 'interrog', 'number', 'quant', 'affect', 'posemo', 'negemo', 'anx', 'anger', 'sad', 'social', 'family', 'friend', 'female', 'male', 'cogproc', 'insight', 'cause', 'discrep', 'tentat', 'certain', 'differ', 'percept', 'see', 'hear', 'feel', 'bio', 'body', 'health', 'sexual', 'ingest', 'drives', 'affiliation', 'achieve', 'power', 'reward', 'risk', 'focuspast', 'focuspresent', 'focusfuture', 'relativ', 'motion', 'space', 'time', 'work', 'leisure', 'home', 'money', 'relig', 'death', 'informal', 'swear', 'netspeak', 'assent', 'nonflu', 'filler','DetectCount','TokenCount']},\\\n 'german':{'dimensional':['PosVal', 'NegVal','Arousal','Dominance', 'PosCount','DetectCount','Imagine','Potency', 'DomPot_Count','TokenCount','ValSq'],'discrete':['Pronoun', 'I', 'We', 'Self', 'You', 'Other', 'Negate', 'Assent', 'Article', 'Preps', 'Number', 'Affect', 'Posemo', 'Posfeel', 'Optim', 'Negemo', 'Anx', 'Anger', 'Sad', 'Cogmech', 'Cause', 'Insight', 'Discrep', 'Inhib', 'Tentat', 'Certain', 'Senses', 'See', 'Hear', 'Feel', 'Social', 'Comm', 'Othref', 'Friends', 'Family', 'Humans', 'Time', 'Past', 'Present', 'Future', 'Space', 'Up', 'Down', 'Incl', 'Excl', 'Motion', 'Occup', 'School', 'Job', 'Achieve', 'Leisure', 'Home', 'Sports', 'TV', 'Music', 'Money', 'Metaph', 'Relig', 'Death', 'Physcal', 'Body', 'Sexual', 'Eating', 'Sleep', 'Groom', 'Swear', 'Nonfl', 'Fillers', 'Swiss', 'Ideo', 'Personalpronomina', 'Indefinitpronomina', 'AuxiliaryVerbs', 'Konjunktionen', 'Adverbien','Bedrohung', 'DetectCount','TokenCount']},\\\n 'chinese':{'discrete':['function', 'pronoun', 'ppron', 'i', 'we', 'you', 'shehe', 'they', 'youpl', 'ipron', 'prep', 'auxverb', 'adverb', 'conj', 'negate', 'quanunit', 'prepend', 'specart', 'tensem', 'focuspast', 'focuspresent', 'focusfuture', 'progm', 'particle', 'modal_pa', 'general_pa', 'compare', 'interrog', 'number', 'quant', 'affect', 'posemo', 'negemo', 'anx', 'anger', 'sad', 'social', 'family', 'friend', 'female', 'male', 'cogproc', 'insight', 'cause', 'discrep', 'tentat', 'certain', 'differ', 'percept', 'see', 'hear', 'feel', 'bio', 'body', 
'health', 'sexual', 'ingest', 'drives', 'affiliation', 'achieve', 'power', 'reward', 'risk', 'relativ', 'motion', 'space', 'time', 'work', 'leisure', 'home', 'money', 'relig', 'death', 'informal', 'swear', 'netspeak', 'assent', 'nonflu', 'filler', 'DetectCount','TokenCount']}}\n # Normalize language and method parameters:\n language = language.lower()\n method = method.lower()\n #Initiate empty stemmer:\n stemmer = []\n # Counter:\n c = 0\n # Get the files to load from the file_dict:\n files = file_dict[language][method]\n # Get column names from col_dict:\n colnames = colname_dict[language][method]\n # Load files:\n # One dictionary (dimensional):\n if len(files) == 1:\n with open(folder + files[0],'rb') as f:\n dicts = pickle.load(f)\n # Length of affect vectors in dictionary:\n vec_len = len(list(dicts.values())[0])\n pos_list = 0\n # Two dictionaries (discrete):\n if len(files) == 2:\n with open(folder + files[0],'rb') as f:\n naster_disc_dict = pickle.load(f)\n with open(folder + files[1],'rb') as f:\n aster_disc_dict = pickle.load(f)\n dicts = (naster_disc_dict,aster_disc_dict)\n # Length of affect vectors in dictionary:\n vec_len = len(list(naster_disc_dict.values())[0])\n # Generate stemmer if german dimensional affect detection:\n if method == 'dimensional':\n if language == 'german':\n stemmer = SnowballStemmer('german')\n elif method == 'discrete':\n # List of stem lengths in the word stem dictionary:\n pos_list = list(reversed(list(aster_disc_dict.keys())))\n # Initiate vec_list if output is data_frame, and emo_mat if output is array:\n if output == 'data_frame': \n vec_list = []\n elif output == 'array':\n emo_mat = np.zeros([max_len,max_len,vec_len])\n # for resolution 'sentences_in_text' create an emo_mat with one additional layer for sentence counter\n if resolution == 'sentences_in_text':\n emo_mat = np.zeros([max_len,max_len,vec_len+1])\n\n # Iterate over texts in text_list:\n for c,text in enumerate(text_list):\n # Print counter every 10,000 texts:\n if c % 10000 == 0:\n print(c)\n # Ignore if it's not a text:\n if not isinstance(text,str):\n continue\n # Resolution words in text:\n if resolution == 'words_in_text':\n # create vector or array (depending on 'ouptut'):\n emo_thingy = text_detect(text=text,max_len=max_len,vec_len=vec_len,pos_list=pos_list,dicts=dicts,stemmer=stemmer,method = method,output = output)\n # Skip text if output of 'text_detect' is 0.\n if isinstance(emo_thingy, int):\n continue\n # Append emo_thingy to vec_list or add to emo_mat (depending on 'ouptut'):\n elif output == 'data_frame': \n vec_list.append(emo_thingy)\n elif output == 'array':\n emo_mat[emo_thingy[0],:emo_thingy[0]+1,:] += emo_thingy[1] \n elif resolution == 'words_in_sentence':\n sent_list = sentence_tokenize(text,language=language)\n for sent in sent_list:\n # create vector or array (depending on 'ouptut'):\n emo_thingy = text_detect(text=sent,max_len=max_len,vec_len=vec_len,pos_list=pos_list,dicts=dicts,stemmer=stemmer,method = method,output = output)\n # Skip text if output of 'text_detect' is 0.\n if isinstance(emo_thingy, int):\n continue\n # Append emo_thingy to vec_list or add to emo_mat (depending on 'ouptut'):\n elif output == 'data_frame': \n vec_list.append(emo_thingy)\n elif output == 'array':\n emo_mat[emo_thingy[0],:emo_thingy[0]+1,:] += emo_thingy[1] \n elif resolution == 'sentences_in_text':\n sent_list = sentence_tokenize(text,language=language)\n n_sent = len(sent_list)\n if (n_sent > max_len) or (n_sent == 0):\n continue\n n_sent = n_sent - 1\n for sc,sent in 
enumerate(sent_list):\n emo_thingy = text_detect(text=sent,max_len=10000,vec_len=vec_len,pos_list=pos_list,dicts=dicts,stemmer=stemmer,method = method,output = 'data_frame')\n if isinstance(emo_thingy, int):\n continue\n emo_thingy = np.append(emo_thingy,np.array([1]))\n emo_mat[n_sent,sc,:] += emo_thingy\n # return data frame or array, depending on 'output':\n if output == 'data_frame':\n return(pd.DataFrame(vec_list,columns=colnames))\n elif output == 'array':\n return(emo_mat)", "def detect_document(path):\n from google.cloud import vision\n import io\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.document_text_detection(image=image)\n\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n print('\\nBlock confidence: {}\\n'.format(block.confidence))\n\n for paragraph in block.paragraphs:\n print('Paragraph confidence: {}'.format(\n paragraph.confidence))\n\n for word in paragraph.words:\n word_text = ''.join([\n symbol.text for symbol in word.symbols\n ])\n print('Word text: {} (confidence: {})'.format(\n word_text, word.confidence))\n\n for symbol in word.symbols:\n print('\\tSymbol: {} (confidence: {})'.format(\n symbol.text, symbol.confidence))\n\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))", "def _extract_opinions(self):\n self.data['adjectives'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, ADJ))\n self.data['adverbs'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, ADV))\n self.data['verbs'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, VERB))", "def substantiate():", "def classify(self, documents):\n predictions = []\n for doc in documents:\n\n score_sod = math.log(self.priorSOD)\n score_pop = math.log(self.priorPOP)\n for term in doc.tokens:\n if term in self.cond_prob_sod.keys():\n score_sod += math.log(self.cond_prob_sod[term])\n if term in self.cond_prob_pop.keys():\n score_pop += math.log(self.cond_prob_pop[term])\n if(score_pop >= score_sod): #defaults to ham if score = even \n predictions.append('pop')\n else:\n predictions.append('sod')\n \n return predictions \n pass", "def parse(self, word):\n # Ok so now let's do the second FST\n f2 = FST('morphology-parse')\n f2.add_state('start')\n f2.initial_state = 'start'\n \n #add states for the word lick\n for w in list('lick'):\n state_name = 'lick-' + w\n f2.add_state(state_name)\n #add first letter \n f2.add_arc('start', 'lick-l', 'l', 'l')\n \n #add arc for the word lick\n lick = list('lick')\n for w in range(0,len(lick)-1):\n f2.add_arc('lick-'+lick[w], 'lick-'+lick[w+1], lick[w+1], lick[w+1] )\n \n #add states for the word lick \n for w in list('want'):\n state_name = 'want-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'want-w', 'w', 'w')\n #add arc for the word want\n want = list('want')\n for w in range(0,len(want)-1):\n f2.add_arc('want-'+want[w], 'want-'+want[w+1], want[w+1], want[w+1] )\n\n #add states for the word sync\n sync = list('sync')\n for w in sync:\n state_name = 'sync-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'sync-s', 's', 's')\n #add arc for the word sync\n for w in range(0,len(sync)-1):\n f2.add_arc('sync-'+sync[w], 'sync-'+sync[w+1], sync[w+1], sync[w+1] )\n \n #add states for the word panic\n panic = list('panic')\n for w in panic:\n 
state_name = 'panic-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'panic-p', 'p', 'p')\n #add arc for the word panic\n for w in range(0,len(panic)-1):\n f2.add_arc('panic-'+panic[w], 'panic-'+panic[w+1], panic[w+1], panic[w+1] )\n \n #add states for the word havoc\n havoc = list('havoc')\n for w in havoc:\n state_name = 'havoc-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'havoc-h', 'h', 'h')\n #add arc for the word havoc\n for w in range(0,len(havoc)-1):\n f2.add_arc('havoc-'+havoc[w], 'havoc-'+havoc[w+1], havoc[w+1], havoc[w+1] )\n \n f2.add_state('intermediate1')\n f2.add_state('intermediate2')\n f2.add_state('pres1')\n f2.add_state('past1')\n \n f2.add_arc('lick-k', 'intermediate1', '', '')\n f2.add_arc('want-t', 'intermediate1', '', '')\n f2.add_arc('sync-c', 'intermediate1', '', '')\n f2.add_arc('panic-c', 'intermediate1', 'k', '')\n f2.add_arc('havoc-c', 'intermediate1', 'k', '')\n \n f2.add_arc('intermediate1', 'pres1', 'ing', '+present participle form')\n f2.add_arc('intermediate1', 'past1', 'ed', '+past form')\n\n f2.set_final('pres1')\n f2.set_final('past1')\n \n if ''.join(word[-3:]) == 'ing':\n inputs = word[:-3]\n inputs.append('ing')\n elif ''.join(word[-2:]) == 'ed':\n inputs = word[:-2]\n inputs.append('ed')\n else:\n inputs = word\n \n output = f2.transduce(inputs)[0]\n return ''.join(output)", "def extract_emo_relations(self):\n for tweet_idx, tweet in enumerate(self.tweets):\n tweet_tokens = []\n idx2word, child2parent = {}, {}\n for word in tweet.rstrip().split('\\n'):\n if not word:\n sys.stderr.write(\"wat\")\n continue\n curr_word = Word(word.rstrip().split('\\t'), tweet_idx)\n idx2word[curr_word.idx] = curr_word\n child2parent[curr_word] = curr_word.parent\n\n # Isolate emotion words that are Verbs or Adjectives\n if curr_word.text in self.emo_kws and curr_word.pos in self.POS_LIST:\n self.tweet2emo[tweet_idx].append(curr_word)\n curr_word.is_emotion_word = True\n\n tweet_tokens.append(curr_word.text)\n\n # update tweet dictionary and add children to words\n self.add_relatives(child2parent, idx2word)\n tweet_text = \" \".join(tweet_tokens)\n self.idx2tweet[tweet_idx] = tweet_text\n\n # Create Tweet object\n self.add_tweet(tweet_idx, tweet_text, tweet_tokens, list(idx2word.values()))", "def preprocess(self, documents):\n\n # Store the total number of documents\n num_docs = np.float(len(documents))\n\n # A dict storing the frequency of each word across all documents\n total_word_freq = {}\n\n # A dict storing the number of documents that word appears in\n doc_word_freq = {}\n\n # Iterate over all documents\n for doc in documents:\n # Split the string into a list of words\n words = extract_words(doc)\n\n # Update the 'total_word_freq' dict using all words in 'words'\n for w in words:\n ''' YOUR CODE HERE '''\n if w not in total_word_freq.keys():\n total_word_freq[w] = 1\n else:\n total_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # Update the 'doc_word_freq' dict. Remember to only add '1' corresponding to\n # each word in a document. In case a word appears twice in a document, then\n # it should be ignored. 
We use the set() data structure to achieve this.\n for w in set(words):\n ''' YOUR CODE HERE '''\n if w not in doc_word_freq:\n doc_word_freq[w] = 1\n else:\n doc_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # A set of words with total frequency less than 'self.min_freq'\n remove_words = set()\n\n ''' YOUR CODE HERE '''\n\n # Check frequency of each word and add to 'remove_words'\n for w in total_word_freq.keys():\n if total_word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'total_word_freq' and\n # 'doc_word_freq'.\n for w in remove_words:\n del total_word_freq[w]\n del doc_word_freq[w]\n\n # Create a numpy array to store frequencies from which\n # we can create the 'self.idf' preprocessed numpy array.\n word_freq_tensor = np.zeros(len(doc_word_freq))\n\n # For each word in 'doc_word_freq' dict, update\n # 'self.word_to_idx' and 'self.idx_to_word' and\n # 'word_freq_tensor'.\n i = 0\n for w in doc_word_freq.keys():\n self.word_to_idx[w] = i \n self.idx_to_word[i] = w\n word_freq_tensor[i] = doc_word_freq[w]\n i+=1\n \n #print(word_freq_tensor.shape)\n #print(word_freq_tensor)\n # Calculate 'self.idf' (see hint.pdf for formula)\n self.idf = -1*np.log(word_freq_tensor/(len(documents)))\n ''' END YOUR CODE HERE '''" ]
[ "0.6044923", "0.5885203", "0.58177", "0.58032626", "0.5795096", "0.5754858", "0.56634796", "0.5636914", "0.56317765", "0.56152934", "0.5607152", "0.5593497", "0.55799764", "0.557679", "0.55065227", "0.5489352", "0.5421111", "0.53919905", "0.53863907", "0.5376077", "0.5364964", "0.5350816", "0.5343218", "0.533754", "0.5316757", "0.531188", "0.52995425", "0.5296432", "0.5295658", "0.5284694" ]
0.7516963
0
wrap for lemmas_freq_doc with local session
def lemmas_freq_doc2(doc_id): db.doc_apply(doc_id, lemmas_freq_doc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lemmas_freq_doc(doc):\n lemmas = {}\n morpho = doc.morpho\n for i in morpho:\n # if this is a word\n if 'analysis' in i.keys():\n # if there is few lex\n if len(i['analysis']):\n for l in i.get('analysis', []):\n if l.get('lex', False):\n if (not l['lex'] in stop_lemmas) & (l.get('wt', 0) > 0):\n lemmas[l['lex']] = lemmas.get(l['lex'], 0) + l.get('wt', 1)\n else:\n # english word or number or smth like this\n word = i.get('text', '')\n # take word, don't take number\n if (len(word) > 0) and not word.isdigit():\n lemmas[word] = lemmas.get(word, 0) + 1\n doc.lemmas = lemmas", "def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]", "def process_frequencies(df_corpus, wdir, min_MFF, max_MFF, mode, names_MFF):\n # Normalization of the frequencies by the sum of the text\n df_corpus = df_corpus.loc[:].div(df_corpus.sum(axis='columns'), axis=\"index\")\n if mode == \"train\":\n # If we are doing a training corpus, it is easier\n \n # The dataframe gets a new summatory column that we use to order the df \n df_corpus = df_corpus.T\n df_corpus[\"sum\"]=df_corpus.sum(axis=\"columns\")\n df_corpus = df_corpus.sort_values(by=\"sum\", ascending=False)\n \n # Only a given amount of words is taken\n df_corpus = df_corpus[min_MFF:max_MFF]\n # Summatory column is deleted and the df goes back to its normal format\n del df_corpus['sum']\n df_corpus = df_corpus.T\n print(mode, \" last feature: \", df_corpus.columns[-1])\n \n elif mode == \"eval\" or mode == \"test\":\n # If we create the evaluation or the test corpus, we have to check first the features of the train corpus because the 5000 MFW of the train corpus are NOT the 5000 MFW of the test corpus.\n # TODO: I don't know if that is the best way to do it. 
Maybe we should calculate the total amount of features in the different corpora, get the list of the n MFF and then fill the diferent matrixs with this features.\n df_corpus = df_corpus.reindex_axis(names_MFF, axis=1)\n # Only a given amount of words is taken\n df_corpus = df_corpus.T\n df_corpus = df_corpus[min_MFF:max_MFF]\n df_corpus = df_corpus.T\n print(mode, \" last feature: \", df_corpus.columns[-1])\n\n df_corpus = df_corpus.fillna(0)\n \n # The table is saved as csv\n df_corpus.to_csv(wdir+\"freq_table.csv\", sep='\\t', encoding='utf-8', index=True)\n\n return df_corpus", "def __init__(self):\n self.freq = {}", "def update_frequencies():\n pass", "def freq():", "def countFreq(self,document):\n self.document = document\n vocab=['python','js','android','php','django','javascript','oracle','ruby','rails','java']\n cnt_vector = CountVectorizer(vocabulary=vocab)\n self.freq_term_matrix = cnt_vector.fit_transform(self.document)\n return self.freq_term_matrix.toarray()", "def source_freq(self) -> int:", "def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def freq(self, freq=None):\n raise NotImplementedError()", "def getTokenFrequency(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.word_to_docs_path, 'rb') as bin:\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n if wordid_in_file == wordid:\n break\n # skip documents:\n int.from_bytes(bin.read(4 * frequency), 'big')\n\n prevFile = int.from_bytes(bin.read(4), 'big')\n num = 1\n for i in range(frequency):\n currFile = int.from_bytes(bin.read(4), 'big')\n if currFile != prevFile:\n num += 1\n prevFile = currFile\n\n return num", "def get_freq(self, min_gram=1, max_gram=6, freq_limit=1):\n wanted = Counter()\n for s in self.texts:\n l_s = len(s)\n for gram in range(min_gram, max_gram+1):\n wanted.update(s[i: i+gram] for i in range(0, l_s-gram))\n if freq_limit < 1:\n freq_limit = int(len(self.texts)*freq_limit)\n if freq_limit == 1:\n return wanted\n else:\n return self.get_limited(wanted, freq_limit)", "def GetFrequency(self):\n ...", "def getTokenCollectionFrequency(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.word_to_docs_path, 'rb') as bin:\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # skip documents:\n int.from_bytes(bin.read(4 * frequency), 'big')\n if wordid_in_file == wordid:\n return frequency\n return 0", "def lemma_fd(adm):\n def is_contentful(token):\n return analysis(token).get('partOfSpeech') in CONTENTFUL_POS_TAGS\n 
tokens = adm['attributes']['token']['items']\n return Counter(token_key(t) for t in tokens if is_contentful(t))", "def buildFrequencyDic(L):\n pass", "def freq_window(self, startwindow, stopwindow, window=\"hann\"):\n n = self.times.size\n fwindow = _freq_window(self.fs, n, startwindow, stopwindow, window=window)\n new_response = self.from_freq(self.fs, self.in_freq * fwindow)\n\n return new_response", "def get_freq(self):\n return self.freq", "def set_freq(self, freq):\n\n return self._service.exposed_set_freq(freq)", "def setFreq(self,newfreq):\n\t\tself.freq = newfreq;", "def get_word_frequency_per_year(client, search_term='', pos='', item_list_name=''):\n\t\n\tif not search_term:\n\t\tprint \"Enter a word to count its frequency:\"\n\t\tsearch_term = raw_input()\n\n\tlists = client.get_item_lists()\n\n\tprimary_text = ''\n\ttemp_result = []\n\tresult = {}\n\titems = []\n\ttotal_sum = 0\n\n\tif item_list_name:\n\t\titem_list = client.get_item_list_by_name(item_list_name)\n\t\titems = item_list.get_all()\n\telse:\n\t\titem_lists = client.get_item_lists()\n\n\t\tfor l in item_lists['own']:\n\t\t\titems.append(client.get_item_list(l['item_list_url']))\n\n\n\tfor item in items:\n\t\tprimary_text = item.get_primary_text()\n\t\tyear = item.item_metadata['alveo:metadata']['dc:created']\n\t\twords = word_tokenize(primary_text.lower())\n\t\tfiltered_words = []\n\t\t\n\t\tif pos:\n\t\t\tfor i in range(len(words)):\n\t\t\t\tif words[i] == search_term:\n\t\t\t\t\tfiltered_words.append(words[i-1])\n\t\t\t\t\tfiltered_words.append(words[i])\n\t\t\t\t\tfiltered_words.append(words[i+1])\n\n\t\t\ttagged_words = nltk.pos_tag(filtered_words)\n\t\t\tf = nltk.FreqDist(tagged_words)\n\n\t\t\tword_pos = (search_term, pos)\n\t\t\n\t\t\n\t\t\tif word_pos in f.keys():\n\t\t\t\t\n\t\t\t\tprint word_pos\n\n\t\t\t\tword_freq = f[word_pos]\n\t\t\telse:\n\t\t\t\tword_freq = words.count(search_term)\n\t\telse:\n\t\t\tword_freq = words.count(search_term)\n\t\t\tprint word_freq, search_term\n\t\t\ttotal_sum = total_sum + word_freq\n\t\n\t\ttemp_result.append((year, word_freq))\n\n\n\tprint \"total sum: \" + str(total_sum)\n\n\n\tfor i in temp_result:\n\t\tif i[0] in result.keys():\n\t\t\tresult[i[0]] = result[i[0]] + i[1]\n\t\telse:\n\t\t\tresult[i[0]] = i[1]\n\n\n\treturn result", "def reduced_frequency(cutoff):\n print 'reduced frequency method'\n global global_word_list\n global global_reduced_freqs\n\n doc_length = len(global_word_list)\n print 'number of words in files: {}'.format(doc_length)\n count = 0\n freq_list = count_words(global_word_list) # Calls count_words()\n\n for (w, freq) in freq_list.items():\n # a count for testing\n count += 1\n # if count % 100 == 0:\n # print '.',\n # if count % 10000 == 0:\n # print '\\n{}'.format(count)\n # end of count\n global_reduced_freqs[w] = 0\n interval = doc_length / freq\n if interval != doc_length and freq > cutoff:\n for i in range(0, doc_length, interval):\n # Checking if a word is in interval\n if w in global_word_list[i: interval + i]:\n global_reduced_freqs[w] += 1", "def make_term_doc_matrix(self):\n print \"\\ngenerating term-frequency matrix:\"\n\n try:\n len_of_code_book = self.len_of_code_book\n except AttributeError as e:\n print \">>> temp histogram method not run. 
exit()\"\n sys.exit(1) # change this to serch for the longest histgoram in the directory ??\n\n list_of_histograms = []\n for d_cnt, date in sorted(enumerate(os.listdir(self.hist_path))):\n directory = os.path.join(self.hist_path, date)\n print \" >\", date\n for recording in sorted(os.listdir(directory)):\n list_of_histograms.append((recording, directory, len_of_code_book))\n\n if self.config['hists']['parallel']:\n num_procs = mp.cpu_count()\n pool = mp.Pool(num_procs)\n chunk_size = int(np.ceil(len(list_of_histograms)/float(num_procs)))\n results = pool.map(h.worker_padd, list_of_histograms, chunk_size)\n pool.close()\n pool.join()\n else: # for sequential debugging:\n results = []\n for cnt, event in enumerate(list_of_histograms):\n print \"adding to feature space: \", event[0]\n results.append(h.worker_padd(event))\n\n uuids = [uuid for (uuid, hist) in results]\n f = open(self.accu_path + \"/list_of_uuids.p\", \"w\")\n pickle.dump(uuids, f)\n f.close()\n\n # features = np.vstack(results)\n features = np.vstack([hist for (uuid, hist) in results])\n new_features = h.recreate_data_with_high_instance_graphlets(self.accu_path, features, self.config['hists']['low_instances'])\n return True", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def freq(self, frequency: Optional[int]):", "def getFreq(self,):\n\t\treturn self.freq;", "def get_normalized_term_freq_list(clean_corpus, num_docs, C):\n tf_list = []\n normalized_tf_list = []\n if C.STEMMING == True:\n ps =PorterStemmer()\n # if C.LEMMATIZATION == True:\n # wordnet_lemmatizer = lemmatize([token])[0]\n for document in clean_corpus:\n my_dict = {}\n for term in document:\n for token in nltk.word_tokenize(term):\n if C.LEMMATIZATION == True:\n token1 = lemmatize([token])\n if len(token1) == 0: \n continue\n else:\n token = token1[0]\n if C.STEMMING == True:\n token = ps.stem(token)\n if token not in my_dict.keys():\n my_dict[token] = 0\n my_dict[token] = my_dict[token] + 1\n # This tf_list contains term frequency in a document\n tf_list.append(my_dict)\n normalized_tf_list.append(my_dict)\n\n # Normalize and take log\n for i in range(num_docs):\n sq = 0\n for key in tf_list[i].keys():\n normalized_tf_list[i][key] = 1 + math.log2(tf_list[i][key])\n sq = sq + (normalized_tf_list[i][key] ** 2)\n sq = math.sqrt(sq)\n for key in tf_list[i].keys():\n normalized_tf_list[i][key] = normalized_tf_list[i][key]/sq\n return normalized_tf_list", "def get_freq(self,term):\n\n try:\n return self.freq_dict[term]\n except:\n return None", "def getFrequencyDics(corpus, freqTaux=1):\n tokenVocab, posVocab = {unk: freqTaux + 1, empty: freqTaux + 1}, {unk: freqTaux + 1, empty: freqTaux + 1}\n for sent in corpus.trainingSents:\n trans = sent.initialTransition\n while trans:\n if trans.configuration.stack:\n tokens = getTokens(trans.configuration.stack[-1])\n if tokens:\n tokenTxt, posTxt = attachTokens(tokens)\n for c in tokenTxt:\n if c.isdigit():\n tokenTxt = number\n tokenVocab[tokenTxt] = 1 if tokenTxt not in tokenVocab else tokenVocab[tokenTxt] + 1\n posVocab[posTxt] = 1 if posTxt not in posVocab else posVocab[posTxt] + 1\n if tokenTxt:\n tokenVocab[tokenTxt] = 1 if tokenTxt not in tokenVocab else tokenVocab[tokenTxt] + 1\n\n trans = trans.next\n if 
configuration['embedding']['compactVocab']:\n if configuration['others']['verbose']:\n sys.stdout.write(tabs + 'Compact Vocabulary cleaning:' + doubleSep)\n sys.stdout.write(tabs + 'Before : {0}\\n'.format(len(tokenVocab)))\n for k in tokenVocab.keys():\n if k not in [empty, unk, number] and k.lower() not in corpus.mweTokenDictionary and '_' not in k:\n del tokenVocab[k]\n if configuration['others']['verbose']:\n sys.stdout.write(tabs + 'After : {0}\\n'.format(len(tokenVocab)))\n\n else:\n if configuration['others']['verbose']:\n sys.stdout.write(tabs + 'Non frequent word cleaning:' + doubleSep)\n sys.stdout.write(tabs + 'Before : {0}\\n'.format(len(tokenVocab)))\n for k in tokenVocab.keys():\n if tokenVocab[k] <= freqTaux and '_' not in k and k.lower() not in corpus.mweTokenDictionary:\n if uniform(0, 1) < configuration['constants']['alpha']:\n del tokenVocab[k]\n if configuration['others']['verbose']:\n sys.stdout.write(tabs + 'After : {0}\\n'.format(len(tokenVocab)))\n return tokenVocab, posVocab" ]
[ "0.59216833", "0.5677444", "0.55503094", "0.55252546", "0.55219114", "0.55058557", "0.5487935", "0.54210114", "0.5392454", "0.53751504", "0.5373541", "0.5278519", "0.5270846", "0.5263379", "0.52370554", "0.5235091", "0.52321196", "0.5221891", "0.52202755", "0.5213417", "0.5209386", "0.5208831", "0.51979274", "0.51834863", "0.51634806", "0.5159411", "0.51421386", "0.5141012", "0.5138011", "0.5130196" ]
0.6881848
0