Column schema:
  query            string    lengths 9 to 9.05k
  document         string    lengths 10 to 222k
  metadata         dict
  negatives        sequence  length 30
  negative_scores  sequence  length 30
  document_score   string    lengths 4 to 10
  document_rank    string    2 distinct values
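
A minimal sketch of reading rows with this schema, assuming the dump comes from a Hugging Face `datasets`-compatible repository; the dataset path and split below are placeholders, not names given in this preview.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset path or local files.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language request, e.g. "Report the detailed timesheet for today."
print(row["document"])              # the positive code snippet for that query
print(len(row["negatives"]))        # 30 hard-negative code snippets
print(len(row["negative_scores"]))  # 30 similarity scores aligned with `negatives`
print(row["document_score"], row["document_rank"])
```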
Report the detailed timesheet for today.
def do_rrt(self, arg): self.do_timesheet('report extend track today')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_rt(self, arg):\n self.do_timesheet('report today')", "def show_today_tasks(self):\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline == today.strftime('%Y-%m-%d')).all()\n print(f'Today {today.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()", "def timesheet_all(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet_all.html'\r\n )", "def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )", "def do_upt(self, arg):\n self.do_timesheet('update today')", "def report_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n # Shift 'report' keyword\n args = args[1:]\n pname = tname = ''\n mask = TS_GROUP_BY['date'] | TS_GROUP_BY['task']\n # Get report parameters\n try:\n tname, pname, started, finished, mask = \\\n self.get_report_parameters(args, default_mask=mask)\n except ValueError, error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track(tname, pname)\n if not track:\n print(\"There are no tracks have been found.\")\n return\n started = track['started']\n # Check if there is an unfinished task\n task = self.db.get_active_task(started, finished, tname, pname)\n if task:\n print(u\"Warning: There is an unfinished task '{task}#{project}' \"\n \"in the period from '{started}' to '{finished}'.{eol}\"\n \"The unfinished record will be ignored.{eol}\"\n \"Proceed creating the report? [Y/n] \"\n \"\".format(task=task['tname'], project=task['pname'],\n started=datetime.date.strftime(\n started, \"%x\").decode('utf8'),\n finished=datetime.date.strftime(\n finished, \"%x\").decode('utf8'),\n eol=os.linesep), end='')\n if not helpers.get_yes_no(default='y'):\n return\n # Make a report\n self.make_report(tname, pname, started, finished, mask)", "def today(self):\r\n return RecordsToday(self)", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n 
tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks", "def logDayDetails(self):\n console().info(\"Today is {}.\".format(self.today.strftime(DATE_FMT)))\n hours = self.contractDetails.tradingHours.split(\";\")[0].split(\":\")[1]\n console().info(\"Today's Trading Hours Are: {}\".format(hours))\n if self.normalDay:\n console().info(\"Today is a Valid Day for Trading\")\n else:\n console().info(\"Today is not a Valid Trading Day. 
Sleeping Until Tomorrow\")", "def today():\n this_cal = Kalendar()\n to_display = \"TODAY:<BR><BR>\"\n\n elements = this_cal.get_all_day_elements(datetime.datetime.now())\n for element in elements:\n for key, values in element.items():\n to_display += key + \":<BR>\"\n for val in values:\n to_display += \"&nbsp;&nbsp;&nbsp;&nbsp;\" + val + \"<BR>\"\n\n return to_display", "def do_rw(self, arg):\n self.do_timesheet('report week')", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def printSummary(self):\n\t\tweekWorkHours = None\n\t\tdayDelta = None\n\t\tfor num in self.workdays:\n\t\t\tday = self.workdays[num]\n\t\t\tif day.daytype == DayType.weekend:\n\t\t\t\tif weekWorkHours:\n\t\t\t\t\thours = weekWorkHours.total_seconds() // 3600\n\t\t\t\t\tmins = weekWorkHours.seconds // 60 % 60\n\t\t\t\t\tprinty('------{}hrs-----'.format(hours), 'y')\n\t\t\t\t\tweekWorkHours = None\n\t\t\t\t\tdayDelta = None\n\t\t\t\tprinty('{:02d}. (WE)'.format(num), 'w')\n\t\t\telif day.daytype == DayType.holiday:\n\t\t\t\tprinty('{:02d}. (Urlaub)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.illness:\n\t\t\t\tprinty('{:02d}. (Krank)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.overtime_free:\n\t\t\t\tprinty('{:02d}. (Überstundenausgleich)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.business_trip:\n\t\t\t\tprinty('{:02d}. 
(Dienstreise)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.work:\n\t\t\t\tdayDelta = day.getWorkingTime()\n\t\t\t\tworkhours = dayDelta.seconds // 3600\n\t\t\t\tworkrestminutes = dayDelta.seconds // 60 % 60\n\t\t\t\tabsday = datetime.strptime('{}.{}.{}'.format(num, self.monthNum, self.year),'%d.%m.%Y')\n\t\t\t\ttoday = datetime.today()\n\t\t\t\tpauseDelta = day.getPauseTime()\n\t\t\t\tpausehours = pauseDelta.seconds // 3600\n\t\t\t\tpauserestminutes = pauseDelta.seconds // 60 % 60\n\t\t\t\tif absday == today:\n\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'wH')\n\t\t\t\telif absday > today:\n\t\t\t\t\t# future days\n\t\t\t\t\tif len(day.timeblocks) == 0:\n\t\t\t\t\t\tprinty('{:02d}. ?'.format(num), 'g')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'g')\n\t\t\t\telse:\n\t\t\t\t\t# past days\n\t\t\t\t\tif dayDelta > timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n>')\n\t\t\t\t\telif dayDelta < timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'r>')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n')\n\t\t\tif weekWorkHours == None:\n\t\t\t\tweekWorkHours = dayDelta\n\t\t\telse:\n\t\t\t\tif dayDelta:\n\t\t\t\t\tweekWorkHours = weekWorkHours + dayDelta", "def print_event_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'event_start_date': self.event_start_date,\n 'event_end_date': self.event_end_date,\n 'agenda': self.env.context.get('default_agenda_id'),\n },\n }\n return self.env.ref('agenda_esi.recap_report').report_action(self, data=data)", "def print_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n datas = {'ids': context.get('active_ids', [])}\n\n res = self.read(cr, uid, ids, context=context)\n res = res and res[0] or {}\n datas.update({'form': res})\n return self.pool['report'].get_action(cr, uid, ids, \n 'l10n_cl_hr_payroll.report_hrsalarybymonth', \n data=datas, context=context)", "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. 
{task.task}')\n else:\n print('Nothing to do!')\n print()", "def every_day():\n logger.info('[ EVERY_DAY ] [ %s ]' % str(datetime.now().time()))", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def today(self):\n return [t for t in self.tasks if t.date == datetime.date.today()]", "def get_timetable(self):\n\n # Download schedule\n log.debug('downloading timetable for \"{}\"'.format(self.school_year))\n download_file(self.schedule_url, self.schedule_filename)\n\n # Read XML data\n xml_data = read_file(self.schedule_filename)\n\n # Give it to Beautiful Soup for pretty parsing\n soup = BeautifulSoup(xml_data, 'html.parser')\n\n update_time_regex = re.compile(r'\\d{2}\\/\\d{2}\\/\\d{4}\\s?\\d{2}:\\d{2}:\\d{2}')\n update_time_str = update_time_regex.findall(soup.find('footer').get_text())[0]\n update_time_dt = datetime.datetime.strptime(update_time_str, '%d/%m/%Y %H:%M:%S')\n self.update_time = str(datetime.datetime.strftime(update_time_dt, '%d/%m/%Y %H:%M:%S')) \n\n self.save_update_time()\n\n # Compute a correspondance tables between 'rawweeks' and the first weekday\n self.week_dates_mapping = {\n span.alleventweeks.get_text(): span['date'] \n for span in soup.find_all('span')\n }\n\n log.debug('find all events for \"{}\".'.format(self.school_year))\n self.unformatted_events = soup.find_all('event')", "def time_to_generate_monthly_report(today):\n # We will make three attempts to generate the monthly report every month\n return today.day in (1, 2, 3)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def tentative_schedule(request):\n\n\tshows_dict = {\n\t\t0: [],\n\t\t1: [],\n\t\t2: [],\n\t\t3: [],\n\t\t4: [],\n\t\t5: [],\n\t\t6: []\n\t}\n\n\tfor i in range(7):\n\t\tfor show in Show.objects.filter(day=i).order_by('time'):\n\t\t\t\tshow_time = show.time\n\t\t\t\tdj = str(show.dj)\n\t\t\t\tif show.co_dj and str(show.co_dj) != \"Unknown Dj\":\n\t\t\t\t\tdj += \" & \" + str(show.co_dj)\n\t\t\t\tshows_dict[i].append([dj, show_time.strftime('%I:%M %p')])\n\n\treturn render(request, 'tentative_schedule.html', {\n\t\t\t'shows_dict': shows_dict\n\t})", "def daily_table(self):\n htable = [0 for i in range(7)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[6]\n htable[evtime] += 1\n return htable", "def print_day(day):\n timetable = load_table(day)\n if timetable is False:\n print(\"file not found\")\n else:\n max_name = 0\n max_roomcode = 0\n max_type = 0\n for line in timetable:\n if len(line[1]) > max_name:\n max_name = len(line[1])\n if len(line[2]) > max_roomcode:\n max_roomcode = len(line[2])\n if len(line[4]) > 
max_type:\n max_type = len(line[4])\n print_title_border_horiz(max_name, max_roomcode, max_type)\n for line in timetable:\n print_data_line(line, max_name, max_roomcode, max_type)\n print_title_border_horiz(max_name, max_roomcode, max_type)", "def weekly_report(request):\n if TimeCheck().is_ready():\n # get the list of items for the email\n # this will include all active items with an expiration date\n # that occurs within the next 31 days\n exclude_date = dt.now() + datetime.timedelta(days=31)\n items = StockItem.objects\\\n .filter(active=True)\\\n .exclude(date_of_expiration__gt=exclude_date)\\\n .order_by('date_of_expiration')\n response = send_weekly_report(items)\n return HttpResponse(response.content)\n else:\n return HttpResponse('It is too soon to send another email.')", "def get_date_hour_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%dT%H\")", "def add_daily_summary(self):\n auth_date = self.report_date.strftime(\"%b %-d, %Y\")\n now = datetime.now().strftime(\"%x %X\")\n report_title = ' '.join([\n f'Report for {self.origin_value} participant consents authored on: {auth_date} 12:00AM-11:59PM UTC',\n f'(generated on {now} Central)'\n ])\n\n report_notes = [\n ['Notes:'],\n [f'Validation details on this sheet for {self.origin_value} participants only'],\n ['Checkbox validation currently only performed on GROR consents'],\n ['Total Errors can exceed Consents with Errors if any consents had multiple validation errors']\n ]\n\n self._add_text_rows(text_rows=[[report_title]], format_spec=self.format_specs.get('bold_text'))\n # Add any explanatory text / details about the report that have been included in the layout\n self._add_text_rows(text_rows=report_notes, format_spec=self.format_specs.get('legend_text'),\n row_pos=self.row_pos + 1)\n\n if not self._has_needs_correcting(self.consent_df):\n self._add_text_rows(text_rows=[['No consent validation errors detected']],\n format_spec=self.format_specs.get('italic_text'), row_pos=self.row_pos+1)\n\n # Daily summary counts for all the recently authored consents that were processed (regardless of errors)\n self._add_text_rows([['Total Consent Validation Counts']],\n format_spec=self.format_specs.get('bold_text'), row_pos=self.row_pos+1)\n self._add_consent_issue_count_header_section(hpo='All Entities')\n self._add_consent_issue_counts(self.consent_df, show_all_counts=True)", "def print_header(now):\n global config\n date_time = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')\n\n print('*************************************')\n print(f'HTTP LOGS STATISTICS - {date_time}')" ]
[ "0.72559977", "0.62758464", "0.62248325", "0.6223336", "0.6099756", "0.60254157", "0.5918867", "0.58366483", "0.58032715", "0.5800318", "0.57899195", "0.5779033", "0.5711421", "0.5580187", "0.552534", "0.55175817", "0.5431576", "0.54253596", "0.53975874", "0.539205", "0.53869677", "0.5377114", "0.5353261", "0.5327161", "0.5324522", "0.52920336", "0.5270249", "0.5254644", "0.52509135", "0.5215293" ]
0.6635239
1
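
The metadata's objective block declares a triplet of ("query", "document", "negatives"). The sketch below shows one way rows like the one above could be expanded into triplet training examples; sentence-transformers is used purely as an illustrative framework and is not named anywhere in this dump, and the base model is an arbitrary placeholder.

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

def rows_to_examples(rows):
    # Expand each row into (query, positive, negative) triplets,
    # one InputExample per hard negative.
    examples = []
    for row in rows:
        for neg in row["negatives"]:
            examples.append(InputExample(texts=[row["query"], row["document"], neg]))
    return examples

model = SentenceTransformer("all-MiniLM-L6-v2")  # illustrative base model, not specified by the dataset
train_examples = rows_to_examples(ds)            # `ds` as loaded in the earlier sketch
loader = DataLoader(train_examples, shuffle=True, batch_size=16)
loss = losses.TripletLoss(model=model)
model.fit(train_objectives=[(loader, loss)], epochs=1)
```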
Report the timesheet for a week.
def do_rw(self, arg): self.do_timesheet('report week')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reporting_week(self):\n\n print(\"Week Numbers:\")\n print(self.time_stamp)\n print(self.time_stamp_iso)\n print(\"Current = {}\".format(self.current_week()))\n print(\"Reporting = {}\".format(self.current_week() - 1))", "def do_rrw(self, arg):\n self.do_timesheet('report extend track week')", "def weekly():", "def do_upw(self, arg):\n self.do_timesheet('update week')", "def next_week(self, table):\n if (\"week\" + str(self.week + 1)) not in self.t.timeline:\n self.t.add_week()\n self.week += 1\n self.clear_frame(table)\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table)", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def printSummary(self):\n\t\tweekWorkHours = None\n\t\tdayDelta = None\n\t\tfor num in self.workdays:\n\t\t\tday = self.workdays[num]\n\t\t\tif day.daytype == DayType.weekend:\n\t\t\t\tif weekWorkHours:\n\t\t\t\t\thours = weekWorkHours.total_seconds() // 3600\n\t\t\t\t\tmins = weekWorkHours.seconds // 60 % 60\n\t\t\t\t\tprinty('------{}hrs-----'.format(hours), 'y')\n\t\t\t\t\tweekWorkHours = None\n\t\t\t\t\tdayDelta = None\n\t\t\t\tprinty('{:02d}. (WE)'.format(num), 'w')\n\t\t\telif day.daytype == DayType.holiday:\n\t\t\t\tprinty('{:02d}. (Urlaub)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.illness:\n\t\t\t\tprinty('{:02d}. (Krank)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.overtime_free:\n\t\t\t\tprinty('{:02d}. (Überstundenausgleich)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.business_trip:\n\t\t\t\tprinty('{:02d}. (Dienstreise)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.work:\n\t\t\t\tdayDelta = day.getWorkingTime()\n\t\t\t\tworkhours = dayDelta.seconds // 3600\n\t\t\t\tworkrestminutes = dayDelta.seconds // 60 % 60\n\t\t\t\tabsday = datetime.strptime('{}.{}.{}'.format(num, self.monthNum, self.year),'%d.%m.%Y')\n\t\t\t\ttoday = datetime.today()\n\t\t\t\tpauseDelta = day.getPauseTime()\n\t\t\t\tpausehours = pauseDelta.seconds // 3600\n\t\t\t\tpauserestminutes = pauseDelta.seconds // 60 % 60\n\t\t\t\tif absday == today:\n\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'wH')\n\t\t\t\telif absday > today:\n\t\t\t\t\t# future days\n\t\t\t\t\tif len(day.timeblocks) == 0:\n\t\t\t\t\t\tprinty('{:02d}. ?'.format(num), 'g')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'g')\n\t\t\t\telse:\n\t\t\t\t\t# past days\n\t\t\t\t\tif dayDelta > timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n>')\n\t\t\t\t\telif dayDelta < timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'r>')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. 
{}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n')\n\t\t\tif weekWorkHours == None:\n\t\t\t\tweekWorkHours = dayDelta\n\t\t\telse:\n\t\t\t\tif dayDelta:\n\t\t\t\t\tweekWorkHours = weekWorkHours + dayDelta", "def do_upm(self, arg):\n self.do_timesheet('update week')", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def record_week_totals(self, user, start, end, num=10):\n for idx, total in \\\n self.weekly_play_counts(user, start, end, num, order_by_plays=True):\n yield idx, ldates.date_of_index(idx), total", "def full_weeks(self, bot, update, group_name):\n week_number = self.week()\n bot.send_message(update.message.chat_id,\n text='`{}`\\n'.format(group_name) + self.timetable.lessons_week(group_name, week_number),\n parse_mode='Markdown')\n week_number.next()\n bot.send_message(update.message.chat_id,\n text=self.timetable.lessons_week(group_name, week_number),\n parse_mode='Markdown')", "def record_weeks(self, user, start, end, num=10):\n query = self.user_weeks_between(user, start, end).order_by('-plays')[:num]\n for week in query:\n date = ldates.date_of_index(week.week_idx)\n yield week, date", "def report_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n # Shift 'report' keyword\n args = args[1:]\n pname = tname = ''\n mask = TS_GROUP_BY['date'] | TS_GROUP_BY['task']\n # Get report parameters\n try:\n tname, pname, started, finished, mask = \\\n self.get_report_parameters(args, default_mask=mask)\n except ValueError, error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track(tname, pname)\n if not track:\n print(\"There are no tracks have been found.\")\n return\n started = track['started']\n # Check if there is an unfinished task\n task = self.db.get_active_task(started, finished, tname, pname)\n if task:\n print(u\"Warning: There is an unfinished task '{task}#{project}' \"\n \"in the period from '{started}' to '{finished}'.{eol}\"\n \"The unfinished record will be ignored.{eol}\"\n \"Proceed creating the report? 
[Y/n] \"\n \"\".format(task=task['tname'], project=task['pname'],\n started=datetime.date.strftime(\n started, \"%x\").decode('utf8'),\n finished=datetime.date.strftime(\n finished, \"%x\").decode('utf8'),\n eol=os.linesep), end='')\n if not helpers.get_yes_no(default='y'):\n return\n # Make a report\n self.make_report(tname, pname, started, finished, mask)", "def test_report(klass, weeks_of_tickets, date, datetime, tzutc):\n\n expected = [\n [\"Week\", \"Ticket\"],\n [date(2016, 5, 15), 3],\n [date(2016, 5, 22), 2],\n [date(2016, 5, 29), 0],\n [date(2016, 6, 5), 0],\n [date(2016, 6, 12), 2],\n [date(2016, 6, 19), 3],\n [date(2016, 6, 26), 0],\n ]\n\n r = klass(\n title=\"SLA Breach Report\",\n start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday\n end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday\n )\n report = r.report_on(weeks_of_tickets, {\"Ticket\": 14})\n\n assert report.table == expected", "def testWeeklyOvertimes(self):\n dates = self.dates\n for day_num in xrange(28, 31):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 4, day_num)\n ))\n for day_num in xrange(5, 9):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 5, day_num)\n ))\n for day in dates:\n self.make_logs(day)\n\n def check_overtime(week0=Decimal('55.00'), week1=Decimal('55.00'),\n overtime=Decimal('30.00')):\n self.login_user(self.superuser)\n response = self.client.get(self.url, self.args)\n weekly_totals = response.context['weekly_totals'][0][0][0][2]\n self.assertEqual(weekly_totals[0], week0)\n self.assertEqual(weekly_totals[1], week1)\n self.assertEqual(weekly_totals[5], overtime)\n check_overtime()\n #Entry on following Monday doesn't add to week1 or overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 5, 9)))\n check_overtime()\n #Entries in previous month before last_billable do not change overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 4, 24)))\n check_overtime()\n #Entry in previous month after last_billable change week0 and overtime\n self.make_logs(utils.add_timezone(\n datetime.datetime(2011, 4, 25, 1, 0)\n ))\n check_overtime(Decimal('66.00'), Decimal('55.00'), Decimal('41.00'))", "def lessons_week(self, bot, update, group_name):\n week_number = self.week()\n bot.send_message(update.message.chat_id,\n text='`{}`\\n'.format(group_name) + self.timetable.lessons_week(group_name, week_number),\n parse_mode='Markdown')", "def week(update: Update, _: CallbackContext) -> None:\n running_total, average_dose_per_day = return_weekly_figure()\n text = \\\n (\n \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(running_total))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(average_dose_per_day)) \n )\n update.message.reply_markdown(text)\n logger.info(\"Getting week update for \" + str(update.message.chat_id))", "def week(self, week):\n\n self._week = week", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def send_weekly_report_slack():\n quarterly_text = get_report_text(90)\n annual_text = get_report_text(365)\n text = (\n \"*Monthly Metrics Report*\\n\"\n \"This is an automated monthly report on some of our key metrics.\\n\\n\"\n f\"\\tIn the last 90 days we saw:\\n\\n{quarterly_text}\\n\\n\"\n f\"\\tIn the last 365 days we saw:\\n\\n{annual_text}\\n\\n\"\n f\"See more details at our submissions/outcomes <{PUBLIC_REPORTING_URL}|dashboard>.\"\n )\n send_slack_message(settings.SLACK_MESSAGE.WEEKLY_REPORT, text)", "def 
_getWeeklyPlayHours(self):\n serverRegionalSettings = BigWorld.player().serverSettings['regional_settings']\n weekDaysCount = account_shared.currentWeekPlayDaysCount(time_utils._g_instance.serverUTCTime, serverRegionalSettings['starting_time_of_a_new_day'], serverRegionalSettings['starting_day_of_a_new_week'])\n return self._getDailyPlayHours() + sum(self.__stats.dailyPlayHours[1:weekDaysCount])", "def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()", "def work_refresh(self):\n now = dt.now()\n self.eisenhower_priority()\n p_week = now.isocalendar()[1] - self.work_datetime.isocalendar()[1]\n\n if (1 <= p_week) and (self.priority not in [1, 2]):\n self.time_ntf = now\n else:\n pass", "def last_week(self, table):\n if self.week == 1:\n return\n self.week -= 1\n self.clear_frame(table)\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table)", "def the_week_url():\n return '/timeline/%d/%02d/%d/' % \\\n (datetime.now().year, datetime.now().month, timekit.monthweek(datetime.now()))", "def week_report_handle(fans_type):\n\t#import pdb;pdb.set_trace()\n\tlast_day = datetime.date.today()-timedelta(days=datetime.datetime.today().weekday() + 1)\n\ttoday = datetime.date.today()\n\n\tfans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=last_day, date__lte=today).order_by(\"date\")\n\n\tstart = fans_pages[0]\n\tlast = fans_pages[len(fans_pages) - 1]\n\n\t#talk_about_is = (last.talk_about_is - start.talk_about_is) / (start.talk_about_is + 0.0) * 100\n\ttalk_about_is = (last.talk_about_is - start.talk_about_is)\n\t#total_like_count = (last.total_like_count - start.total_like_count) / (start.total_like_count + 0.0) * 100\n\ttotal_like_count = (last.total_like_count - start.total_like_count)\n\t#total_fans = (last.total_fans - start.total_fans) / (start.total_fans + 0.0) * 100\n\ttotal_fans = (last.total_fans - start.total_fans)\n\treturn {\"talk_about_is\":talk_about_is, \"total_like_count\":total_like_count, \"total_fans\":total_fans, \"start\":start.date, \"last\":last.date}", "def hr_report():\n\n # Load the peak data.\n db = Persistence()\n if not (activities := db.load_all()):\n print(\"No data to report on\")\n return\n\n # Find the maximum for each value.\n max = _load_max_values(activities)\n\n # Totals for the current week\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta()\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n # Print the peak data for each week.\n current_weekday = None\n for activity in activities:\n\n # Time to break to a new week?\n if current_weekday is None or current_weekday > activity.start_time.weekday():\n if current_weekday:\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n 
week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta(0)\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n _print_header()\n\n # Capture the weekday.\n if current_weekday is None or current_weekday != activity.start_time.weekday():\n week_work_days = week_work_days + 1\n\n current_weekday = activity.start_time.weekday()\n\n # Print the detail.\n _print_detail(activity, max)\n\n # Find the duration.\n duration = activity.end_time - activity.start_time\n\n # Accumulate for this week\n week_distance_total = week_distance_total + activity.distance\n if activity.elevation:\n week_elevation_total = week_elevation_total + activity.elevation\n week_duration_total = week_duration_total + duration\n week_5sec_average.append(activity.peak_5sec_hr)\n week_30sec_average.append(activity.peak_30sec_hr)\n week_60sec_average.append(activity.peak_60sec_hr)\n if activity.peak_5min_hr:\n week_5min_average.append(activity.peak_5min_hr)\n if activity.peak_10min_hr:\n week_10min_average.append(activity.peak_10min_hr)\n if activity.peak_20min_hr:\n week_20min_average.append(activity.peak_20min_hr)\n if activity.peak_30min_hr:\n week_30min_average.append(activity.peak_30min_hr)\n if activity.peak_60min_hr:\n week_60min_average.append(activity.peak_60min_hr)\n if activity.peak_90min_hr:\n week_90min_average.append(activity.peak_90min_hr)\n if activity.peak_120min_hr:\n week_120min_average.append(activity.peak_120min_hr)\n\n # Final footer.\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n\n # Print the summary.\n _print_summary(max)", "def weekly_report(session, temperature, windspeed, pathname):\n starttime = session.timeEvent\n endtime = starttime - datetime.timedelta(weeks=1)\n\n try:\n queryperiod = epm.QueryPeriod(starttime, endtime)\n processInterval = datetime.timedelta(minutes=15)\n aggregationdetails = epm.AggregateDetails(processInterval, epm.AggregateType.Interpolative)\n temperature_data = temperature.historyReadAggregate(aggregationdetails, queryperiod)\n windspeed_data = windspeed.historyReadAggregate(aggregationdetails, queryperiod)\n\n \n except:\n raise Exception('get interpolative data error')", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. 
There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def period_week_at(self, at_time=\"00:00:00\", week_day=\"Monday\"):\n for task in self._tasks:\n task.period_week_at(at_time=at_time, week_day=week_day)\n\n return self" ]
[ "0.78466284", "0.744055", "0.7247169", "0.688721", "0.65291095", "0.65165263", "0.6470279", "0.64689136", "0.64216584", "0.63769406", "0.63683814", "0.63074833", "0.62996227", "0.62685275", "0.62551403", "0.623483", "0.62168926", "0.62150455", "0.6206032", "0.62004954", "0.617794", "0.613606", "0.6096315", "0.6089438", "0.60846376", "0.6078514", "0.6057263", "0.6050094", "0.6004441", "0.59820765" ]
0.75920844
1
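
The preview does not state how document_score and document_rank are derived. One exploratory check a reader might run is to count how many negative_scores exceed the row's document_score; for the row above, exactly one negative (0.78466284) scores above the positive (0.75920844), which happens to match the rank of 1 shown. Treat this reading as an assumption about the dump, not a documented definition.

```python
def negatives_above_positive(row):
    # Count hard negatives whose score exceeds the positive document's score.
    # Whether this equals `document_rank` is an assumption, not confirmed by the preview.
    pos = float(row["document_score"])
    return sum(float(s) > pos for s in row["negative_scores"])
```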
Report the timesheet for a month.
def do_rm(self, arg): self.do_timesheet('report month')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_to_generate_monthly_report(today):\n # We will make three attempts to generate the monthly report every month\n return today.day in (1, 2, 3)", "def monthly_schedule(self,month):\n response = requests.get(f'http://company.com/{self.lname}/{month}')\n if response.ok:\n return response.text\n else:\n return 'Bad Response!'", "def report_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n # Shift 'report' keyword\n args = args[1:]\n pname = tname = ''\n mask = TS_GROUP_BY['date'] | TS_GROUP_BY['task']\n # Get report parameters\n try:\n tname, pname, started, finished, mask = \\\n self.get_report_parameters(args, default_mask=mask)\n except ValueError, error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track(tname, pname)\n if not track:\n print(\"There are no tracks have been found.\")\n return\n started = track['started']\n # Check if there is an unfinished task\n task = self.db.get_active_task(started, finished, tname, pname)\n if task:\n print(u\"Warning: There is an unfinished task '{task}#{project}' \"\n \"in the period from '{started}' to '{finished}'.{eol}\"\n \"The unfinished record will be ignored.{eol}\"\n \"Proceed creating the report? [Y/n] \"\n \"\".format(task=task['tname'], project=task['pname'],\n started=datetime.date.strftime(\n started, \"%x\").decode('utf8'),\n finished=datetime.date.strftime(\n finished, \"%x\").decode('utf8'),\n eol=os.linesep), end='')\n if not helpers.get_yes_no(default='y'):\n return\n # Make a report\n self.make_report(tname, pname, started, finished, mask)", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, 
column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def monthly_report(request, site, year, month):\n siteObj = get_object_or_404(Site, pk=site)\n yearly = YearlyStatistics.objects.filter(siteId=siteObj).filter(year=year)\n monthly = MonthlyStatistics.objects.filter(siteId=siteObj).filter(year=year).filter(month=month)\n daily = DailyStatistics.objects.filter(siteId=siteObj).filter(year=year).filter(month=month)\n dailyManual = RawManualData.objects.filter(siteId=siteObj).filter(year=year).filter(month=month)\n\n if siteObj and yearly and monthly and daily and (siteObj.isPublic and\n siteObj.owner.is_active or siteObj.owner == request.user):\n climate = {'temp': Climate().TEMP_DISTRIBUTION_LIMITS, 'wind': Climate().WIND_DIRECTION_LIMITS,\n 'rh': Climate().RH_DISTRIBUTION_LIMITS}\n datasetNum = []\n a = MonthlyReport(site, year, month, monthly, yearly, daily, dailyManual)\n\n significants = {}\n for code in Weather.WEATHER_CODE:\n key = code[1]\n value = monthly[0].significants.get(code[0], 0)\n significants[key] = value\n\n return render(request, 'climate/monthly_view.html', {'site': siteObj, 'num': datasetNum, 'year': yearly,\n 'month': month, 'report': a, 'climate': climate,\n 'significants': significants})\n else:\n return render(request, 'climate/main.html', {})", "def atten_month(list_emp, month):\r\n count = 0\r\n with open(\"attendance_log.txt\", \"w\") as attendance_by_emp:\r\n # writes new\\re writes attendance_log from the beginning\r\n attendance_by_emp.seek(0)\r\n attendance_by_emp.write(\"Monthly Attendance Report:\\n\")\r\n # for each worker\r\n for worker in list_emp:\r\n # going through all dates marked\r\n for date in worker.attendance:\r\n # getting only month\r\n date_list = date[:10:].split(\"/\")\r\n if int(date_list[1]) == int(month):\r\n count += 1\r\n # first date in report for this worker so need to write name first\r\n if count == 1:\r\n attendance_by_emp.write(\"%s-\\n\\t%s\\n\" % (worker.name, date))\r\n # not first date for this worker, write only date\r\n else:\r\n attendance_by_emp.write(\"\\t%s\\n\" % date)\r\n count = 0\r\n print(\"Report issued!\\n\")", "def month(self):\n return 0", "def month(self):\n return 0", "def monthly_table(self):\n htable = [0 for i in range(12)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[1]\n htable[evtime - 1] += 1\n return htable", "def month(m):\n\t\tx = db.cquery(\"month\",m)\n\t\tprint \"Total:\", x[0] #@BUG when zero sometimes displays \"1\"\n\t\tf = raw_input(\"[L]ist [N]ew overview or[B]ack to home \").lower()\n\t\tif f == \"l\":\n\t\t\tui.mont1(m)\n\t\t\tfor i in x[1]:\n\t\t\t\tprint ui.statsid(),i[0], i[1],\" \",ui.statstimein(), i[2], ui.statstimeout(),i[3]\n\t\t\traw_input(\"[Enter] to go back to search\")\n\t\t\thome_stats()\n\t\telif f == \"n\": home_stats()\n\t\telif f == \"b\": home()\n\t\telse:\n\t\t\traw_input(\"I didnt get that... 
Press [Enter] to go back to stats...\")\n\t\t\thome_stats()", "def get_general_financial_report_for_month(self, period):\n try:\n month = int(period.split('-')[0])\n year = int(period.split('-')[1])\n start_date = datetime(year, month, 1)\n end_date = datetime(year, month, calendar.monthrange(year, month)[1])\n\n res = []\n\n worked_shifts = [x[0] for x in self.db_handler.get_ended_shift_ids_in_period(start_date, end_date)] # to unpack sets, cuz they're formated like (value,)\n\n for shift in worked_shifts:\n res.append((shift, self.db_handler.get_ended_registrations_by_shift_id(shift)))\n\n self.logger.write_to_log(f'overall financial report for period {period} get', 'model')\n\n return res\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def every_month(self, time, function, args=None, kwargs=None, name=None):\n if args is None:\n args = list()\n if kwargs is None:\n kwargs = dict()\n if name is None:\n name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')\n self.config[name] = {'mode':'every_month', 'time':time, 'function':function, 'args':args, \n 'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),\n 'time_init':datetime.datetime.now()}\n self.params.tracker_dict[name] = dict()", "def do_rt(self, arg):\n self.do_timesheet('report today')", "def test_monthly_report(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n fetch = self.client().get('/expenses?name=soda', headers=dict(Authorization=\"Bearer \" + access_token))\n result = json.loads(fetch.data)\n\n consolidated_total = 212.23\n res = self.client().get('/monthly_report?month=01-2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['consolidated_total'], consolidated_total)", "def do_month(ts, routes=\"m\"):\n pgconn = get_dbconn(\"coop\", user=\"nobody\")\n ccursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n nt = NetworkTable(\"IACLIMATE\")\n sql = \"\"\"SELECT station, sum(precip) as total, max(day) as lastday\n from alldata_ia WHERE year = %s and month = %s\n and station != 'IA0000' and substr(station,2,1) != 'C'\n GROUP by station\"\"\" % (\n ts.year,\n ts.month,\n )\n\n lats = []\n lons = []\n vals = []\n lastday = None\n ccursor.execute(sql)\n for row in ccursor:\n if row[\"station\"] not in nt.sts:\n continue\n if lastday is None:\n lastday = row[\"lastday\"]\n lats.append(nt.sts[row[\"station\"]][\"lat\"])\n lons.append(nt.sts[row[\"station\"]][\"lon\"])\n vals.append(row[\"total\"])\n\n mp = MapPlot(\n title=\"%s - %s\"\n % (ts.strftime(\"%d %B %Y\"), lastday.strftime(\"%d %B %Y\")),\n subtitle=\"%s Total Precipitation [inch]\" % (ts.strftime(\"%B %Y\"),),\n )\n mp.contourf(\n lons, lats, vals, [0, 0.1, 0.25, 0.5, 0.75, 1, 2, 3, 4, 5, 6, 7]\n )\n mp.plot_values(lons, lats, vals, fmt=\"%.2f\")\n\n pqstr = (\n \"plot %s %s 
summary/iemre_iowa_total_precip.png \"\n \"%s/summary/iemre_iowa_total_precip.png png\"\n ) % (routes, ts.strftime(\"%Y%m%d%H%M\"), ts.strftime(\"%Y/%m\"))\n mp.postprocess(pqstr=pqstr)", "def plot_messages_by_month(self, **kwargs):\n assert not (self.__messages_by_month is None), 'First call get_messages_by_month'\n self.__messages_by_month.plot(title= 'Messages by month', **kwargs)", "def _setupMonthlyTotals(self):\n self.billable_project = factories.BillableProject()\n self.nonbillable_project = factories.NonbillableProject()\n self.all_logs(self.user, self.billable_project,\n self.nonbillable_project)\n self.all_logs(self.user2, self.billable_project,\n self.nonbillable_project)\n self.login_user(self.superuser)\n self.response = self.client.get(self.url, self.args)\n self.rows = self.response.context['monthly_totals']\n self.labels = self.response.context['labels']", "def timesheet_all(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet_all.html'\r\n )", "def setMonth(self, *args):\n return _libsbml.Date_setMonth(self, *args)", "def calendar_month(year, month):\n start = datetime.datetime(year, month, 1)\n if month == 12:\n end = datetime.datetime(year+1, 1, 1)\n else:\n end = datetime.datetime(year, month+1, 1)\n print(start)\n print(end)\n return start, end", "def formatmonth(self, theyear, themonth, withyear=True):\n\n schedules = Calendar_Group.objects.filter(day__month=themonth)\n\n v = []\n a = v.append\n a('<div class=\"table-responsive\"><table class=\"table table-bordered\" cellpadding=\"0\" cellspacing=\"0\" class=\"month\">')\n a('\\n')\n a(self.formatmonthname(theyear, themonth, withyear=withyear))\n a('\\n')\n a(self.formatweekheader())\n a('\\n')\n for week in self.monthdays2calendar(theyear, themonth):\n a(self.formatweek(week, schedules))\n a('\\n')\n a('</table></div>')\n a('\\n')\n return ''.join(v)", "def depart_arrive_stats_by_month(flights):\n\n return ...", "def done_report(year_number, month_number):\n done_data, updated_at = get_done_data()\n\n done_data = [ record for record in done_data if record.doneon.year == year_number\n and record.doneon.month == month_number ]\n\n context = {\n 'title': 'Medley Development Report %s/%s: %s Done' % (month_number, year_number, \n len(done_data), ),\n 'done_data': done_data,\n 'headers': done_data[0]._fields,\n 'updated_at': updated_at,\n 'version': __version__,\n }\n response = make_response(render_template('done-report.txt', **context))\n response.headers['Content-Type'] = \"text/plain\"\n return response", "def setIndexMonth(self,index):\n self.indexMonth = index", "def view_month(request, year, month):\n year, month = int(year), int(month)\n transactions = Transaction.month(\n year,\n month,\n user=request.user,\n account__include_in_balance=True,\n )\n start = make_aware(datetime(year, month, 1))\n balance = Transaction.objects.filter(\n user=request.user,\n account__include_in_balance=True,\n date__lt=start,\n ).sum()\n return render(request, 'ledger/pages/view_month.html', {\n 'title': start.strftime('%B %Y'),\n 'transactions': transactions,\n 'balance': balance,\n 'prev_month': adjust_month(year, month, -1),\n 'next_month': adjust_month(year, month, 1),\n })", "def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )", "def set_month(self, month):\r\n\t\tmonths = ['Enero', 'Febrero', 'Marzo', 'Abril',\r\n\t\t\t\t 'Mayo', 'Junio', 'Julio', 'Agosto'\r\n\t\t\t\t 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']\r\n\t\tfor i in range(12):\r\n\t\t\tif month == i: 
\r\n\t\t\t\treturn months[i-1]", "def month(self, month):\n\n self._month = month", "def print_month_header(month):\n print(\"Month #\" + str(month))\n print(\"Sun Mon Tue Wed Thu Fri Sat\")", "def get_month():\n return handle_invalid_inputs(question_3, months)" ]
[ "0.67374146", "0.64609563", "0.61592793", "0.6139588", "0.6077845", "0.6037842", "0.5957328", "0.5957328", "0.5925088", "0.5911148", "0.5853519", "0.5830335", "0.5751275", "0.5735978", "0.5717099", "0.5709584", "0.56879324", "0.5683817", "0.5680816", "0.5671539", "0.5660934", "0.56360817", "0.563605", "0.5635161", "0.56311566", "0.5626522", "0.56192845", "0.56135786", "0.56054455", "0.5581696" ]
0.6691562
1
Report the timesheet for a year.
def do_ry(self, arg): self.do_timesheet('report year')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_year(self, year):\n pass", "def report_year(self, report_year):\n\n self._report_year = report_year", "def report_year(self, report_year):\n\n self._report_year = report_year", "def report_year(self, report_year):\n\n self._report_year = report_year", "def increment_year(self):", "def generate_recruitment_year_report(year):\n recruitments = []\n for rec in Recruitment.query.all():\n summary = Recruitment.get_cycles_summary([rec])\n rec_json = Recruitment.to_json(rec, summary['overall_candidates_num'])\n\n if rec_json['end_date'].split(',')[0].split('/')[2] == str(year):\n recruitments.append(rec_json)\n\n if len(recruitments) == 0:\n return 404\n\n plot_data = get_plot_date(recruitments)\n\n columns = ['Liczba\\nmiejsc', 'Liczba\\nkandydatów', 'Limit\\npunktów', 'Wydział',\n 'Stopień', 'Nazwa kierunku', 'Tryb\\nstudiów']\n\n with PdfPages(\"rest/recruitment_year.pdf\") as pdf:\n current_row = 0\n row_limit = 24\n\n while current_row < len(plot_data):\n fig, axes = plt.subplots(figsize=(8.27, 11.69))\n\n fig.patch.set_visible(False)\n axes.xaxis.set_visible(False)\n axes.yaxis.set_visible(False)\n axes.axis('off')\n\n if current_row + row_limit < len(plot_data):\n last_row = current_row + row_limit\n else:\n last_row = len(plot_data)\n\n rec_table = axes.table(\n cellText=plot_data[current_row:last_row],\n colLabels=columns,\n loc='upper center',\n cellLoc='center'\n )\n\n rec_table.auto_set_font_size(False)\n rec_table.auto_set_column_width([0, 1, 2, 3, 4, 5, 6])\n rec_table.set_fontsize(9)\n\n for i in range(len(columns)):\n rec_table[(0, i)].set_facecolor(\"#56b5fd\")\n rec_table[(0, i)].set_height(.025)\n\n rec_table.scale(1, 2)\n\n if current_row == 0:\n plt.title(f'Rekrutacje na rok {year}')\n\n pdf.savefig()\n plt.close()\n\n current_row += row_limit\n\n return \"recruitment_year.pdf\"", "def formatyear(self, theyear):\n\t\tv = []\n\t\ta = v.append\n\t\ta('<table border=\"0\" cellpadding=\"0\" cellspacing=\"0\" class=\"year\">')\n\t\ta('\\n')\n\t\ta('<tr><th>%s</th></tr>' % theyear)\n\t\tfor m in range(1, 13):\n\t\t\ta('<tr>')\n\t\t\ta('<td>')\n\t\t\ta(self.formatmonthinyear(theyear, m))\n\t\t\ta('</td>')\n\t\t\ta('</tr>')\n\t\ta('</table>')\n\t\treturn ''.join(v)", "def showNextYear(self):\n pass", "def set_year (self, year):\n self.year = year", "def year(cls, year: typing.Union[int, str])->str:\n yearstr: str\n if isinstance(year, int):\n yearstr = str(year)\n else:\n yearstr = year\n return cls.DATE_AND_TIMES_SIGIL + yearstr + \"-01-01T00:00:00/9\"", "def set_start_year(self, year):\n return self.form.set_value(\"output period \\\"year from\\\"\", str(year))", "def gen_year_report(\n lib, years_ordered, actually_markdown=False):\n report = []\n if not actually_markdown:\n # Generate header.\n report.append(gen_header())\n report.append('<table class=\"table\">\\n')\n report.append(' <tr>\\n')\n report.append(' <th> Year </th>\\n')\n report.append(' <th> # </th>\\n')\n report.append(' </tr>\\n')\n\n # Years\n n_papers_across_years = 0\n n_papers_this_year = {}\n for year in years_ordered:\n n_papers_this_year[year] = len(lib.get_pubs(year=year))\n n_papers_across_years += n_papers_this_year[year]\n\n for year in years_ordered:\n style = gen_count_style(\n n_papers_this_year[year], n_papers_across_years)\n report.append(' <tr>\\n')\n report.append(' <td> {0} </td>\\n'.format(year))\n report.append(' <td {0}> {1} </td>\\n'.format(\n style, n_papers_this_year[year]))\n report.append(' </tr>\\n')\n\n # Total\n style = gen_count_style(n_papers_across_years, 
n_papers_across_years)\n report.append(' <tr>\\n')\n report.append(' <td class=\"text-right\"> Total </td>\\n')\n report.append(\n ' <td {0}> {1} </td>\\n'.format(\n style, n_papers_across_years))\n report.append(' </tr>\\n')\n\n if not actually_markdown:\n report.append(gen_footer())\n\n return report", "def set_finish_year(self, year):\n return self.form.set_value(\"output period \\\"year to\\\"\", str(year))", "def index(self, year=None):\n if year is not None:\n return year.start.year - self.year.start.year + 1\n else:\n return self.year.index()", "def year(self, year):\n\n self._year = year", "def test_spider_gets_specific_year(self):\n spider = Eia923Spider()\n resp = factories.TestResponseFactory(eia923=True)\n\n result = spider.form_for_year(resp, 2007)\n\n assert result is not None\n assert result.url == \"https://www.eia.gov/electricity/data/eia923/\" \\\n \"archive/xls/f906920_2007.zip\"\n assert result.meta[\"year\"] == 2007\n\n for year in range(2001, 2019):\n result = spider.form_for_year(resp, year)\n assert result is not None", "def test_yearly_report(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n consolidated_total = 212.23\n res = self.client().get('/yearly_report?year=2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['consolidated_total'], consolidated_total)", "def main(request, year=None):\n\tif year: year = int(year)\n\telse: year = time.localtime()[0]\n\n\tnowYear, nowMonth = time.localtime()[:2]\n\tlst = []\n\n\tfor y in [year, year+1, year+2]:\n\t\tmonthLst = []\n\t\tfor n, month in enumerate(MONTH_NAMES):\n\t\t\tentry\t= current = False\n\t\t\tentries\t= entry.objects.filter(date__year=y, date__month=n+1)\n\n\t\t\tif entries:\n\t\t\t\tentry = True\n\t\t\tif y == nowYear and n+1 == nowMonth:\n\t\t\t\tcurrent = True\n\t\t\tmonthLst.append(dict(n=n+1, name=month, entry=entry, current=current))\n\t\tlst.append((y, monthLst))\n\n\treturn render_to_response(\"cal/\", dict(years=lst, user=request.user, year=year, reminders=reminders(request)))", "def system_year(year):\n\tthis_query = Query('system_year', year)\n\tthis_query.send_query()\n\tresponse = this_query.pull_result()\n\treturn jsonify(response)\n\t#return render_template('response.html', response=response)", "def set_calender_year(self, year):\n self.single_selection_from_kendo_dropdown(self.calender_year_kendo_dropdown_locator, year)", "def yearShown(self):\n return self.currentYear", "def set_year(self, year):\n if year not in range(1970, 2120):\n raise ValueError(\"Year must be in range [1970..2129] but is {}\".format(year))\n\n # First we separate the tens and the digit. 
We also shift the year to\n # the range [0..159]\n tens, digit = divmod(int(year - 1970), 10)\n\n # Then we add them in a single int\n reg_value = (tens << 4) | digit\n\n # The we add it to a registory\n self.__write_register(_REGISTER_YEAR, reg_value)", "def test_valid_year(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n obj = ar[2009]\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsYear))", "def test_iter_all_years(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertEqual([ary.year for ary in ar], [2008,2009])", "def year(self) -> int:\r\n return self._year", "def do_rt(self, arg):\n self.do_timesheet('report today')", "def yearname(self):\n return self.strftime(\"%Y\")", "def test_found_all_years(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertEqual(ar.years, [2008,2009])", "def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )", "def _set_year(self, year) -> bool:\n if self.set_start_year(year) is False:\n return False\n return self.set_finish_year(year)" ]
[ "0.6860223", "0.64029205", "0.64029205", "0.64029205", "0.62339383", "0.6200401", "0.61287", "0.6083873", "0.5985608", "0.5970167", "0.59635514", "0.58967537", "0.5879202", "0.58490413", "0.5846017", "0.58369195", "0.57155764", "0.5681742", "0.56666756", "0.5647257", "0.5627508", "0.56255984", "0.5563096", "0.55630475", "0.5533198", "0.5516081", "0.54926854", "0.5469954", "0.5458671", "0.5458438" ]
0.78585577
0
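The record ending here follows the field layout given at the top of this dump: a query string, a document string, a metadata dict, 30 negatives paired with 30 negative_scores, plus a document_score and a document_rank. As a rough illustration only (the field names come from that schema; the JSONL file name, the loading path, and the "hardest negatives" use are assumptions, not part of this dump), a record of this shape could be consumed as follows:

```python
# Illustrative sketch only: assumes each record is one JSON object per line with
# the fields shown in this dump (query, document, negatives, negative_scores,
# document_score, document_rank). The file name below is hypothetical.
import json


def iter_records(path="retrieval_dump.jsonl"):
    """Yield (query, positive document, hard negatives) triples from a JSONL dump."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            record = json.loads(line)
            yield record["query"], record["document"], record["negatives"]


def hardest_negatives(record, k=5):
    """Keep only the k negatives with the highest negative_scores."""
    scored = sorted(
        zip(record["negative_scores"], record["negatives"]),
        key=lambda pair: pair[0],
        reverse=True,
    )
    return [doc for _, doc in scored[:k]]
```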
Command timesheet and timesheet-related commands. Usage: timesheet | Description: Timesheet is a command that manipulates tracking data and creates a report. It updates tracks for a specified period, changes the time spent, and may mark a track as unbilled. Report data can be narrowed to fetch tracks for a certain task or project, and combined and grouped by dates, projects, tracks and tasks using the 'extend' parameter.
def do_timesheet(self, arg): def _usage(): self.do_help('timesheet') commands = ['update', 'report'] words = shlex.split(arg) words = [token.lower() for token in words] if not len(words) or words[0] not in commands: print(self.error_wrong_parameters) return if words[0] == 'update': self.update_timesheet(words) elif words[0] == 'report': self.report_timesheet(words) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_rrt(self, arg):\n self.do_timesheet('report extend track today')", "def report_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n # Shift 'report' keyword\n args = args[1:]\n pname = tname = ''\n mask = TS_GROUP_BY['date'] | TS_GROUP_BY['task']\n # Get report parameters\n try:\n tname, pname, started, finished, mask = \\\n self.get_report_parameters(args, default_mask=mask)\n except ValueError, error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track(tname, pname)\n if not track:\n print(\"There are no tracks have been found.\")\n return\n started = track['started']\n # Check if there is an unfinished task\n task = self.db.get_active_task(started, finished, tname, pname)\n if task:\n print(u\"Warning: There is an unfinished task '{task}#{project}' \"\n \"in the period from '{started}' to '{finished}'.{eol}\"\n \"The unfinished record will be ignored.{eol}\"\n \"Proceed creating the report? [Y/n] \"\n \"\".format(task=task['tname'], project=task['pname'],\n started=datetime.date.strftime(\n started, \"%x\").decode('utf8'),\n finished=datetime.date.strftime(\n finished, \"%x\").decode('utf8'),\n eol=os.linesep), end='')\n if not helpers.get_yes_no(default='y'):\n return\n # Make a report\n self.make_report(tname, pname, started, finished, mask)", "def main():\n\n # check database for tracking options\n # if empty prompt to add subject\n\n # present tracking options\n\n # calculate timedelta\n\n # printing/updating the time", "def do_rrw(self, arg):\n self.do_timesheet('report extend track week')", "def do_rt(self, arg):\n self.do_timesheet('report today')", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "async def daily(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def command_help():\n\n help_me = \"\"\"\n Help for Calendar. 
The calendar commands are\n\n add DATE START END DETAILS add the event DETAILS at the specified DATE with specific START and END time\n show show all events in the calendar\n delete DATE NUMBER delete the specified event (by NUMBER) from\n the calendar\n quit quit this program\n help display this help message\n\n Examples: user data follows command:\n\n command: add 2018-10-12 18 19 dinner with jane\n success\n\n command: show\n 2018-10-12 :\n start : 08:00,\n end : 09:00,\n title : Eye doctor\n\n start : 12:30,\n end : 13:00,\n title : lunch with sid\n\n start : 18:00,\n end : 19:00,\n title : dinner with jane\n 2018-10-29 :\n start : 10:00,\n end : 11:00,\n title : Change oil in blue car\n\n start : 12:00,\n end : 14:00,\n title : Fix tree near front walkway\n\n start : 18:00,\n end : 19:00,\n title : Get salad stuff, leuttice, red peppers, green peppers\n 2018-11-06 :\n start : 18:00,\n end : 22:00,\n title : Sid's birthday\n\n command: delete 2018-10-29 10\n deleted\n\n A DATE has the form YYYY-MM-DD, for example\n 2018-12-21\n 2016-01-02\n\n START and END has a format HH where HH is an hour in 24h format, for example\n 09\n 21\n\n Event DETAILS consist of alphabetic characters,\n no tabs or newlines allowed.\n \"\"\"\n return help_me", "def do_upt(self, arg):\n self.do_timesheet('update today')", "async def timein(self, ctx):\n\t\tif ctx.invoked_subcommand is None:\n\t\t\tawait send_cmd_help(ctx)", "async def dailytomorrow(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def update_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n try:\n started, finished = helpers.parse_date_parameters(args[1:])\n except ValueError as error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track()\n if track:\n started = track['started']\n else:\n started = finished\n # Get timesheet records\n tracks = self.db.get_tracks_by_date(started, finished,\n also_unfinished=False)\n # Exposure tracks to the table\n tracks_contents = self.create_tracks_contents(tracks)\n lnum = 0\n header = self.get_timesheet_header(started, finished)\n header_length = len(header.split(os.linesep))\n while(True):\n try:\n # Create the editor's contents\n contents = self.create_timesheet_contents(header, tracks_contents)\n timesheet = self.open_external_editor(contents, lnum)\n # we must get the table header here due to the length of the columns\n table_header = timesheet[header_length-1:header_length+1]\n tracks = timesheet[header_length+1:]\n except OSError, message:\n print(\"*** Error: %s\", message)\n return\n # Parse the input\n try:\n data = self.parse_timesheet(tracks, header_length)\n except errors.ParsingError as error:\n print(error.msg)\n print(\"Would you like to update the timesheet again? 
[Y/n] \")\n if not helpers.get_yes_no(default='y'):\n return\n table_header.extend(tracks)\n tracks_contents = \"\".join(table_header)\n lnum = error.lnum\n continue\n break\n # Update the DB\n # TODO: get rid the danger operation\n self.db.delete_tracks_by_date(started=started, finished=finished)\n data.sort(key=operator.itemgetter('started'))\n for track in data:\n self.db.create_track(track['tid'],\n track['started'], track['finished'],\n int(not bool(track['is_billed'])))\n print('The timesheet has been updated.')", "def do_rw(self, arg):\n self.do_timesheet('report week')", "def main_time_chart(self) -> Component:\n logger.debug('Generating time graph.')\n df = self.activity_manager.metadata_weekly_time_series(activity_type='run')\n\n freq_dropdown = dcc.Dropdown('overview_main_time_chart_freq_dropdown', options=[\n {'label': 'Weekly', 'value': 'weekly'},\n {'label': 'Monthly', 'value': 'monthly'}\n ], value='monthly')\n\n y_dropdown = dcc.Dropdown('overview_main_time_chart_y_dropdown', options=[\n {'label': 'Average speed', 'value': 'mean_speed'},\n {'label': 'Total distance', 'value': 'total_distance'},\n {'label': 'Total duration', 'value': 'total_duration'},\n {'label': 'Average heart rate', 'value': 'mean_hr'},\n {'label': 'Number of activities', 'value': 'activity_count'}\n ], value='activity_count')\n\n graph = dcc.Graph(\n id='overview_main_time_chart',\n figure=self.main_time_fig('weekly', 'activity_count')\n )\n return html.Div([\n html.H2('Progress over time'),\n dbc.Row([\n dbc.Col(html.Div(['Frequency:', freq_dropdown])),\n dbc.Col(html.Div(['y axis:', y_dropdown]))\n ]),\n graph\n ])", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n 
fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? (Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def setNumTimeSubSteps(*argv):", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 20 events')\n try:\n eventsResult = service.events().list(\n calendarId='[email protected]', timeMin=now, maxResults=20, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n if not events:\n print('No upcoming events found.')\n text_file = open(\"scheduledActions.txt\", \"wb\") #May want to use a check on the msg type to only overwrite calendar tasks\n # text_file.write(bytes('Updated '+now[:-8]+'\\n','UTF-8'))\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n start = start[:22] + start[-2:] #Trims the last colon\n start = datetime.datetime.strptime(start,'%Y-%m-%dT%H:%M:%S%z')\n start = int(time.mktime(start.timetuple()))\n end = event['end'].get('dateTime', event['end'].get('date'))\n end = end[:22] + end[-2:] #Trims the last colon\n end = datetime.datetime.strptime(end,'%Y-%m-%dT%H:%M:%S%z')\n end = int(time.mktime(end.timetuple()))\n description = event['description']\n if description.count(',')==5:\n desc1=description.split(\",\")[0] + \",\" + description.split(\",\")[1] + \",\" + description.split(\",\")[2]\n print(start,desc1)\n writeString=str(start)+','+desc1+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n desc2=description.split(\",\")[3] + \",\" + description.split(\",\")[4] + \",\" + description.split(\",\")[5]\n print(end,desc2)\n writeString=str(end)+','+desc2+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n else:\n print(start, description) #event['summary'] event['location']\n writeString=str(start)+','+description+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n text_file.close()\n print('Calendar read complete.')\n except httplib2.ServerNotFoundError:\n print(\"!---- Looks like there's no internet connection just now. 
Wait till tomorrow.\")", "def do_ts(self, arg):\n self.do_timesheet(arg)", "def report_tasks(self, stylise: bool=True):\n report = pd.DataFrame.from_dict(data=self.pm.report_intent())\n intent_replace = {'transition': 'Transition', 'synthetic_builder': 'SyntheticBuilder', 'wrangle': 'Wrangle',\n 'feature_catalog': 'FeatureCatalog', 'data_tolerance': 'DataTolerance'}\n report['component'] = report.intent.replace(to_replace=intent_replace)\n report['task'] = [x[0][10:] for x in report['parameters']]\n report['parameters'] = [x[1:] for x in report['parameters']]\n report = report.loc[:, ['level', 'order', 'component', 'task', 'parameters', 'creator']]\n if stylise:\n return self._report(report, index_header='level')\n return report", "def submit_hours(self, report):\n raise NotImplementedError", "def main():\n t = Task(\"Clean Garage\")\n t.datetime_start = datetime.datetime(2017, 10, 10)\n t.datetime_end = datetime.datetime(2018, 5, 12)\n\n for k, v in t.__dict__.items():\n print(k, v)", "def reports_cli():", "def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)", "def runTimeCourse(self,duration,stepSize=None,intervals=None,\n TCName=None,adjustParams=None,subSet=None,\n rocket=False,genReactions=False,throttle=None):\n if (stepSize is None) and (intervals is None):\n stepSize = 0.01\n if adjustParams is not None:\n if subSet is None:\n subSet = range(len(adjustParams.index))\n results = []\n if not isinstance(duration,list):\n duration = [duration for i in subSet]\n if len(duration)!=len(subSet):\n return False\n if rocket:\n jobName = \"TC\"+str(randint(0, 99999))\n timeCourses = []\n for setIndex, myDur in zip(subSet,duration):\n myScriptName = os.path.join(self.run_dir,\n jobName+str(setIndex))\n if not os.path.isdir(myScriptName):\n os.mkdir(myScriptName)\n copasi_filename = os.path.join(myScriptName,\n \"timeCourse.cps\")\n if genReactions!=False:\n if isinstance(genReactions,dict):\n self.genReactionAntString(*genReactions)\n else:\n self.genReactionAntString()\n self.recentModel = model.loada(self.reactionAntString,\n copasi_filename)\n else:\n self.recentModel = model.loada(self.antString,\n copasi_filename)\n model.InsertParameters(self.recentModel,\n df=adjustParams,\n index=setIndex,inplace=True)\n if stepSize is not None:\n self.recentTimeCourse = tasks.TimeCourse(\n self.recentModel,end=myDur,\n step_size=stepSize,\n run=False)\n else:\n self.recentTimeCourse = tasks.TimeCourse(\n self.recentModel,end=myDur,\n step_size=myDur/intervals,\n run=False)\n timeCourses.append(self.recentTimeCourse)\n myScriptName = os.path.join(myScriptName,\n jobName + \".sh\")\n shellString = (\"#!/bin/bash\\nCopasiSE \"+\n copasi_filename)\n f = open(myScriptName, 'w')\n f.write(shellString)\n f.close()\n if isinstance(throttle,dict):\n if (\"user\" in throttle.keys() and\n \"jobLimit\" in throttle.keys()):\n while(self.checkRuningProcesses(\n user=throttle[\"user\"]) >=\n throttle[\"jobLimit\"]):\n time.sleep(60)\n os.system(\"sbatch \"+myScriptName)\n while(self.checkRuningProcesses(job=jobName+\".sh\") > 0):\n time.sleep(60)\n for theTimeCourse in timeCourses:\n sucsessful=False\n logging.disable(logging.WARNING)\n while not sucsessful:\n try:\n parse_object = viz.Parse(theTimeCourse)\n results.append(parse_object.data.copy())\n sucsessful = True\n except:\n time.sleep(60)\n logging.disable(logging.NOTSET)\n return results\n else:\n if TCName is None:\n copasi_filename = self.genPathCopasi(\"timeCourse\")\n else:\n copasi_filename = 
os.path.join(self.run_dir, TCName)\n if genReactions!=False:\n if isinstance(genReactions,dict):\n self.genReactionAntString(*genReactions)\n else:\n self.genReactionAntString()\n self.recentModel = model.loada(self.reactionAntString,\n copasi_filename)\n else:\n self.recentModel = model.loada(self.antString,\n copasi_filename)\n for setIndex, myDur in zip(subSet,duration):\n model.InsertParameters(self.recentModel,\n df=adjustParams,\n index=setIndex,inplace=True)\n \n if stepSize is not None:\n self.recentTimeCourse = tasks.TimeCourse(\n self.recentModel,end=myDur,\n step_size=stepSize)\n else:\n self.recentTimeCourse = tasks.TimeCourse(\n self.recentModel,end=myDur,\n step_size=myDur/intervals)\n results.append(viz.Parse(\n self.recentTimeCourse).data.copy())\n return results\n else:\n if TCName is None:\n copasi_filename = self.genPathCopasi(\"timeCourse\")\n else:\n copasi_filename = os.path.join(self.run_dir, TCName)\n \n if genReactions!=False:\n if isinstance(genReactions,dict):\n self.genReactionAntString(*genReactions)\n else:\n self.genReactionAntString()\n self.recentModel = model.loada(self.reactionAntString,\n copasi_filename)\n else:\n self.recentModel = model.loada(self.antString,\n copasi_filename)\n if stepSize is not None:\n self.recentTimeCourse = tasks.TimeCourse(\n self.recentModel,end=duration,\n step_size=stepSize)\n else:\n self.recentTimeCourse = tasks.TimeCourse(\n self.recentModel,end=duration,\n step_size=duration/intervals)\n return viz.Parse(self.recentTimeCourse).data.copy()", "def show_runs(self,start=0,end=99999999,csv=False):\n if csv:\n print '{:>7}, {:>10}, {:>8}, {:>10}, {:3}, {:2}'.format('Run', \n 'Day', 'Time', 'Length', 'xtc', 'h5') \n \n else:\n print '='*72\n print 'Experiment {:}'.format(self.exp)\n print ' xtc dir {:}'.format(self.xtc_dir)\n print ' hdf5 dir {:}'.format(self.h5_dir)\n print '-'*72\n print '{:>7} {:>10} {:>8} {:>10} {:3} {:2}'.format('Run', 'Day', 'Time', \n 'Length', 'xtc', 'h5') \n print '-'*72\n \n for item in self.runs:\n run = item['num']\n if run >= start and run <= end:\n datestr = time.strftime('%Y-%m-%d',\n time.localtime(item['begin_time_unix']))\n timestr = time.strftime('%H:%M:%S',\n time.localtime(item['begin_time_unix']))\n if len(item['xtc_files']) > 0:\n xtc = 'xtc'\n else:\n xtc = ''\n \n if len(item['h5_files']) > 0:\n h5 = 'h5'\n else:\n h5 = ''\n \n begin_time = item['begin_time_unix']\n end_time = item['end_time_unix'] \n if end_time:\n dtime = end_time - begin_time\n flag = ' '\n else:\n dtime = time.time() - begin_time\n flag = '*'\n\n dmin = int(dtime/60)\n dsec = int(dtime % 60)\n if dmin > 0:\n dtstr = '{:4}m {:02}s'.format(dmin,dsec)\n else:\n dtstr = '{:02}s'.format(dsec)\n\n if csv:\n print '{:7}, {:10}, {:8}, {:>10}, {:3}, {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n else:\n print '{:7} {:10} {:8} {:>10} {:3} {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n\n if flag in '*':\n print '* Currently Acquiring Data for Run {:}'.format(run)", "def task_run_core():\n\n ## initialize parameters\n if task_get_option('format'):\n fmts = task_get_option('format')\n else:\n fmts = 'HB' # default value if no format option given\n for fmt in fmts.split(','):\n last_updated = fetch_last_updated(fmt)\n write_message(\"last stored run date is %s\" % last_updated)\n\n sql = {\n \"all\" : \"\"\"SELECT br.id FROM bibrec AS br, bibfmt AS bf\n WHERE bf.id_bibrec = br.id AND bf.format = '%s'\"\"\" % fmt,\n \"last\": \"\"\"SELECT br.id FROM bibrec AS br\n INNER JOIN bibfmt AS bf ON bf.id_bibrec = br.id\n 
WHERE br.modification_date >= '%(last_updated)s'\n AND bf.format='%(format)s'\n AND bf.last_updated < br.modification_date\"\"\" \\\n % {'format': fmt,\n 'last_updated': last_updated.strftime('%Y-%m-%d %H:%M:%S')},\n \"missing\" : \"\"\"SELECT br.id\n FROM bibrec as br\n LEFT JOIN bibfmt as bf\n ON bf.id_bibrec = br.id AND bf.format ='%s'\n WHERE bf.id_bibrec IS NULL\n AND br.id BETWEEN %%s AND %%s\n \"\"\" % fmt,\n }\n sql_queries = []\n cds_query = {}\n if task_has_option(\"all\"):\n sql_queries.append(sql['all'])\n if task_has_option(\"last\"):\n sql_queries.append(sql['last'])\n if task_has_option(\"collection\"):\n cds_query['collection'] = task_get_option('collection')\n else:\n cds_query['collection'] = \"\"\n\n if task_has_option(\"field\"):\n cds_query['field'] = task_get_option('field')\n else:\n cds_query['field'] = \"\"\n\n if task_has_option(\"pattern\"):\n cds_query['pattern'] = task_get_option('pattern')\n else:\n cds_query['pattern'] = \"\"\n\n if task_has_option(\"matching\"):\n cds_query['matching'] = task_get_option('matching')\n else:\n cds_query['matching'] = \"\"\n\n if task_has_option(\"recids\"):\n recids = split_cli_ids_arg(task_get_option('recids'))\n else:\n recids = []\n\n ### sql commands to be executed during the script run\n ###\n bibreformat_task(fmt, sql, sql_queries, cds_query, task_has_option('without'), not task_has_option('noprocess'), recids)\n return True", "def makeGanttChart(mode, fileName):\n\n figType = OUTPUT_FILE_EXTENSION # \"png\", \"eps\"\n\n # extract the figure name and target directory (to store the figures)\n figureFileName = fileName[0:-6]\n k = figureFileName.rfind(\"/\") + 1\n figureFileName = figureFileName[k:]\n k = fileName.rfind(\"/\") + 1\n targetDirectory = \"\"\n if k == 0:\n targetDirectory = \"./\"\n else:\n targetDirectory = fileName[0:k]\n targetFname = targetDirectory + figureFileName + \".\" + figType\n\n # import the tasks\n tasks = parseTasks(mode, fileName)\n machines = sorted(getMachines(tasks))\n orders = sorted(getOrders(tasks))\n processingUnits = sorted(getProccessingUnits(tasks))\n operations = sorted(getOperations(tasks))\n\n if mode == \"MTS\":\n tasks = consolidateSiblingTasks(tasks, machines)\n tasks = removeDuplicateTasks(tasks)\n if checkForOverlappingTasks(tasks, machines):\n print(\"ERROR! 
Found overlapping tasks, check your input file!\")\n exit(5)\n\n # Print all of the read tasks in DEBUG mode\n if MYDEBUG:\n for t in tasks:\n t.print()\n\n # build the figure\n fig = plt.figure(figsize=(10, 5), dpi=DPI) # <------ USER OPTION HERE -----------------\n ax = fig.add_subplot(111)\n ax.set_title(figureFileName)\n\n # set up the axes\n y_pos = np.arange(len(machines))\n ax.set_yticks(y_pos)\n ax.set_ylim(min(y_pos) - 0.7, max(y_pos) + 0.7)\n ax.set_yticklabels(machines)\n ax.set_xlabel(\"Time (Hours)\")\n x_pos = np.arange(math.ceil(getMakeSpan(tasks))+1)\n ax.set_xticks(x_pos)\n ax.set_axisbelow(True)\n ax.grid(b=True, which=\"major\", axis=\"x\", alpha=0.5)\n\n # assign a unique color to each order and each operation\n # http://matplotlib.org/examples/color/colormaps_reference.html\n cmapOrders = plt.cm.Pastel2(np.linspace(0, 1, len(orders)))\n cmapOperations = plt.cm.Pastel2(np.linspace(0, 1, len(operations)))\n\n # plot the task rectangles\n # https://stackoverflow.com/questions/21397549/stack-bar-plot-in-matplotlib-and-add-label-to-each-section-and-suggestions\n for i, m in enumerate(machines):\n compatibleTasks = []\n for t in tasks:\n if m == t.machine:\n compatibleTasks.append(t)\n slots = [] # time slots for machine m\n for ct in compatibleTasks:\n for ct in compatibleTasks:\n thisSlot = (ct.tBegin, ct.tEnd)\n if thisSlot not in slots:\n slots.append(thisSlot)\n slots = sorted(slots)\n if mode == \"SCH\":\n for s, slt in enumerate(slots):\n thisBatchSize = \"\"\n thisOperation = \"\"\n for ct in compatibleTasks:\n if (ct.tBegin, ct.tEnd) == slt:\n thisBatchSize = ct.batchSize\n thisOperation = ct.operation\n thisColor = cmapOperations[operations.index(thisOperation)]\n h = ax.barh(i, width=slots[s][1]-slots[s][0], left=slots[s][0], align='center', color=thisColor)\n bl = h[0].get_xy()\n x = 0.5*h[0].get_width() + bl[0]\n y = 0.5*h[0].get_height() + bl[1]\n ax.text(x, y, str(thisBatchSize), ha='center',va='center')\n elif mode == \"MTS\":\n for s, slt in enumerate(slots):\n # Get the MAIN task corresponding to the current time slot\n currentTask = 0\n for ct in compatibleTasks:\n if (ct.tBegin, ct.tEnd) == slt:\n currentTask = ct\n # Plot the unique task\n if len(currentTask.subtasks) == 0:\n duration = slots[s][1]-slots[s][0]\n thisColor = cmapOrders[orders.index(currentTask.order)]\n\n h = []\n h.append(ax.barh(i, width=duration, left=slots[s][0], align='center', color=\"grey\", alpha=0.7))\n h.append(ax.barh(i, width=duration - 2*MARGIN, left=slots[s][0] + MARGIN, align='center',\n color=thisColor, height=0.65, linewidth=0))\n bl = h[0][0].get_xy()\n x = 0.5*h[0][0].get_width() + bl[0]\n y = 0.5*h[0][0].get_height() + bl[1]\n thisBatchSize = currentTask.batchSize\n ax.text(x, y, str(thisBatchSize), ha='center',va='center', size=LABEL_SIZE)\n else:\n # Plot first the MAIN task\n duration = slots[s][1]-slots[s][0]\n barHandles = []\n barHandles.append(ax.barh(i, width=duration, left=slots[s][0],\n align='center', color=\"grey\", alpha=0.7))\n bl = barHandles[0][0].get_xy()\n l = slots[s][0] + MARGIN\n # Plot the SUB tasks\n for counter, thisSub in enumerate(currentTask.subtasks):\n thisColor = cmapOrders[orders.index(thisSub.order)]\n partialDuration = (thisSub.batchSize/currentTask.batchSize) * duration - \\\n 2*MARGIN/len(currentTask.subtasks)\n barHandles.append(ax.barh(i, width=partialDuration, left=l, align='center', height=0.65, linewidth=0,\n color=thisColor))\n bl = barHandles[-1][0].get_xy()\n x = 0.5*barHandles[-1][0].get_width() + bl[0]\n y = 
0.5*barHandles[-1][0].get_height() + bl[1]\n thisBatchSize = thisSub.batchSize\n ax.text(x, y, str(thisBatchSize), ha='center',va='center', size=LABEL_SIZE)\n l = l + partialDuration\n else:\n print(\"INVALID MODE\")\n exit(5)\n\n # Show / print the figure\n fig.savefig(targetFname, dpi=DPI)\n # if MYDEBUG:\n # plt.show()\n plt.clf()\n plt.close()\n\n\n # plot a legend (print in different file)\n if PLOT_LEGENDS:\n if mode == \"SCH\":\n pat = []\n leg = plt.figure(figsize=(5, 5), dpi=DPI)\n frame = plt.gca()\n frame.axes.get_xaxis().set_visible(False)\n frame.axes.get_yaxis().set_visible(False)\n leg.patch.set_visible(False)\n for op in operations:\n thisColor = cmapOperations[operations.index(op)]\n pat.append(mpatches.Patch(color=thisColor, label=op))\n plt.legend(handles=pat)\n leg.savefig(targetDirectory + figureFileName + \"_legend.\" + figType, dpi=DPI)\n elif mode == \"MTS\":\n pat = []\n leg = plt.figure(figsize=(5, 5), dpi= DPI)\n frame = plt.gca()\n frame.axes.get_xaxis().set_visible(False)\n frame.axes.get_yaxis().set_visible(False)\n leg.patch.set_visible(False)\n for ord in orders:\n thisColor = cmapOrders[orders.index(ord)]\n pat.append(mpatches.Patch(color=thisColor, label=ord))\n plt.legend(handles=pat)\n leg.savefig(targetDirectory + figureFileName + \"_legend.\" + figType, dpi=DPI)\n else:\n print(\"INVALID MODE\")\n exit(5)", "def main():\n\n # Initial message\n taq_data_tools_responses_physical_short_long.taq_initial_message()\n\n # Tickers and days to analyze\n year = '2008'\n tickers = ['AAPL', 'GOOG']\n taus_p = [x for x in range(10, 101, 10)]\n tau = 1000\n\n # Basic folders\n taq_data_tools_responses_physical_short_long.taq_start_folders(year)\n\n # Run analysis\n taq_data_plot_generator(tickers, year, tau, taus_p)\n\n print('Ay vamos!!!')\n\n return None", "def overviewCommand(self):\n plt.figure(11)\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET'),\n color='r', label='FUOFFSET',\n linewidth=1, alpha=1) \n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='r', linewidth=3, alpha=0.5,\n label=self.DLtrack+'-PSP')\n plt.legend()\n plt.subplot(212, sharex=ax)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET')-\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='k', label='$\\Delta$',\n linewidth=1, alpha=1) \n \n signal = self.raw['OPDC'].data.field('FUOFFSET')\n plt.figure(12)\n plt.clf()\n ax2 = plt.subplot(111)\n Fs = 1e6/np.diff(self.raw['OPDC'].data.field('TIME')).mean()\n print Fs\n ax2.psd(signal[:50000], NFFT=5000, Fs=Fs, label='FUOFFSET',scale_by_freq=0)\n plt.legend()", "def test_cmd_desc(self, mock_gametime):\n\n mock_gametime.return_value = _get_timestamp(\"autumn\", \"afternoon\")\n\n # view base desc\n self.call(\n extended_room.CmdExtendedRoomDesc(),\n \"\",\n f\"\"\"\nRoom Room(#{self.room1.id}) Season: autumn. Time: afternoon. 
States: None\n\nRoom state (default) (active):\nBase room description.\n \"\"\".strip(),\n )\n\n # add spring desc\n self.call(\n extended_room.CmdExtendedRoomDesc(),\n \"/spring Spring description.\",\n \"The spring-description was set on Room\",\n )\n self.call(\n extended_room.CmdExtendedRoomDesc(),\n \"/burning Burning description.\",\n \"The burning-description was set on Room\",\n )\n\n self.call(\n extended_room.CmdExtendedRoomDesc(),\n \"\",\n f\"\"\"\nRoom Room(#{self.room1.id}) Season: autumn. Time: afternoon. States: None\n\nRoom state burning:\nBurning description.\n\nRoom state spring:\nSpring description.\n\nRoom state (default) (active):\nBase room description.\n \"\"\".strip(),\n )\n\n # remove a desc\n self.call(\n extended_room.CmdExtendedRoomDesc(),\n \"/del/burning/spring\",\n \"The burning-description was deleted, if it existed.|The spring-description was\"\n \" deleted, if it existed\",\n )\n # add autumn, which should be active\n self.call(\n extended_room.CmdExtendedRoomDesc(),\n \"/autumn Autumn description.\",\n \"The autumn-description was set on Room\",\n )\n self.call(\n extended_room.CmdExtendedRoomDesc(),\n \"\",\n f\"\"\"\nRoom Room(#{self.room1.id}) Season: autumn. Time: afternoon. States: None\n\nRoom state autumn (active):\nAutumn description.\n\nRoom state (default):\nBase room description.\n \"\"\".strip(),\n )" ]
[ "0.6350595", "0.5953301", "0.5721576", "0.5545668", "0.53443635", "0.5267479", "0.52153486", "0.51955545", "0.5187003", "0.50748396", "0.50713784", "0.50271684", "0.4993536", "0.49629956", "0.49515334", "0.49490532", "0.49332142", "0.49312136", "0.4919219", "0.49082175", "0.48653492", "0.48507696", "0.48448443", "0.48339102", "0.48097947", "0.47822705", "0.47687358", "0.47128707", "0.47128224", "0.47113577" ]
0.61108553
1
Cache the Blurb instances.
def _populate_blurbs(self): for blurb_id in _BLURB_IDS: if getattr(self._section, "use_blurb_%s" % blurb_id): self._blurbs.append(_Blurb(self._section, blurb_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_banners():\n banners = cache.get('banners')\n if not banners:\n banners = Banner.objects.filter(general=True)\n cache.set('banners', banners)\n return {'banners': banners}", "def _fullcache(self):\n query = {\n \"%s__id\" % self._model: self._objid,\n \"attribute__db_model__iexact\": self._model,\n \"attribute__db_attrtype\": self._attrtype,\n }\n attrs = [\n conn.attribute\n for conn in getattr(self.obj, self._m2m_fieldname).through.objects.filter(**query)\n ]\n self._cache = dict(\n (\n \"%s-%s\"\n % (\n to_str(attr.db_key).lower(),\n attr.db_category.lower() if attr.db_category else None,\n ),\n attr,\n )\n for attr in attrs\n )\n self._cache_complete = True", "def warmup_cache(self):\n self.get_whitespace_changes()\n self.get_cvsheader_changes()\n self.get_unmodified_changes()\n self.get_used_changes()\n self.get_zapped_changes()\n self.get_undecided_changes()", "def cacheSources(self):\n page = pywikibot.Page(self.repo, u'List of wikis/python', ns=4)\n self.source_values = json.loads(page.get())\n for family_code, family in self.source_values.iteritems():\n for source_lang in family:\n self.source_values[family_code][source_lang] = pywikibot.ItemPage(self.repo,\n family[source_lang])", "def fill_request_cache():\n if not request_cache.cache.get(\"bingo_request_cache_filled\"):\n\n # Assume that we're going to grab both BingoCache and\n # BingoIdentityCache from memcache\n memcache_keys = [\n BingoCache.CACHE_KEY,\n BingoIdentityCache.key_for_identity(identity())\n ]\n\n # Try to grab BingoCache from instance cache\n bingo_instance = instance_cache.get(BingoCache.CACHE_KEY)\n if bingo_instance:\n # If successful, use instance cached version...\n request_cache.cache[BingoCache.CACHE_KEY] = bingo_instance\n # ...and don't load BingoCache from memcache\n memcache_keys.remove(BingoCache.CACHE_KEY)\n\n # Load necessary caches from memcache\n dict_memcache = memcache.get_multi(memcache_keys)\n\n # Decompress BingoCache if we loaded it from memcache\n if BingoCache.CACHE_KEY in dict_memcache:\n dict_memcache[BingoCache.CACHE_KEY] = CacheLayers.decompress(\n dict_memcache[BingoCache.CACHE_KEY])\n\n # Update request cache with values loaded from memcache\n request_cache.cache.update(dict_memcache)\n\n if not bingo_instance:\n # And if BingoCache wasn't in the instance cache already, store\n # it with a 1-minute expiry\n instance_cache.set(BingoCache.CACHE_KEY,\n request_cache.cache.get(BingoCache.CACHE_KEY),\n expiry=CacheLayers.INSTANCE_SECONDS)\n\n request_cache.cache[\"bingo_request_cache_filled\"] = True", "def cache_all(self):\n if not self._cached_all:\n poss = range(len(self))\n uuids = self.vars['uuid']\n\n cls_names = self.variables['cls'][:]\n samples_idxss = self.variables['samples'][:]\n subchanges_idxss = self.variables['subchanges'][:]\n mover_idxs = self.variables['mover'][:]\n details_idxs = self.variables['details'][:]\n try:\n input_samples_vars = self.variables['input_samples']\n except KeyError:\n # BACKWARD COMPATIBILITY: REMOVE IN 2.0\n input_samples_idxss = [[] for _ in samples_idxss]\n else:\n input_samples_idxss = input_samples_vars[:]\n\n [self._add_empty_to_cache(*v) for v in zip(\n poss,\n uuids,\n cls_names,\n samples_idxss,\n input_samples_idxss,\n mover_idxs,\n details_idxs)]\n\n [self._load_partial_subchanges(c, s) for c, s in zip(\n self,\n subchanges_idxss)]\n\n self._cached_all = True", "def init_cache(self):\n if self.cacheable:\n self._instance._cache[self.name] = {}", "def fillCache(self):\n items = self.source.getRecent()\n items.reverse() # 
make sure the most recent ones are added last to the cache\n for item in items:\n self.cache.append(item.title)", "async def _cache_patterns(self) -> None:\n results: [asyncpg.Record] = await db_fetch(\n self.bot.db_conn,\n \"SELECT * FROM filter\"\n )\n for result in results:\n re_compiled = re.compile(result[\"filter_pattern\"])\n self._filter_cache[result[\"server_id\"]][re_compiled] = [result[\"filter_identifier\"]]", "def cache(self):\n import hxl.filters\n return hxl.filters.CacheFilter(self)", "def __iter__(self):\n cache_key = \"countries:all:{}\".format(get_language())\n if cache_key in self._cached_lists:\n yield from self._cached_lists[cache_key]\n return\n\n val = cache.get(cache_key)\n if val:\n self._cached_lists[cache_key] = val\n yield from val\n return\n\n val = list(super().__iter__())\n self._cached_lists[cache_key] = val\n cache.set(cache_key, val, 3600 * 24 * 30)\n yield from val", "def cache(self, name: str = None) -> B[B, E]:", "def all(self):\n if not self._cache:\n self.load()\n\n return self._cache", "def cache_results(self):\n self.cache_manager.cache_results(\n self.parser,\n self.query,\n self.search_engine_name,\n self.scrape_method,\n self.page_number,\n db_lock=self.db_lock\n )", "def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }", "def _clear_caches(self):\n self._brushes = {}\n self._formats = {}", "def SetPersistentCache(ambler, suggestions):\n for suggestion in suggestions:\n suggestion_object = models.CachedPlace()\n suggestion_object.lat = suggestion['lat']\n suggestion_object.lng = suggestion['lng']\n suggestion_object.name = suggestion['name']\n suggestion_object.food_type = suggestion['food_type']\n suggestion_object.cost = suggestion['cost']\n suggestion_object.why_description1 = suggestion['why_description1']\n suggestion_object.why_description2 = suggestion['why_description2']\n suggestion_object.cache_timestamp = suggestion['cache_timestamp']\n suggestion_object.address = suggestion['address']\n ambler.persistent_suggestion_cache.append(suggestion_object)\n ambler.put()", "def fill_cache_table():\n products = []\n for query in ['bread', 'milk', 'rice']:\n grocery = grocery_search(query)\n products += get_all_products_from_grocery_search(grocery)\n\n orm = ORM()\n for product in products:\n orm.add_cache(**product)", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def _retrieveCachedData(self):", "def _cache_data(self):\n while self._run:\n try:\n values = self._data_streamer.get_data_current_state()\n for parameter, mapping_method in self._mapping.items():\n value = values[parameter]\n mapped_notes = self._data_streamer.get_mapper_for_param(parameter, mapping_method[0]).map(value)\n self._value_queues[parameter].put((value,mapped_notes))\n except Exception, e:\n print e.message", "def __init__(self, *args, **kwargs):\n self._cachedict = {}", "def cache(self, *flush_fields):\r\n _cache_query = self.model._meta.db_table not in conf.CACHEBOT_TABLE_BLACKLIST\r\n return self._clone(setup=True, _cache_query=_cache_query, _flush_fields=flush_fields)", "def _cache(self):\n return self._class(self.client_servers, **self._options)", "def cache(cls):\n return Cache(cls, 
cls.cache_regions, cls.cache_label)", "def _get_blacklist(self):\n blacklist = {}\n for b in TransifexBlacklist.objects.filter(domain=self.app.domain, app_id=self.app.id).all():\n blacklist.setdefault(b.domain, {})\n blacklist[b.domain].setdefault(b.app_id, {})\n blacklist[b.domain][b.app_id].setdefault(b.module_id, {})\n blacklist[b.domain][b.app_id][b.module_id].setdefault(b.field_type, {})\n blacklist[b.domain][b.app_id][b.module_id][b.field_type].setdefault(b.field_name, {})\n blacklist[b.domain][b.app_id][b.module_id][b.field_type][b.field_name][b.display_text] = True\n return blacklist", "def _cache_state(self, instance):\n if instance.pk:\n instance.__cache_data = dict((f, getattr(instance, f)) for f in self.cache_fields)\n else:\n instance.__cache_data = UNSAVED" ]
[ "0.6028274", "0.56695217", "0.55811936", "0.5542353", "0.5520321", "0.5519755", "0.5454257", "0.54364455", "0.54282826", "0.5376936", "0.5376592", "0.5370709", "0.53413415", "0.5291048", "0.5280704", "0.52741593", "0.52115685", "0.5194591", "0.51608825", "0.51608825", "0.51608825", "0.51608825", "0.51087946", "0.5094118", "0.50839293", "0.5060619", "0.50492", "0.50457615", "0.49756104", "0.4970477" ]
0.65004164
0
Hook the addoption to add the scenario and nosetup options
def pytest_addoption(parser): parser.addoption( "--scenarios", nargs='*', metavar="scenario", choices=Scenario.scenarios.keys(), help="scenarios to run, leave empty to print scenarios", ) parser.addoption( "--no-setup", action="store_true", help="Disable setup and teardown", default=False )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_addoption(parser):\n\n parser.addoption(\n \"--skip_config\",\n action=\"store_true\",\n help=\"Apply new configurations on DUT\"\n )\n\n parser.addoption(\n \"--config_only\",\n action=\"store_true\",\n help=\"Apply new configurations on DUT\"\n )", "def pytest_addoption(parser):\n add_cli_options(parser)", "def pytest_addoption(parser):\n parser.addoption(\"--cases\", help=\"Test cases to run\")", "def pytest_addoption(parser):\n referencepytest.addoption(parser)", "def _additional_option(self):\n pass", "def pytest_addoption(parser):\n parser.addoption('--with_qai', action='store_true', dest=\"with_qai\",\n default=False, help=\"enable tests that access James' test version of QAI\")\n parser.addoption('--with_chemstock', action='store_true', dest=\"with_chemstock\",\n default=False, help=\"enable tests that access a YAML file containing chemstock data\")\n parser.addoption('--dump_chemstock', action='store_true', dest=\"dump_chemstock\",\n default=False, help=\"\"\"Perform a test that generates a YAML-file\ndump of the QAI system. This can be used in tests marked --with_chemstock.\"\"\")\n parser.addoption('--with_cs_qai', action='store_true', dest=\"with_cs_qai\",\n default=False, help=\"enable tests that update chemstock from QAI\")\n # add the option for the scospec plugin\n parser.addoption(\n '--scospec', action='store', dest='scospecfile', type=str,\n help='Report test results to a YAML file in Scospec format.'\n )", "def setup(self, optparser):\n\t\tpass", "def pytest_addoption(parser):\n group = parser.getgroup(\"molecule\")\n\n _addoption(\n group,\n parser,\n \"molecule_unavailable_driver\",\n None,\n \"What marker to add to molecule scenarios when driver is \"\n \"unavailable. (ex: skip, xfail). Default: None\",\n )\n _addoption(\n group,\n parser,\n \"molecule_base_config\",\n None,\n \"Path to the molecule base config file. The value of this option is \"\n \"passed to molecule via the --base-config flag. Default: None\",\n )\n _addoption(\n group,\n parser,\n \"skip_no_git_change\",\n None,\n \"Commit to use as a reference for this test. If the role wasn't\"\n \"changed since this commit skip the test. 
Default: None\",\n )", "def pytest_addoption(parser):\n # ignore datasources\n parser.addoption('--ignore-datasources', action='store_true', default=False,\n help='Ignore the datasource marker applied to tests')", "def pytest_addoption(parser):\n parser.addoption(\"--address\", action=\"store\", default=\"http://192.168.56.103/\",\n help=\"Opencart web address\")\n parser.addoption(\"--browser\", action=\"store\", default=\"chrome\", help=\"Browser name\")\n parser.addoption(\"--username\", action=\"store\", default=\"admin\", help=\"User Name\")\n parser.addoption(\"--password\", action=\"store\", default=\"admin\", help=\"User Password\")\n parser.addoption(\"--iwait\", action=\"store\", default=\"30000\", help=\"Implicitly wait parameter\")\n parser.addoption(\"--pltimeout\", action=\"store\", default=\"1000\", help=\"Page load timeout\")\n parser.addoption(\"--productname\", action=\"store\", default=\"New Product\", help=\"Product Name\")\n parser.addoption(\"--keywords\", action=\"store\",\n default=\"New Meta Tag Keyword\",\n help=\"Meta Tag Keyword\")\n parser.addoption(\"--modelname\", action=\"store\", default=\"New model\", help=\"Model Name\")\n parser.addoption(\"--report\", action=\"store\", default=\"--alluredir /allure_reports\")", "def pytest_addoption(parser):\n parser.addoption(\"--run-flaky\", action=\"store_true\",\n help=\"runs flaky tests\")\n parser.addoption(\"--run-network-tests\", action=\"store_true\",\n help=\"runs tests requiring a network connection\")", "def pytest_addoption(parser):\n\n parser.addoption(\n '--url',\n action='store',\n default=CONF.DEFAULT_URL,\n help=f'Base URL for API, by default - {CONF.DEFAULT_URL}.'\n )\n\n parser.addoption(\n '--delay',\n action='store',\n type=int,\n default=CONF.DEFAULT_REQUEST_DELAY,\n help=f'Delay between API requests, by default - {CONF.DEFAULT_REQUEST_DELAY}.'\n )", "def pytest_addoption(parser):\n parser.addoption(\n \"--use-real-aws-may-incur-charges\", action=\"store_true\", default=False,\n help=\"Connect to actual AWS services while testing. WARNING: THIS MAY INCUR \"\n \"CHARGES ON YOUR ACCOUNT!\"\n )", "def pytest_addoption(parser):\n parser.addoption(\"--accesskey\")\n parser.addoption(\"--url\")", "def pytest_addoption(parser):\n parser.addoption(\"--address\", action=\"store\",\n default=\"http://192.168.145.130/\", help=\"Opencart web address\")\n parser.addoption(\"--browser\", action=\"store\", default=\"chrome\", help=\"Browser name\")\n parser.addoption(\"--wait\", action=\"store\", default=10, help=\"Implicity wait\")", "def pytest_addoption(parser):\n parser.addoption(\"--lm\",\n action=\"store\",\n #required=True,\n help=\"Binary directory of Lightmetrica\")\n parser.addoption(\"--attach\",\n action=\"store_true\",\n help=\"Wait some seconds for being attached by a debugger\")", "def pytest_addoption(parser):\n parser.addoption(\"--usertoken\",\n action=\"store\",\n help=\"amivapi user token\")\n parser.addoption(\"--admintoken\",\n action=\"store\",\n help=\"amivapi pvk admin token\")", "def add_option(self, *args, **kwargs):\r\n calling_module = Inspection.find_calling_module()\r\n added_option = self._get_option_from_args(args, kwargs)\r\n self._add_option(calling_module, added_option)", "def pytest_addoption(parser):\n parser.addoption('--account-name', help='ADLS Gen2 account name e.g. ``sandboxgen2``')\n parser.addoption('--dns-suffix', help='ADLS Gen2 dns suffix e.g. 
``dfs.core.net``',\n default=constants.DEFAULT_DNS_SUFFIX)\n parser.addoption('--create-filesystem', action='store_true',\n help='Create filesystem.')\n parser.addoption('--filesystem-id', help='ADLS Gen2 filesystem identifier')", "def pytest_addoption(parser):\n add_parser_options(parser.addoption, with_defaults=False)\n\n parser.addini(\n \"tavern-global-cfg\",\n help=\"One or more global configuration files to include in every test\",\n type=\"linelist\",\n default=[],\n )\n parser.addini(\n \"tavern-http-backend\", help=\"Which http backend to use\", default=\"requests\"\n )\n parser.addini(\n \"tavern-mqtt-backend\", help=\"Which mqtt backend to use\", default=\"paho-mqtt\"\n )\n parser.addini(\n \"tavern-strict\",\n help=\"Default response matching strictness\",\n type=\"args\",\n default=None,\n )\n parser.addini(\n \"tavern-beta-new-traceback\",\n help=\"Use new traceback style (beta)\",\n type=\"bool\",\n default=False,\n )\n parser.addini(\n \"tavern-always-follow-redirects\",\n help=\"Always follow HTTP redirects\",\n type=\"bool\",\n default=False,\n )", "def addOption(self, parser):\n pass", "def handle_option(self, option, options):\n pass", "def add_options(self):\n self.add_option_save()\n self.add_option_enable()", "def pytest_addoption(parser):\n parser.addoption(\"-c\", action=\"store\", help=\" -c <test config file>\")", "def pytest_addoption(parser):\n parser.addoption(\"--browser\")", "def pytest_addoption(parser):\n DEFAULT_DUMP_FILE = \".intercepted\"\n\n parser.addoption(\n \"--intercept-remote\",\n dest=\"intercept_remote\",\n action=\"store_true\",\n default=False,\n help=\"Intercepts outgoing connections requests.\",\n )\n parser.addoption(\n \"--remote-status\",\n dest=\"remote_status\",\n action=\"store\",\n nargs=\"?\",\n const=\"show\",\n default=\"no\",\n help=\"Reports the status of intercepted urls (show/only/no).\",\n )\n parser.addini(\n \"intercept_dump_file\",\n \"filepath at which intercepted requests are dumped\",\n type=\"string\",\n default=DEFAULT_DUMP_FILE,\n )", "def add_option(self, label, action, type_func, shortcut):\n pass", "def pytest_addoption(parser):\n group = parser.getgroup('terminal reporting')\n group.addoption('--dbus',\n dest='dbus',\n default=True,\n help='Enable D-Bus notifications.')", "def add_option(self, option, desc, command):\n self.__options[option] = (desc, command)", "def pytest_addoption(parser):\n\n def is_true(val):\n '''\n return whether the provided string is intended to mean boolean True\n '''\n return re.match(r'^(true|yes|1|on)$', val, re.IGNORECASE) is not None\n\n # Update .default and .help for '--credentials' option\n group = parser.getgroup('credentials', 'credentials')\n for opt in group.options:\n if opt.dest == 'credentials_file' and opt.default == optparse.NO_DEFAULT:\n opt.default = 'credentials.yml'\n opt.help = opt.help + \" (default: %default)\"\n\n # Update .default and .help for '--baseurl' option\n group = parser.getgroup('selenium', 'selenium')\n for opt in group.options:\n if opt.dest == 'base_url' and opt.default == '':\n if test_config.has_option('general', 'baseurl'):\n opt.default = test_config.get('general', 'baseurl')\n opt.help = opt.help + \" (default: %default)\" #% opt.default\n break\n\n # Callback helper for list-style parameters\n def list_callback(option, opt_str, value, parser, *args, **kwargs):\n '''\n Alters the built-in action='append' behavior by replacing the\n defaults, with any values provided on the command-line.\n '''\n # Start with a fresh-hot list\n values = 
re.split('[,\\s]*', value)\n # If the current option value isn't the default, append\n if getattr(parser.values, option.dest) != option.default:\n values += getattr(parser.values, option.dest)\n setattr(parser.values, option.dest, values)\n\n # Add --logfile option to existing 'termincal reporting' option group\n optgrp = parser.getgroup(\"terminal reporting\")\n optgrp.addoption(\"--logfile\", action=\"store\", dest='logfile',\n default=test_config.get('general', 'log_file', ''),\n help=\"Specify a file to record logging information (default: %default)\")\n\n # Create a general test options\n optgrp = parser.getgroup('general_options', \"General Test Options\")\n optgrp.addoption(\"--config\", action=\"store\", dest=\"cfg_file\",\n default=cfg_file, type=\"string\",\n help=\"Specify test configuration file (default: %default)\")\n\n # TODO - Move to app-specific optgrp\n optgrp.addoption(\"--project\", action=\"store\", dest='project', default=None,\n help=\"Specify project (e.g. sam, headpin, katello, katello.cfse, aeolus, cfce)\")\n\n optgrp.addoption(\"--project-version\", action=\"store\",\n dest='project-version', default='1.1',\n help=\"Specify project version number (default: %default)\")\n\n optgrp.addoption(\"--enable-ldap\", action=\"store_true\", dest='enable-ldap',\n default=test_config.getboolean('general', 'enable-ldap'),\n help=\"Specify whether LDAP authentication is enabled (default: %default)\")\n\n optgrp.addoption(\"--test-cleanup\", action=\"store_true\",\n dest='test-cleanup', default=False,\n help=\"Specify whether to cleanup after test completion (default: %default)\")\n\n optgrp.addoption(\"--releasever\", dest='releasevers', type=\"string\",\n action=\"callback\", callback=list_callback,\n default=test_config.getlist('general', 'releasevers'),\n help=\"Specify the release of the desired system templates (default: %default)\")\n\n optgrp.addoption(\"--arch\", \"--basearch\", dest='basearchs', type=\"string\",\n action=\"callback\", callback=list_callback,\n default=test_config.getlist('general', 'basearchs'),\n help=\"Specify the architecture of the desired system templates (default: %default)\")\n\n optgrp.addoption(\"--instance-password\", dest='instance-password',\n type=\"string\", action=\"store\",\n default=test_config.get('general', 'instance-password'),\n help=\"Specify the default password for applications (default: %default)\")\n\n # Allow generic access to all parameters within cloudforms.cfg\n optgrp.addoption(\"--keyval\", action=\"append\",\n dest='keyval', default=[],\n help=\"Specify key=val pairs to override config values\")\n\n # TODO - add parameters for each cloudforms.cfg [katello] option\n optgrp = parser.getgroup('katello_options', \"Katello Test Options (--project=katello)\")\n optgrp.addoption(\"--katello-url\", action=\"store\", dest='katello-url',\n default=test_config.get('katello', 'katello-url', raw=True),\n help=\"Specify URL for katello (default: %default)\")\n\n optgrp.addoption(\"--katello-env\", action=\"store\", dest='katello-env',\n default=test_config.get('katello', 'env'),\n help=\"Specify default environment (default: %default)\")\n\n optgrp.addoption(\"--katello-org\", action=\"store\", dest='katello-org',\n default=test_config.get('katello', 'org'),\n help=\"Specify default organization (default: %default)\")\n\n # TODO - add parameters for each cloudforms.cfg [aeolus] option\n optgrp = parser.getgroup('aeolus_options', \"Aeolus Test Options (--project=aeolus)\")\n optgrp.addoption(\"--aeolus-url\", action=\"store\", 
dest='aeolus-url',\n default=test_config.get('aeolus', 'aeolus-url', raw=True),\n help=\"Specify URL for aeolus (default: %default)\")\n\n optgrp.addoption(\"--aeolus-provider\", dest='aeolus-providers', type=\"string\",\n action=\"callback\", callback=list_callback,\n default=test_config.getlist('aeolus', 'providers'),\n help=\"Specify which cloud providers will be tested (default: %default)\")\n\n #optgrp.addoption(\"--aeolus-configserver\", dest='aeolus-configservers', type=\"string\",\n # action=\"callback\", callback=list_callback,\n # default=test_config.getlist('aeolus', 'configserver'),\n # help=\"Specify which providers to build a configserver in (default: %default)\")\n\n optgrp.addoption(\"--aeolus-template-url\", action=\"store\",\n dest='aeolus-template-url', default=test_config.get('aeolus', 'template-url'),\n help=\"Specify URL format string for system templates (default: %default)\")\n\n optgrp.addoption(\"--aeolus-custom-blueprint\", action=\"store\",\n dest='aeolus-custom-blueprint', default=test_config.get('aeolus', 'custom-blueprint'),\n help=\"Specify custom application blueprint template (default: %default)\")\n\n optgrp.addoption(\"--ec2-tunnel-ports\", action=\"store\",\n dest='ec2-tunnel-ports', default=test_config.get('general', 'ec2_tunnel_ports'),\n help=\"Specify SSH tunnel ports for EC2 (default: %default)\")" ]
[ "0.7230701", "0.7164577", "0.7153265", "0.70948154", "0.69381326", "0.69068253", "0.6897014", "0.6809286", "0.6781068", "0.6775993", "0.67368597", "0.67231834", "0.67189914", "0.67125493", "0.66949624", "0.6657753", "0.66511136", "0.66445357", "0.6621623", "0.6599791", "0.65914196", "0.654557", "0.65392166", "0.64684063", "0.64638007", "0.64594126", "0.64591926", "0.64461213", "0.64451045", "0.642171" ]
0.79354495
0
Hook the cmdline main to check if test should be run or only list available scenarios
def pytest_cmdline_main(config): try: if len(config.option.scenarios) == 0: print("Available scenarios:") for scenario in Scenario.scenarios.values(): print(f" {scenario.name} - {scenario.description}") return 0 except: pass return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pre_cli_list(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-l\"))\n assert \"test.yml\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def test_validargs(clickrunner):\n for args in maincli.valid_args:\n result = clickrunner.invoke(maincli.entrypoint, args)\n assert result.exit_code == 2\n assert \"Missing command\" in result.output", "def main_parse_args(args):\n # Must return so that check command return value is passed back to calling routine\n # otherwise py.test will fail\n return main(parse_args(args))", "def main_parse_args(args):\n # Must return so that check command return value is passed back to calling routine\n # otherwise py.test will fail\n return main(parse_args(args))", "def test_usage(clickrunner):\n result = clickrunner.invoke(maincli.entrypoint)\n assert result.exit_code == 0\n assert \"Usage\" in result.output\n for valid_subcmd in maincli.valid_subcmds:\n assert valid_subcmd in result.output\n for invalid_subcmd in maincli.invalid_subcmds:\n assert invalid_subcmd not in result.output", "def ok_to_run(self):\n # READING DOC STRING, LOOKING FOR VERSION\n doc_dict = self.doc_dict\n skip_test = False\n msg = ''\n if 'deprecated' in doc_dict:\n msg = \"This test has been deprecated\"\n skip_test = True\n elif 'version' in doc_dict and int(self.core.config.get('TestRun', 'driver_version')) < doc_dict['version']:\n msg = \"Features unavailable in this version: {}\".format(doc_dict['version'])\n skip_test = True\n elif 'datacenters' in doc_dict and len([s for s in doc_dict['datacenters'] if s in self.core.config.get('TestRun', 'datacenters')]) == 0:\n msg = \"Test only works in {}\".format(doc_dict['datacenters'])\n skip_test = True\n elif 'no_environment' in doc_dict and self.core.config.get('TestRun', 'datacenters').upper() in doc_dict['no_environment']:\n msg = \"Test does not work in {}\".format(doc_dict['no_environment'])\n skip_test = True\n if skip_test:\n self.core.write(\"\\n\" + \"_\" * 40 + \"\\n{}\".format(msg), level='error')\n if self.core.driver is not None:\n self.core.driver.close_driver()\n self.core.driver_state = False\n self.skipTest(msg)", "def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")", "def run_main(): # pragma: no cover\n 
RunTestsCLI.run()", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def test_pre_cli_init(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-i\", \"test\"))\n assert \"test\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-i\", \":test\"))\n assert \"does not exist\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def test_by_config(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_config(addon))\n\n self.run_mgr.by_default(self.cli_inst)\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Start'))\n self.assertTrue(output[1].startswith('Execute'))\n self.assertTrue(output[2].startswith('Stop'))", "def test_by_default(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_default(addon))\n self.run_mgr.by_config(self.fileio_inst)\n\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Starting'))\n self.assertTrue(output[1].startswith('Executing'))\n self.assertTrue(output[2].startswith('Stopping'))", "def test_get_scenarios(self):\n pass", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"Tester for YT Data API and different inputs\"\"\")\n parser.add_argument('-a', '--analytics', help='Performs a basic analytics lookup for the user\\'s channel entered')\n parser.add_argument('-c', '--comments', help='Performs a lookup of comments for the video id entered')\n args = parser.parse_args()\n\n if args.analytics:\n analytics = args.analytics\n analyt(analytics)\n\n if args.comments:\n comments = args.comments\n get_comments(comments)", "def has_test(args):\n return (args.test_set or args.test_source or args.test_dataset or\n args.test_stdin or args.test_datasets)", "def test_main_minimal(self, capsys):\n UI.main(**self.args)\n captured = capsys.readouterr().out\n assert self.stdout_output in captured", "def __main() :\n launchTests()", "def run_list_cli_tests(experiment_id: int) -> None:\n\n subprocess.check_call(\n [\"det\", \"-m\", conf.make_master_url(), \"experiment\", \"list-trials\", str(experiment_id)]\n )\n\n subprocess.check_call(\n [\"det\", \"-m\", conf.make_master_url(), \"experiment\", \"list-checkpoints\", str(experiment_id)]\n )\n subprocess.check_call(\n [\n \"det\",\n \"-m\",\n conf.make_master_url(),\n \"experiment\",\n \"list-checkpoints\",\n \"--best\",\n str(1),\n str(experiment_id),\n ]\n )", "def cli(ctx, keep_going, dry_run, with_tests, all_tests):\n ctx.obj = {'extra-args': []}\n if keep_going:\n ctx.obj['extra-args'].append('--keep-going')\n ctx.obj['dry_run'] = dry_run\n ctx.obj['tests'] = with_tests\n ctx.obj['no-blacklist'] = all_tests", "def run_starter(self, expect_to_fail=False):", "def main():\n run_test_all()", "def run_tests(self, test_labels):\n import pytest\n\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n if self.verbosity == 2:\n argv.append('--verbose')\n if self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n\n 
argv.extend(test_labels)\n return pytest.main(argv)", "def all(\n command,\n):\n # If we get to this point all tests listed in 'pre' have passed\n # unless we have run the task with the --warn flag\n if not command.config.run.warn:\n print(\n \"\"\"\nAll Checks Passed Successfully\n==========================================\n\"\"\"\n )", "def test_pre_cli_generation(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-o\", \"test\"))\n assert \"saved\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-o\", \":test\"))\n assert \"filenames\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-o\", \"test.\"))\n assert \"Filenames\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-o\", \"CON\"))\n assert \"OS reserved\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)\n out, err = run(dork.cli.main, [], *(\"\", \"noop\"))\n assert \"usage\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def test_examples():\n argv = [\"py.test\", \"-examples\"]\n assert get_sargs(argv) is None", "def test_CLI_minimal(self, capsys):\n sys.argv = self.common_args + [\"-l\", \"Berger_POPC\"]\n UI.entry_point()\n captured = capsys.readouterr().out\n assert \"Results written to OP_buildH.out\" in captured", "def test_cli_boolean_args(\n config,\n):\n args = CLI.parse_args([\"--version\"])\n assert args.version is True\n\n args = CLI.parse_args([\"--test\"])\n assert args.test is True\n\n args = CLI.parse_args([\"--print-config-file\"])\n assert args.print_config_file is True\n\n args = CLI.parse_args([\"-T\"])\n assert args.check_login is True", "def pytest_addoption(parser):\n parser.addoption(\"--cases\", help=\"Test cases to run\")", "def main(*args):\n if args and args[0].startswith('mds'):\n testmds(*args[1:])\n else:\n test(*args[1:])", "def main(args):\n\n if 'log' in args and args['log'] is not None:\n logging.basicConfig(level=LOGGING_LEVELS.get(args['log'].lower(), logging.NOTSET))\n\n test_structure = read_test_file(args['test'])\n tests = build_testsets(args['url'], test_structure)\n\n # Override configs from command line if config set\n for t in tests:\n if 'print_bodies' in args and args['print_bodies'] is not None:\n t.config.print_bodies = safe_to_bool(args['print_bodies'])\n\n if 'interactive' in args and args['interactive'] is not None:\n t.config.interactive = safe_to_bool(args['interactive'])\n\n # Execute all testsets\n failures = execute_testsets(tests)\n\n sys.exit(failures)" ]
[ "0.69539845", "0.65052825", "0.64269286", "0.64269286", "0.6341802", "0.63390857", "0.63270456", "0.6313408", "0.63101155", "0.62681425", "0.6258", "0.6253013", "0.625126", "0.62502426", "0.62323487", "0.6226787", "0.62109715", "0.620348", "0.6165653", "0.61612827", "0.61255956", "0.6123538", "0.6117141", "0.6115071", "0.6058772", "0.6052606", "0.6045069", "0.604461", "0.60218596", "0.601629" ]
0.78428197
0
Hook pytest test generation to parametrize the tests to each scenario they are supposed to be included in and to generate the setup and teardown tests for each scenario.
def pytest_generate_tests(metafunc): # test is setup or teardown - parametrize to all scenarios if metafunc.function.__name__ in ["test_setup", "test_teardown"]: metafunc.parametrize( "scenario", Scenario.scenarios.values()) # parameterize test for each scenario it is included in else: metafunc.parametrize( "scenario", metafunc.cls._scenarios)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_generate_tests(metafunc):\n if \"retrospective\" in metafunc.fixturenames:\n metafunc.parametrize(\"retrospective\", [False, True])\n if \"test_type\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_type\", [FILES_TEST, STATE_TEST])\n if \"raise_error\" in metafunc.fixturenames:\n metafunc.parametrize(\"raise_error\", [False, True])", "def pytest_generate_tests(metafunc):\n for param in ['env', 'browser', 'logging_level', 'env_file', 'name', 'jenkins_url', 'slack', 'output', 'email_retries',\n 'email_search_errors']:\n option_value = getattr(metafunc.config.option, param)\n if param in metafunc.fixturenames:\n metafunc.parametrize(param, [option_value], scope='session')", "def pytest_generate_tests(metafunc):\n parent_conftest.pytest_generate_tests(metafunc, __file__)", "def pytest_generate_tests(metafunc):\n if \"worker_type\" in metafunc.fixturenames:\n test_params = [[\"thread\", 1, 1], [\"thread\", 2, 2]]\n # if the OS is not Windows / OS X and python version > 2.7 then also do the multiprocess workers testing.\n if platform.system() not in [\"Windows\", \"Darwin\"] and sys.version_info >= (\n 2,\n 7,\n ):\n test_params.extend([[\"process\", 1, 1], [\"process\", 2, 2]])\n\n metafunc.parametrize(\n \"worker_type, workers_count, worker_sessions_count\", test_params\n )", "def pytest_generate_tests(metafunc):\n testcases_file = metafunc.config._nrfu['testcase_dir'].joinpath(\n 'testcases-cabling.json')\n\n metafunc.parametrize('testcase',\n json.load(testcases_file.open()),\n ids=nrfu.name_test)", "def pytest_generate_tests(metafunc):\n if \"size1\" in metafunc.fixturenames and \"size2\" in metafunc.fixturenames:\n metafunc.parametrize(\n [\"size1\", \"size2\"], itertools.product([1, 4], [2, 8]))\n if \"lines\" in metafunc.fixturenames:\n metafunc.parametrize(\"lines\", [[], [\"line1\"], [\"line1\", \"line2\"]])", "def pytest_generate_tests(metafunc):\n\t\n\tif not metafunc.cls:\n\t\treturn\n\t\n\tinst = metafunc.cls()\n\t\n\tif 'valid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('valid', inst.valid)\n\t\n\tif 'invalid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('invalid', inst.invalid)", "def pytest_generate_tests(metafunc):\n if 'browser' in metafunc.fixturenames:\n if os.environ.get('E2E', 'no').lower() != 'yes':\n pytest.skip(\n \"End-to-end tests skipped because E2E environment variable \"\n \"was not set to 'yes'.\")\n\n # Parameterize test based on list of browsers.\n browsers = os.environ.get('E2E_WEBDRIVER_BROWSERS', 'Chrome').split()\n metafunc.parametrize('browser', browsers, indirect=True)", "def pytest_generate_tests(metafunc):\n from datastructures.tests._test_trees_data import \\\n ids, \\\n inputs, \\\n expected_list, \\\n expected_items_list, \\\n expected_tree, \\\n expected_items_tree, \\\n expected_len, \\\n expected_valid_BST, \\\n shuffled_inputs, \\\n is_equal\n\n if 'get_test_as_list_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_as_list_data',\n list(zip(inputs, expected_list)),\n ids=ids)\n\n if 'get_test_items_as_list_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_items_as_list_data',\n list(zip(inputs, expected_items_list)),\n ids=ids)\n\n if 'get_test_as_tree_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_as_tree_data',\n list(zip(inputs, expected_tree)),\n ids=ids)\n\n if 'get_test_items_as_tree_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_items_as_tree_data',\n list(zip(inputs, expected_items_tree)),\n ids=ids)\n\n if 'get_test_len_data' in 
metafunc.fixturenames:\n metafunc.parametrize('get_test_len_data',\n list(zip(inputs, expected_len)),\n ids=ids)\n\n if 'get_test_valid_BST_glassbox' in metafunc.fixturenames:\n metafunc.parametrize('get_test_valid_BST_glassbox',\n list(zip(inputs, expected_valid_BST)),\n ids=ids)\n\n if 'get_test_eq' in metafunc.fixturenames:\n metafunc.parametrize('get_test_eq',\n list(zip(inputs, shuffled_inputs, is_equal)),\n ids=ids)", "def pytest_generate_tests(metafunc):\n if \"maptype\" in metafunc.fixturenames:\n metafunc.parametrize(\"maptype\", ALL_ATTMAPS)", "def pytest_generate_tests(metafunc):\n def get_schema_name(schema_path):\n \"\"\"Helper function to return the informative part of a schema path\"\"\"\n print(schema_path)\n path = os.path.normpath(schema_path)\n return os.path.sep.join(path.split(os.path.sep)[-3:])\n\n def create_schema_example_id(argval):\n \"\"\"Helper function to create test ID for schema example validation\"\"\"\n if argval[0] == '/':\n # ID for the first argument is just the schema name\n return get_schema_name(argval)\n else:\n # This will cause pytest to create labels of the form:\n # SCHEMA_NAME-example\n # If there are multiple examples within a single schema, the\n # examples will be numbered automatically to distinguish them\n return \"example\"\n\n if metafunc.function is test_validate_schema:\n metafunc.parametrize(\n 'schema_path',\n generate_schema_list(),\n # just use the schema name as a test ID instead of full path\n ids=get_schema_name)\n elif metafunc.function is test_schema_example:\n metafunc.parametrize(\n 'filename,example',\n generate_example_schemas(),\n ids=create_schema_example_id)", "def pytest_generate_tests(self, metafunc):\n\n # function for pretty test name\n def id_func(x):\n return \"-\".join([f\"{k}={v}\" for k, v in x.items()])\n\n # get arguments for the test function\n funcarglist = metafunc.cls.params.get(metafunc.function.__name__, None)\n if funcarglist is None:\n return\n else:\n # equivalent of pytest.mark.parametrize applied on the metafunction\n metafunc.parametrize(\"fields\", funcarglist, ids=id_func)", "def pytest_generate_tests(metafunc):\n if \"expected_failure\" in metafunc.fixturenames:\n modpath = os.path.dirname(metafunc.module.__file__)\n pattern = os.path.join(modpath, \"RST???\", \"*.py\")\n metafunc.parametrize(\n \"expected_failure\",\n [os.path.relpath(p, modpath) for p in sorted(glob.glob(pattern))],\n )", "def pytest_generate_tests(metafunc):\n if (\"solver\" in metafunc.fixturenames\n and \"coefficients\" in metafunc.fixturenames):\n _parametrize_solver_coefficients(metafunc)", "def test_generate_all_testing(self):\n pass", "def generate_tests(self, fixture):\n if fixture.startswith(\"splunk_searchtime_fields\"):\n yield from self.dedup_tests(\n self.fieldtest_generator.generate_tests(fixture),\n fixture\n )\n elif fixture.startswith(\"splunk_searchtime_cim\"):\n yield from self.dedup_tests(\n self.cim_test_generator.generate_tests(fixture),\n fixture\n )", "def pytest_generate_tests(metafunc):\n if not \"bpftrace_condition\" in metafunc.fixturenames:\n raise RuntimeError(\"Invalid test case.\")\n spec_file = metafunc.config.option.spec_file\n spec_dir = os.path.dirname(os.path.realpath(spec_file.name))\n spec = json.load(spec_file)\n conditions = []\n # Generate a list of conditions to evaluate\n for test_case in spec['cases']:\n bpftrace_vars = collect_test_results(test_case, spec_dir)\n for condition in test_case['conditions']:\n conditions.append((test_case['name'], condition, bpftrace_vars))\n\n # 
Parameterize the conditions so that the test function gets run for each condition\n # We also set the ids of the functions to be \"name: condition\" for better reporting\n metafunc.parametrize(\"bpftrace_condition\", conditions, ids=map(\n lambda c: f\"{c[0]}: {c[1]}\", conditions))", "def test_cases():\n CasesTestCase.generate_tests()\n yield CasesTestCase\n yield DocTestsTestCase", "def pytest_generate_tests(metafunc):\n # Launch EC2 mocking and env preparation\n mock_sqs.start()\n mock_sqs.create_env(queues, region)\n\n account = Account(region=region)\n\n checker = SQSPolicyChecker(account)\n checker.check()\n\n for sqs_queue in checker.queues:\n sqs_queue.restrict_policy()\n\n checker_remediated = SQSPolicyChecker(account)\n checker_remediated.check()\n\n sqs_queues = [(queue, False) for queue in checker.queues]\n sqs_queues += [(queue, True) for queue in checker_remediated.queues]\n\n # create test cases for each response\n metafunc.parametrize(\"queue,remediated\", sqs_queues, ids=ident_test)", "def pytest_runtest_setup(item):\n if not item.originalname == \"test_setup\":\n scenario = scenario_re.match(item.name).groups()[0]\n if _scenario_setup_failed[scenario]:\n pytest.skip(f\"Setup for {scenario} failed, skipping...\")", "def pytest_generate_tests_abstract(metafunc):\n if 'content' in metafunc.fixturenames:\n content = getattr(metafunc.function, '_content', None)\n if isinstance(content, list):\n metafunc.parametrize('content', [content])\n else:\n metafunc.parametrize('content', [[]])", "def pytest_runtest_setup(item):\n if hasattr(item, 'fixturenames') and LOOP_KEY not in item.fixturenames:\n item.fixturenames.append(LOOP_KEY)", "def test_get_scenarios(self):\n pass", "def pytest_collection_modifyitems(session, config, items):\n\n test_scenarios = DefaultDict(TestSuite)\n\n config_scenarios = config.option.scenarios\n filter_scenarios = config_scenarios is not None\n skip_setup = config.option.no_setup\n\n if skip_setup and (not filter_scenarios or len(config_scenarios) > 1):\n raise RuntimeWarning(\n \"Configured no setup and teardown but more then one scenario is configured\")\n\n # Group tests by scenario in to TestSuite objects\n for test in items:\n scenario = scenario_re.match(test.name).groups()[0]\n\n if filter_scenarios and should_skip_scenario(scenario, config_scenarios):\n continue\n\n if test.originalname == \"test_setup\":\n test_scenarios[scenario].setup = test\n elif test.originalname == \"test_teardown\":\n test_scenarios[scenario].teardown = test\n else:\n test_scenarios[scenario].tests.append(test)\n\n # Clear all pytest tests\n items.clear()\n\n # Order tests\n for scenario in test_scenarios.values():\n if len(scenario.tests) != 0:\n suite = scenario.tests\n if not skip_setup:\n suite.insert(0, scenario.setup)\n suite.append(scenario.teardown)\n\n items.extend(suite)", "def generate_yaml_tests(directory):\n for yml_file in directory.glob(\"*/*.yml\"):\n data = yaml.safe_load(yml_file.read_text())\n assert \"cases\" in data, \"A fixture needs cases to be used in testing\"\n\n # Strip the parts of the directory to only get a name without\n # extension and resolver directory\n base_name = str(yml_file)[len(str(directory)) + 1:-4]\n\n base = data.get(\"base\", {})\n cases = data[\"cases\"]\n\n for i, case_template in enumerate(cases):\n case = base.copy()\n case.update(case_template)\n\n case[\":name:\"] = base_name\n if len(cases) > 1:\n case[\":name:\"] += \"-\" + str(i)\n\n if case.pop(\"skip\", False):\n case = pytest.param(case, marks=pytest.mark.xfail)\n\n 
yield case", "def generate_yaml_tests(directory):\n for yml_file in directory.glob(\"*.yml\"):\n data = yaml.safe_load(yml_file.read_text())\n assert \"cases\" in data, \"A fixture needs cases to be used in testing\"\n\n # Strip the parts of the directory to only get a name without\n # extension and resolver directory\n base_name = str(yml_file)[len(str(directory)) + 1:-4]\n\n base = data.get(\"base\", {})\n cases = data[\"cases\"]\n\n for resolver in 'legacy', '2020-resolver':\n for i, case_template in enumerate(cases):\n case = base.copy()\n case.update(case_template)\n\n case[\":name:\"] = base_name\n if len(cases) > 1:\n case[\":name:\"] += \"-\" + str(i)\n case[\":name:\"] += \"*\" + resolver\n case[\":resolver:\"] = resolver\n\n skip = case.pop(\"skip\", False)\n assert skip in [False, True, 'legacy', '2020-resolver']\n if skip is True or skip == resolver:\n case = pytest.param(case, marks=pytest.mark.xfail)\n\n yield case", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def test_generate(monkeypatch, capsys):\n monkeypatch.setattr(sys, \"argv\", [\"\", \"generate\", os.path.join(PATH, \"generate.feature\")])\n main()\n out, err = capsys.readouterr()\n assert out == textwrap.dedent(\n '''\n # coding=utf-8\n \"\"\"Code generation feature tests.\"\"\"\n\n from pytest_bdd import (\n given,\n scenario,\n then,\n when,\n )\n\n\n @scenario('scripts/generate.feature', 'Given and when using the same fixture should not evaluate it twice')\n def test_given_and_when_using_the_same_fixture_should_not_evaluate_it_twice():\n \"\"\"Given and when using the same fixture should not evaluate it twice.\"\"\"\n\n\n @given('1 have a fixture (appends 1 to a list) in reuse syntax')\n def have_a_fixture_appends_1_to_a_list_in_reuse_syntax():\n \"\"\"1 have a fixture (appends 1 to a list) in reuse syntax.\"\"\"\n raise NotImplementedError\n\n\n @given('I have an empty list')\n def i_have_an_empty_list():\n \"\"\"I have an empty list.\"\"\"\n raise NotImplementedError\n\n\n @when('I use this fixture')\n def i_use_this_fixture():\n \"\"\"I use this fixture.\"\"\"\n raise NotImplementedError\n\n\n @then('my list should be [1]')\n def my_list_should_be_1():\n \"\"\"my list should be [1].\"\"\"\n raise NotImplementedError\n\n '''[\n 1:\n ].replace(\n u\"'\", u\"'\"\n )\n )", "def parameterized_test_case(cls):\n tests_to_remove = []\n tests_to_add = []\n for key, val in vars(cls).items():\n # Only process tests with build data on them\n if key.startswith('test_') and val.__dict__.get('build_data'):\n to_remove, to_add = process_parameterized_function(\n name=key,\n func_obj=val,\n build_data=val.__dict__.get('build_data')\n )\n tests_to_remove.extend(to_remove)\n tests_to_add.extend(to_add)\n\n # Add all new test functions\n [setattr(cls, name, func) for name, func in tests_to_add]\n\n # Remove all old test function templates (if they still exist)\n [delattr(cls, key) for key in tests_to_remove if hasattr(cls, key)]\n return cls", "def spec_tests():\n pass" ]
[ "0.7739637", "0.75435764", "0.74441427", "0.7410751", "0.73575944", "0.72835654", "0.7271091", "0.7114842", "0.7086104", "0.70699865", "0.704308", "0.7039083", "0.70184344", "0.6836622", "0.68358666", "0.67640394", "0.65853935", "0.6499627", "0.6490155", "0.6441901", "0.63703644", "0.6317907", "0.6284426", "0.6270256", "0.62585956", "0.6251059", "0.6234591", "0.6198478", "0.6152524", "0.61419106" ]
0.84071386
0
Checks if the scenario parametrized to the test should be filtered by what is set in config_scenarios
def should_skip_scenario(scenario, config_scenarios): for config_scenario in config_scenarios: if scenario == Scenario.scenarios[config_scenario].__name__: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matches_config(cls, config):\n return (not config.measures) or all(me in cls.available_measures for me in config.measures)", "def _has_testcase_with_same_params(testcase, testcase_map):\n for other_testcase_id in testcase_map:\n # yapf: disable\n if (testcase.project_name ==\n testcase_map[other_testcase_id].project_name and\n testcase.crash_state ==\n testcase_map[other_testcase_id].crash_state and\n testcase.crash_type ==\n testcase_map[other_testcase_id].crash_type and\n testcase.security_flag ==\n testcase_map[other_testcase_id].security_flag and\n testcase.one_time_crasher_flag ==\n testcase_map[other_testcase_id].one_time_crasher_flag):\n return True\n # yapf: enable\n\n return False", "def _filter(self):\n if self.properties['reason'] in PoliceReport.reason_filter:\n return False\n return True", "def is_experiment(cfg):\n if CONDITIONS in list(cfg.keys()):\n return True\n else:\n return False", "def is_condition(cfg):\n if SELECTIONS in list(cfg.keys()):\n return True\n else:\n return False", "def test_get_scenarios(self):\n pass", "def filter_(cls, exp):\n rsc_filter = config.get('exp_filter', 'rsc_filter')\n if rsc_filter.lower() == 'all':\n return True\n if rsc_filter.lower() == exp.exp_info['faulty_resource'].lower():\n return True\n return False", "def filtered(self):\n if not is_tagged(self.tags, self.opt.tags):\n log(\"Skipping %s as it does not have requested tags\" %\n self.path, self.opt)\n return False\n\n if not aomi.validation.specific_path_check(self.path, self.opt):\n log(\"Skipping %s as it does not match specified paths\" %\n self.path, self.opt)\n return False\n\n return True", "def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values", "def _acceptable(self, team):\r\n current = [c for c in self.configurations if self._validateNoSpies(c, team)]\r\n return bool(len(current) > 0)", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def is_to_filter(self):\n if not self.app.args.filter is None:\n # Check the flag value to evite problem in search process\n ok = self.validate_value_flag()\n\n if ok is False:\n fatal([\n 'Invalid value for \"value\" flag',\n 'The value flag is required to filter',\n 'Use instead:',\n '$ tasks-app show --filter/-f={} --value/-v=VALUE'.format(self.app.args.filter),\n ])\n else:\n return True\n else:\n return False", "def test_get_configs_with_filter(self) -> None:\n config1 = self.integration.create_config(name='Config 1',\n enabled=True,\n save=True)\n self.integration.create_config(name='Config 2',\n enabled=True,\n save=True)\n\n # Add some configs that shouldn't be returned.\n integration2 = \\\n self.manager.register_integration_class(DummyIntegration2)\n self.integration.create_config(name='Config 3', save=True)\n integration2.create_config(name='Config 4', save=True)\n\n self.assertEqual(self.integration.get_configs(name='Config 1'),\n [config1])", "def check_filters(self, pname, base, fealty=\"\"):\n if \"org\" in self.switches:\n return True\n if not self.args:\n return True\n if self.args.lower() == \"afk\":\n return \"(AFK)\" in pname\n if self.args.lower() == \"lrp\":\n return \"(LRP)\" in pname\n if self.args.lower() == \"staff\":\n return \"(Staff)\" in pname\n if self.args.lower() == 
str(fealty).lower():\n return True\n return base.lower().startswith(self.args.lower())", "def _is_supplied_by_config(group: argparse._MutuallyExclusiveGroup, conf: Dict[str, Any]) -> bool:\n group_args = []\n for arg in group._group_actions:\n group_args.append(arg.dest)\n\n count = 0\n for val in group_args:\n if val in conf:\n count += 1\n return count == len(group_args) or count == 0", "def _apply_filters(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return False\n if not all(keyword in metadata.keywords for keyword in self.filters[\"keywords\"]):\n return False\n if \"features\" in self.filters:\n if not metadata.features:\n return False\n if not all(feature in metadata.features for feature in self.filters[\"features\"]):\n return False\n if \"authors\" in self.filters:\n if not metadata.authors:\n return False\n if not all(author in metadata.authors for author in self.filters[\"authors\"]):\n return False\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return False\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return False\n return True", "def check_filterconfig(filterconfig, config):\n for f in filterconfig[\"filters\"]:\n if f[\"name\"] != \"frequency\":\n continue\n\n missing_freq_groups = set(iter_freq_groups(f[\"config\"][\"groups\"])) - set(\n iter_freq_groups(config[\"frequencies\"][\"groups\"])\n )\n assert not missing_freq_groups, \"Missing frequency group(s) in global config: {}\".format(\n missing_freq_groups\n )", "def test_get_scenarios_expanded(self):\n pass", "def pytest_ignore_collect(path: Any, config: Config) -> bool:\n if config.option.functional:\n return True\n if config.option.markexpr and \"wip\" in config.option.markexpr:\n return False # collect when looking for markers\n return not (config.option.integration or config.option.integration_only)", "def skip_experiment(conf):\n return (\n (conf.dataset == 'rfw' and conf.feature == 'arcface')\n or (conf.dataset == 'bfw' and conf.feature == 'facenet')\n )", "def is_valid(self, user_specific_config: Any, factor: str) -> bool:", "def is_expected_for_this_test(obj):\n if obj['test-name'] != test_name:\n return False\n if not fnmatch.fnmatch(config_filename, obj['configuration-filename']):\n return False\n expected_variant = obj.get('variant', None)\n if expected_variant == \"*\":\n return True\n for k in expected_variant:\n if not k in variant:\n return False\n if expected_variant[k] != variant[k]:\n return False\n return True", "def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True", "def __contains__(self, feature):\n return feature == 'cvarsort' or feature in self.features", "def _filter(self, values, asset):\n log.debug(\"Testing trigger filters against asset %s\", asset['id'])\n for filter in self.filters:\n if not filter._apply(values, asset):\n return False\n return True", "def should_filter_by_semester(self):\n return self.kwargs.get('filter_by_semester', True)", "def verify_config_params(attack_config):\n _check_config(attack_config, _VALID_CONFIG_CHECKLIST)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = 
factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits))\n for trait in source_traits2:\n self.assertNotIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits))\n for trait in source_traits2:\n self.assertNotIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def has_filter(self, param: str) -> bool:\n return param in self.filter_names" ]
[ "0.6195167", "0.584315", "0.5827648", "0.5815462", "0.57347786", "0.5660867", "0.56135815", "0.5557565", "0.55098784", "0.5498779", "0.5492603", "0.548081", "0.5449906", "0.54373837", "0.5436623", "0.5378134", "0.5373302", "0.5347374", "0.5344112", "0.5299899", "0.5297614", "0.5292987", "0.52814937", "0.52812284", "0.5277255", "0.5271438", "0.5268823", "0.52682865", "0.52682865", "0.5263955" ]
0.6798654
0
Hook collection modification to order tests by scenario; each scenario will start with its setup, followed by the tests and finally the teardown.
def pytest_collection_modifyitems(session, config, items): test_scenarios = DefaultDict(TestSuite) config_scenarios = config.option.scenarios filter_scenarios = config_scenarios is not None skip_setup = config.option.no_setup if skip_setup and (not filter_scenarios or len(config_scenarios) > 1): raise RuntimeWarning( "Configured no setup and teardown but more then one scenario is configured") # Group tests by scenario in to TestSuite objects for test in items: scenario = scenario_re.match(test.name).groups()[0] if filter_scenarios and should_skip_scenario(scenario, config_scenarios): continue if test.originalname == "test_setup": test_scenarios[scenario].setup = test elif test.originalname == "test_teardown": test_scenarios[scenario].teardown = test else: test_scenarios[scenario].tests.append(test) # Clear all pytest tests items.clear() # Order tests for scenario in test_scenarios.values(): if len(scenario.tests) != 0: suite = scenario.tests if not skip_setup: suite.insert(0, scenario.setup) suite.append(scenario.teardown) items.extend(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_collection_modifyitems(config, items):\n # check if studio tests myst be skipped\n run_study = config.getoption(\"--runstudy\")\n # 'all' will match all studies, '' will not match anything\n run_study = {'': '(?!x)x', 'all': '.*'}.get(run_study, run_study)\n # --runstudy given in cli: do not skip study tests and\n test_selected = list()\n test_skipped = list()\n groups = dict()\n incremental = pytest.mark.incremental()\n\n def add():\n \"helper for gathering test info\"\n marker = item.get_marker(mark)\n kwargs = parse_args(marker.args, marker.kwargs)\n group_name = kwargs['name']\n group = groups.setdefault(group_name, dict())\n group.setdefault(mark, list()).append((kwargs, item))\n item.add_marker(incremental)\n\n # place every test in regular, prerequisite and studies\n # group by name\n for item in items:\n for mark in set(item.keywords.keys()).intersection(MARKS):\n add()\n break\n else:\n test_selected.append(item)\n\n def sort(a, b):\n \"Sort two items by order priority\"\n return cmp(a[0]['order'], b[0]['order'])\n\n # use studies precedence to built the global sequence order\n mandatory = 'study' # mandatory mark for global sorting: study\n studies = list()\n for name, info in groups.items():\n studies.extend(info.get(mandatory, []))\n studies.sort(sort)\n\n def append(tests, where):\n \"helper to add the test item from info structure\"\n for test in tests:\n test = test[1]\n if test not in where:\n where.append(test)\n\n # select only the test that are going to be launched\n width = 0\n regexp = re.compile(run_study, re.I | re.DOTALL)\n for study in studies:\n group_name = study[0]['name']\n width = max(width, len(group_name))\n where = test_selected if regexp.search(group_name) else test_skipped\n for mark, seq in groups[group_name].items():\n if mark == mandatory:\n continue\n seq.sort(sort)\n append(seq, where)\n append([study], where)\n\n if config.getoption(\"--show_order\") or config.getoption(\"--debug\"):\n fmt = \"{0:>3d} [{1:>%s}] {2}\" % width\n for i, item in enumerate(test_selected + test_skipped):\n study = get_study_name(item)\n fqn = get_FQN(item)\n line = fmt.format(i, study, fqn)\n if item in test_selected:\n line = term.green('+' + line)\n else:\n line = term.yellow('-' + line)\n print(line)\n\n # we make the --runstudy check at the end to be able to show\n # test order with --show_order or --debig options\n # reorder tests by group name and replace items IN-PLACE\n if run_study:\n items[:] = test_selected\n return\n\n skip_test = pytest.mark.skip(reason=\"need --runstudy option to run\")\n for item in items:\n if set(item.keywords.keys()).intersection(MARKS):\n item.add_marker(skip_test)", "def set_up(self):\n for section_name, section_body in self.sections.iteritems():\n scenario = Scenario(section_name, section_body)\n self.scenarios.append(scenario)", "def test_getOrderedFeatures(self):\n print 'Running %s ...' 
% getName()\n \n s1 = self.sequenceListingFixture.create_sequence_instance(self.sequenceListing) \n \n# test that source feature is at index 0 when feature table has only 1 feature \n source_feature = next((f for f in s1.feature_set.all() if f.featureKey == 'source'), None)\n ordered_features = s1.getOrderedFeatures()\n self.assertTrue(source_feature)\n self.assertEqual(0, ordered_features.index(source_feature))\n \n# add feature\n f1_1 = Feature.objects.create(sequence=s1, \n featureKey='misc_feature', \n location='4')\n \n ordered_features_after_f1_1 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_1.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_1.index(f1_1))\n \n # add feature\n f1_2 = Feature.objects.create(sequence=s1, \n featureKey='misc_feature', \n location='2')\n \n ordered_features_after_f1_2 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_2.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_2.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_2.index(f1_1))\n \n # add feature\n f1_3 = Feature.objects.create(sequence=s1, \n featureKey='variation', \n location='9')\n \n ordered_features_after_f1_3 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_3.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_3.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_3.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_3.index(f1_3))\n \n # add feature\n f1_4 = Feature.objects.create(sequence=s1, \n featureKey='allele', \n location='9')\n \n ordered_features_after_f1_4 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_4.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_4.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_4.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_4.index(f1_4))\n self.assertEqual(4, ordered_features_after_f1_4.index(f1_3))\n \n # add feature\n f1_5 = Feature.objects.create(sequence=s1, \n featureKey='iDNA', \n location='9')\n \n ordered_features_after_f1_5 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_5.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_5.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_5.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_5.index(f1_4))\n self.assertEqual(4, ordered_features_after_f1_5.index(f1_5))\n self.assertEqual(5, ordered_features_after_f1_5.index(f1_3))\n \n # add feature this will be ordered before 'allele', because \n# capital letters are lower than lower case in ASCII\n f1_6 = Feature.objects.create(sequence=s1, \n featureKey='CDS', \n location='9..17')\n \n ordered_features_after_f1_6 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_6.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_6.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_6.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_6.index(f1_6))\n self.assertEqual(4, ordered_features_after_f1_6.index(f1_4))\n self.assertEqual(5, ordered_features_after_f1_6.index(f1_5))\n self.assertEqual(6, ordered_features_after_f1_6.index(f1_3))", "def pytest_before_group_items(session, config, items):", "def setUp(self):\n assert COMMANDS.keys() == EXPCT_RESULTS.keys()\n self.tests = []\n self.test_numbers = deque(sorted(COMMANDS.keys()))", "def add_fixtures(ctest):\n\n def test_setup(funct):\n 
\"\"\"Test setUp decorator to add fixture reloading.\"\"\"\n\n def decorated_setup():\n \"\"\"Decorated test setup.\"\"\"\n testdb.reload_db()\n funct()\n return decorated_setup\n\n for test in ctest._tests:\n test.setUp = test_setup(test.setUp)", "def pytest_after_group_items(session, config, items):", "def testSetUp(self):\n for layer in self.layers:\n if hasattr(layer, 'testSetUp'):\n layer.testSetUp()", "def pytest_collection_modifyitems(config, items):\n execute_mssql_tests = ensure_mssql_ready_for_tests(config)\n skip_mssql = pytest.mark.skip(reason=\"requires SQL Server\")\n for item in items:\n if \"mssql\" in item.keywords:\n if execute_mssql_tests:\n # Add 'mssql_setup_and_teardown' as FIRST in fixture list\n fixtures = ['mssql_setup_and_teardown'] + item.fixturenames\n item.fixturenames = fixtures\n else:\n item.add_marker(skip_mssql)\n if \"http_server\" in item.keywords:\n item.fixturenames.append('http_server_setup_and_teardown')", "def setUp(self):\n\t\tself.testCases = [\n\t\t\t{\n\t\t\t\t'show': \"House\",\n\t\t\t\t'episode': 11,\n\t\t\t\t'season': 3,\n\t\t\t\t'title': \"Words and Deeds\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Lost\",\n\t\t\t\t'episode': 21,\n\t\t\t\t'season': 2,\n\t\t\t\t'title': \"?\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Heroes\",\n\t\t\t\t'episode': 15,\n\t\t\t\t'season': 1,\n\t\t\t\t'title': \"Run!\"\n\t\t\t}\n\t\t]", "def pytest_collection_modifyitems(items, config):\n from . import tagging\n\n remaining = []\n deselected = []\n for item in items:\n # Get all tags for this test (includes tags on class level if present)\n tags = item.get_marker(\"tags\")\n\n if config.option.collectonly:\n info = item.parent.name.split(\"/\")\n print({\"name\": item.name, \"folder\": info[3],\n \"module\": info[4][:-3], \"tags\": tags.args})\n\n # This line fills two purposes. 
Handle the cases where '--tags'\n # 1) was omitted, this will run all tests except 'not active' and\n # 'awaiting_fix' ones.\n # 2) has no arguments (a case that can occur when running in Docker).\n tags = tags.args + ('all',) if tags else ['all']\n\n # Create a list of the tags\n tags_list = tagging.TagsCollection.build_tags_list(tags)\n\n # Determine if test should be run depending on the parameter tags\n # See also: pytest_configure hook\n if config.parameter_tags.should_pick_up(tags_list):\n remaining.append(item)\n else:\n deselected.append(item)\n\n if deselected:\n config.hook.pytest_deselected(items=deselected)\n items[:] = remaining", "def sortScenarios(self, scenarioSet):\n scenarios = list(scenarioSet)\n\n for scenarioName in scenarios:\n scenario = self.scenarioDict[scenarioName]\n if scenario.isBaseline:\n scenarios.remove(scenarioName)\n scenarios.insert(0, scenarioName)\n break\n\n return scenarios", "def setUpClass(self):\n\n self.test_a_summarize()\n self.test_b_store()\n self.test_c_get_existing_records()\n self.test_d_remove_database()", "def test_get_scenarios(self):\n pass", "def before_run_tests(cls):\n pass", "def process_collection_tests(pathname, ctx):\n ctx.enter_tests()\n for dirname, dirpath in os_listdir(pathname):\n if dirname == \"integration\" and os.path.isdir(dirpath):\n process_integration_tests(dirpath, ctx)\n elif os.path.isfile(os.path.join(dirpath, \"tests_default.yml\")):\n ctx.enter_role(dirname, dirpath)\n process_role_tests_path(dirpath, ctx)\n ctx.exit_role()\n elif os.path.isdir(dirpath) and dirname in SKIP_COLLECTION_TEST_DIRS:\n continue\n elif os.path.isfile(dirpath):\n process_ansible_file(dirpath, ctx)\n elif os.path.isdir(dirpath):\n # don't know what this is - process like ansible yml files\n process_ansible_yml_path(dirpath, ctx)\n\n ctx.exit_tests()", "def sort(self):\n for _ in self.stage1():\n yield\n for _ in self.stage2():\n yield", "def test_can_sort_featurez(self):\n for i in range(10):\n random_suggestion = choice(Suggestion.objects.all())\n random_user = choice(User.objects.all())\n new_upvote = Upvote(user=random_user, suggestion=random_suggestion)\n new_upvote.save()\n\n for i in range(10):\n random_suggestion = choice(Suggestion.objects.all())\n random_user = choice(User.objects.all())\n new_comment = Comment(user=random_user, suggestion=random_suggestion, comment=\"test\")\n new_comment.save()\n\n for i in range(15):\n random_comment = choice(Comment.objects.all())\n random_user = choice(User.objects.all())\n comment_upvote = Upvote(user=random_user, comment=random_comment)\n comment_upvote.save()\n\n most_upvoted_feature_first = return_current_features()\n previous_entry_upvotes = 1000\n for feature in most_upvoted_feature_first:\n self.assertTrue(feature.upvotes <= previous_entry_upvotes)\n previous_entry_upvotes = feature.upvotes\n\n most_upvoted_bug_first = return_all_current_bugs(\"-upvotes\")\n previous_entry_upvotes = 1000\n for bug in most_upvoted_bug_first:\n self.assertTrue(bug.upvotes <= previous_entry_upvotes)\n previous_entry_upvotes = bug.upvotes\n\n most_upvoted_comment_first = return_public_suggestion_comments(choice(Suggestion.objects.all()), \"-upvvotes\")\n previous_entry_upvotes = 1000\n for comment in most_upvoted_comment_first:\n self.assertTrue(comment.upvotes <= previous_entry_upvotes)\n previous_entry_upvotes = comment.upvotes\n\n oldest_feature_first = return_current_features(\"oldest\")\n previous_entry_date = datetime.date(2050, 1, 1)\n for feature in oldest_feature_first:\n 
self.assertTrue(feature.date_time.date() <= previous_entry_date)\n previous_entry_date = feature.date_time.date()\n\n oldest_bug_first = return_all_current_bugs(\"oldest\")\n previous_entry_date = datetime.date(2050, 1, 1)\n for bug in oldest_bug_first:\n self.assertTrue(bug.date_time.date() <= previous_entry_date)\n previous_entry_date = bug.date_time.date()\n\n oldest_comment_first = return_public_suggestion_comments(choice(Suggestion.objects.all()), \"oldest\")\n previous_entry_date = datetime.date(2050, 1, 1)\n for comment in oldest_comment_first:\n self.assertTrue(comment.date_time.date() <= previous_entry_date)\n previous_entry_date = comment.date_time.date()\n\n newest_feature_first = return_current_features(\"newest\")\n previous_entry_date = datetime.date(1990, 1, 1)\n for feature in newest_feature_first:\n self.assertTrue(feature.date_time.date() >= previous_entry_date)\n previous_entry_date = feature.date_time.date()\n\n newest_bug_first = return_all_current_bugs(\"newest\")\n previous_entry_date = datetime.date(1990, 1, 1)\n for bug in newest_bug_first:\n self.assertTrue(bug.date_time.date() >= previous_entry_date)\n previous_entry_date = bug.date_time.date()\n\n newest_comment_first = return_public_suggestion_comments(choice(Suggestion.objects.all()), \"newest\")\n previous_entry_date = datetime.date(1990, 1, 1)\n for comment in newest_comment_first:\n self.assertTrue(comment.date_time.date() >= previous_entry_date)\n previous_entry_date = comment.date_time.date()", "def collectTests(self, global_ctx):\n pass", "def test_benchmark_sorted(benchmark, benchmark_items_fixture):\n do_benchmark(benchmark_items_fixture, sorted, benchmark)", "def pytest_collection_modifyitems(session, config, items): # pylint: disable=unused-argument\n if not config.getoption(\"bdd_execution\"):\n return\n # BDD execution\n # Remove all non-BDD tests\n items[:] = [item for item in items if hasattr(item, \"verify_and_process_scenario\")]\n # Process the BDD tests, i.e. 
scenario items\n for item in items:\n item.verify_and_process_scenario()\n # Check errors\n if data.get_errors() or data.get_missing_steps():\n # TODO: I don't know a better way to exit, but deselect all tests\n items.clear()", "def setUp(self) -> None:\n create_test_categories()", "def test_feature_all_scenarios(mocker):\n # given\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n # add regular Scenarios to Feature\n feature.scenarios.extend([mocker.MagicMock(id=1), mocker.MagicMock(id=2)])\n # add Scenario Outline to Feature\n feature.scenarios.append(\n mocker.MagicMock(\n spec=ScenarioOutline,\n id=3,\n scenarios=[mocker.MagicMock(id=4), mocker.MagicMock(id=5)],\n )\n )\n # add Scenario Loop to Feature\n feature.scenarios.append(\n mocker.MagicMock(\n spec=ScenarioLoop,\n id=6,\n scenarios=[mocker.MagicMock(id=7), mocker.MagicMock(id=8)],\n )\n )\n\n # when\n all_scenarios = feature.all_scenarios\n\n # then\n assert len(all_scenarios) == 8\n assert all_scenarios[0].id == 1\n assert all_scenarios[1].id == 2\n assert all_scenarios[2].id == 3\n assert all_scenarios[3].id == 4\n assert all_scenarios[4].id == 5\n assert all_scenarios[5].id == 6\n assert all_scenarios[6].id == 7\n assert all_scenarios[7].id == 8", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def test_after_install(self):\n self.run_test_suites(self.after_install_test_suite_list)", "def test_categories_are_sorted(self):\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])", "def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)", "def pytest_generate_tests(metafunc):\n\n # test is setup or teardown - parametrize to all scenarios\n if metafunc.function.__name__ in [\"test_setup\", \"test_teardown\"]:\n metafunc.parametrize(\n \"scenario\", Scenario.scenarios.values())\n\n # parameterize test for each scenario it is included in\n else:\n metafunc.parametrize(\n \"scenario\", metafunc.cls._scenarios)", "def startTestHook(self):", "def test_hookimpls_can_be_sorted_by_the_order():\n # given\n hooks = [\n HookImpl(\"what\", \"when\", None, [], 1),\n HookImpl(\"what\", \"when\", None, [], 10),\n HookImpl(\"what\", \"when\", None, [], 5),\n HookImpl(\"what\", \"when\", None, [], 2),\n HookImpl(\"what\", \"when\", None, [], 30),\n HookImpl(\"what\", \"when\", None, [], 8),\n HookImpl(\"what\", \"when\", None, [], 7),\n ]\n\n # when\n sorted_hooks = sorted(hooks)\n\n # then\n assert sorted_hooks == [\n 
HookImpl(\"what\", \"when\", None, [], 1),\n HookImpl(\"what\", \"when\", None, [], 2),\n HookImpl(\"what\", \"when\", None, [], 5),\n HookImpl(\"what\", \"when\", None, [], 7),\n HookImpl(\"what\", \"when\", None, [], 8),\n HookImpl(\"what\", \"when\", None, [], 10),\n HookImpl(\"what\", \"when\", None, [], 30),\n ]" ]
[ "0.656119", "0.61039406", "0.6084039", "0.60311735", "0.6013919", "0.59649473", "0.5939651", "0.5935594", "0.5910891", "0.59066004", "0.58609056", "0.5855353", "0.5839861", "0.5832981", "0.58159596", "0.5811885", "0.58009213", "0.5782111", "0.5779561", "0.5775907", "0.57642555", "0.57372606", "0.5724714", "0.5699279", "0.5670595", "0.5628184", "0.5614075", "0.55956906", "0.5578976", "0.55588406" ]
0.7278946
0
Hook makereport to mark if the scenario setup has failed
def pytest_runtest_makereport(item, call): if item.originalname == "test_setup" and call.when == "call": try: # TODO: not sure if this check is enough failed = not call.result == [] except: # call does not have valid result attribute if some exception happended # during the test failed = True scenario = scenario_re.match(item.name).groups()[0] _scenario_setup_failed[scenario] = failed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indicate_failure(self):\n pass", "def StepFailure(self):\n return recipe_api.StepFailure", "def test_case_01(self):\n if True:\n self.fail()", "def pytest_runtest_makereport(item, call):\n if \"incremental\" in item.keywords:\n if call.excinfo is not None:\n parent = item.parent\n parent._previousfailed = item", "def set_test_failed(self):\n self.set_result(Status.FAILED)", "def test_launch_failures_hw(self):\n self.test_launch_failures()", "def test_state_after_failure(self):\n pass", "def useFailures(self):\n self.setupTests(tests = self.failures)", "def failed(self):\n\t\tpass", "def on_failure(self):\n pass", "def is_successful(self):\n for item in self.summary:\n if item['task_status'] is False:\n return testcase.TestCase.EX_TESTCASE_FAILED\n\n return super().is_successful()", "def test_xfailed_but_passed():\n pass", "def test_pytest_bdd_scenario_with_failed_step(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == -1\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[3].name == \"then\"\n assert spans[3].get_tag(ERROR_MSG)", "def test_04_fail(self):\n if y == 2:\n self.fail('This is a custom fail message')", "def failed(self) -> None:\n self.failure_count += 1", "def mark_failed(self):\n self.status = self.FAILED\n self.traceback = self._format_traceback()\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "def SetUnexpectedFailure(test_result):\n test_result['status'] = 'FAIL'\n test_result['expected'] = False\n logging.error('Processing failed for test %s', test_result['testPath'])", "def test_verification_failed(self):\n pass", "def failure(self, target):\n print \"FAILED:\"\n self.show_target(target)\n self.failed += 1", "def report_trial(self):\n pass", "def setUp(self):\n if not self.flag:\n self.fail(self.err_msg)", "def test_failed():\n assert False", "def pytest_runtest_setup(item):\n if \"incremental\" in item.keywords:\n previousfailed = getattr(item.parent, \"_previousfailed\", None)\n if previousfailed is not None:\n pytest.xfail(\"previous test failed (%s)\" % previousfailed.name)", "def _failed(self, msg):\n self.log(msg)\n self.result.passed = False\n self.result.add_error(msg)\n self.log(u\"Failed\")", "def failed( self, mesg ):\n self.tests_failed += 1\n print \"fail: \" + mesg.rstrip()", "def pytest_runtest_makereport(item, call):\n report = (yield).get_result() # pytest.TestReport\n config = item.config\n enabled = config.getvalue('yagot')\n if enabled:\n if report.when == \"call\" and not report.passed:\n import yagot\n tracker = yagot.GarbageTracker.get_tracker()\n tracker.ignore()", "def failure(self):\n self.logger.debug(\"Logging failure for %s\", self.key)\n self.failures = self.driver.failure(self.key)", "def test_fail(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_failed, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n runner.submit_trial(run_info)\n run_info, run_value = next(runner.iter_results())\n\n # Make sure the 
traceback message is included\n assert \"traceback\" in run_value.additional_info\n assert \"RuntimeError\" in run_value.additional_info[\"traceback\"]", "def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))", "async def test_failed_samples(self):\n self.set_source_parameter(\"test_result\", [\"failed\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"6\", entities=[])" ]
[ "0.7047196", "0.6925117", "0.67258424", "0.67041105", "0.66620785", "0.6551664", "0.64687765", "0.6450989", "0.643399", "0.6353542", "0.6330532", "0.63222635", "0.63013333", "0.62878495", "0.62275034", "0.6202006", "0.619779", "0.61859745", "0.6149418", "0.6101016", "0.609423", "0.60650504", "0.60600466", "0.60337347", "0.60335803", "0.60133874", "0.60023427", "0.6000997", "0.59983164", "0.59945726" ]
0.7266723
0
Hook runtest_setup to skip tests if their scenario setup has failed
def pytest_runtest_setup(item): if not item.originalname == "test_setup": scenario = scenario_re.match(item.name).groups()[0] if _scenario_setup_failed[scenario]: pytest.skip(f"Setup for {scenario} failed, skipping...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_status_check(dp_setup):\n\n if dp_setup.setup_status == 0:\n pytest.skip(\"Skipping testcase because setup \\\n did not complete properly\")", "def pytest_runtest_setup(item):\n if \"incremental\" in item.keywords:\n previousfailed = getattr(item.parent, \"_previousfailed\", None)\n if previousfailed is not None:\n pytest.xfail(\"previous test failed (%s)\" % previousfailed.name)", "def class_level_setup(self, request):\n test_name = request.function.__name__\n if data_reader.get_data(test_name, \"Runmode\") != \"Y\":\n pytest.skip(\"Excluded from current execution run.\")", "def pytest_runtest_setup(item):\n if 'slow' in item.keywords and not item.config.getoption(\"--runslow\"):\n pytest.skip(\"need --runslow option to run\")", "def test_skip_in_test():\n pytest.skip()", "def setUp(self):\n if not self.flag:\n self.fail(self.err_msg)", "def test_skipif_false():\n pass", "def test_skipif_true():\n pass", "def pytest_runtest_setup(item):\n if hasattr(item, 'fixturenames') and LOOP_KEY not in item.fixturenames:\n item.fixturenames.append(LOOP_KEY)", "def test_skip():\n pytest.skip('for a reason!')", "def requires_setup(step, setup_names):\r\n pass", "def test_start(self):\n self.fail(\"write a test\")", "def class_level_setup(self, request):\n\n if data_reader.get_data(request.function.__name__, \"Runmode\") != \"Y\":\n pytest.skip(\"Excluded from current execution run.\")", "def test_test_no_tests(self, caplog, runway_config, runway_context):\n caplog.set_level(logging.ERROR, logger=\"runway\")\n obj = Runway(runway_config, runway_context)\n obj.tests = []\n with pytest.raises(SystemExit) as excinfo:\n assert obj.test()\n assert excinfo.value.code == 1\n assert \"no tests defined in runway.yml\" in caplog.messages[0]", "def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()", "def skip_or_run_test_tarantool(func, required_tt_version, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n skip_or_run_test_tarantool_impl(self, required_tt_version, msg)\n\n if func.__name__ != 'setUp':\n func(self, *args, **kwargs)\n\n return wrapper", "def skip_test(reason):\n global test_name_text\n print \"SKIP {}: {}\\n\".format(test_name_text, reason)\n sys.exit(0)", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def addSkip(self, test):\n test.status = \"skipped\"", "def filterOneTest(self, test_name):\n super(VtsKernelLibcutilsTest, self).filterOneTest(test_name)\n asserts.skipIf(\n test_name.split('.')[0] not in self.include_test_suite,\n 'Test case not selected.')", "def test_skip_with_reason_in_test():\n pytest.skip(SKIP_REASON)", "def setUp(self):\n test_env_setup()", "def skip_if_running_nose(msg=''):\n if is_nose_running():\n import nose\n raise nose.SkipTest(msg)", "def stopTestRun(self):", "def pre_setup(self) -> None:\n if self.__setup_done:\n self.base_logger.error(\"pre_setup was erroneously called twice\")\n raise SetupAlreadyDoneError()", "def before_run_tests(cls):\n pass", "def runTest(self):\n self.setUp()\n self.test_ProstateReporting1()", "def startTestHook(self):", "def useFailures(self):\n self.setupTests(tests = self.failures)", "def before_test(self, func, *args, **kwargs):\n pass" ]
[ "0.7539502", "0.7168475", "0.680045", "0.6769899", "0.6741446", "0.6730891", "0.6708668", "0.67018366", "0.66657114", "0.6512723", "0.6464716", "0.6456349", "0.6433945", "0.6433349", "0.63669145", "0.6332618", "0.633232", "0.6290003", "0.6274346", "0.62639135", "0.62599593", "0.6250447", "0.6200473", "0.6184224", "0.6183683", "0.6179722", "0.61683893", "0.6141115", "0.613866", "0.6121233" ]
0.79868513
0
Get list of port forwarding entities.
async def _async_port_entities_list( avm_wrapper: AvmWrapper, device_friendly_name: str, local_ip: str ) -> list[FritzBoxPortSwitch]: _LOGGER.debug("Setting up %s switches", SWITCH_TYPE_PORTFORWARD) entities_list: list[FritzBoxPortSwitch] = [] if not avm_wrapper.device_conn_type: _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_PORTFORWARD) return [] # Query port forwardings and setup a switch for each forward for the current device resp = await avm_wrapper.async_get_num_port_mapping(avm_wrapper.device_conn_type) if not resp: _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION) return [] port_forwards_count: int = resp["NewPortMappingNumberOfEntries"] _LOGGER.debug( "Specific %s response: GetPortMappingNumberOfEntries=%s", SWITCH_TYPE_PORTFORWARD, port_forwards_count, ) _LOGGER.debug("IP source for %s is %s", avm_wrapper.host, local_ip) for i in range(port_forwards_count): portmap = await avm_wrapper.async_get_port_mapping( avm_wrapper.device_conn_type, i ) if not portmap: _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION) continue _LOGGER.debug( "Specific %s response: GetGenericPortMappingEntry=%s", SWITCH_TYPE_PORTFORWARD, portmap, ) # We can only handle port forwards of the given device if portmap["NewInternalClient"] == local_ip: port_name = portmap["NewPortMappingDescription"] for entity in entities_list: if entity.port_mapping and ( port_name in entity.port_mapping["NewPortMappingDescription"] ): port_name = f"{port_name} {portmap['NewExternalPort']}" entities_list.append( FritzBoxPortSwitch( avm_wrapper, device_friendly_name, portmap, port_name, i, avm_wrapper.device_conn_type, ) ) return entities_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_port_forward_list(self, nPortFwdType):\n\t\treturn handle_to_object(call_sdk_function('PrlVirtNet_GetPortForwardList', self.handle, nPortFwdType))", "def port_list(self):\n return self._port_list", "def _get_forwarding_groups(self):\n return self.__forwarding_groups", "def _get_forwarding_groups(self):\n return self.__forwarding_groups", "def _get_forwarding_groups(self):\n return self.__forwarding_groups", "def forward_ports(ports):\n forwards = []\n remap_port = ADB_BASE_PORT\n for port in ports:\n forwards += [\"hostfwd=tcp::%d-:%d\" % (port, remap_port)]\n remap_port = remap_port + 1\n return [\n \"-device\", \"virtio-net,netdev=adbnet0\", \"-netdev\",\n \"user,id=adbnet0,%s\" % \",\".join(forwards)\n ]", "def get_peer_ports(self, port: Identifier) -> List[Reference]:\n return self.__peers[self.__kernel + port]", "def get_log_forwarding_destinations(self) -> dict:\n uri = f\"{self.uri}/log-forwarding-destinations\"\n\n response = self.request(uri=uri)\n return response.json()", "def find_ports(destination):\n output_ports = set()\n if isinstance(destination, collections.Iterable):\n for device in destination:\n # ports leading to device\n ports_towards_device = self.forwarding_table.get(\n device, self.ports)\n output_ports.update(ports_towards_device)\n else:\n output_ports.update(\n self.forwarding_table.get(destination, self.ports))\n return output_ports", "def get_forward_mapping(self):", "def get_ports(self):\n return self._ports", "def ports(self) -> List[int]:\n if self.head_port:\n return [self.head_port]\n else:\n ports = []\n for replica in self.pod_args['pods'][0]:\n if isinstance(replica.port, list):\n ports.extend(replica.port)\n else:\n ports.append(replica.port)\n return ports", "def _GetPorts(self):\n ports = []\n for start, end in self.term.destination_port:\n if start == end:\n ports.append(str(start))\n else:\n ports.append('%d-%d' % (start, end))\n return ports", "def list_ports(self):\n return self.ironic_client.port.list()", "def list(self):\n path = 'orgProvisioning/ipGreTunnelInfo'\n return self._session.get(path)", "def entities(self):\n return self._entities", "def get_persons(self):\n return self.person_list.model().get_person_list()", "def exposed_ports(self) -> list[\"Port\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"exposedPorts\", _args)\n _ctx = Port(_ctx)._select_multiple(\n _description=\"description\",\n _port=\"port\",\n _protocol=\"protocol\",\n )\n return _ctx.execute_sync(list[Port])", "def list_feed(self):\n entities = []\n entities_j = self._get('traversal/type=f')\n if entities_j:\n for entity_j in entities_j:\n entities.append(Feed(entity_j['id'], CanonicalPath(entity_j['path'])))\n return entities", "def get_ports(self) -> tuple:\n return self._current_dev_manager.get_ports()", "def get_ports(self) -> tuple:\n raise NotImplementedError", "def get_ptf_recv_ports(duthost, tbinfo):\n recv_ports = []\n mg_facts = duthost.get_extended_minigraph_facts(tbinfo)\n for ptf_idx in list(mg_facts[\"minigraph_ptf_indices\"].values()):\n recv_ports.append(ptf_idx)\n return recv_ports", "def list_ports(state):\n\tstate.report()", "def get_ptf_port(duthosts, cfg_facts, tbinfo, dut, dut_port):\n\n # get the index of the frontend node to index into the tbinfo dictionary.\n mg_facts = dut.get_extended_minigraph_facts(tbinfo)\n\n if \"portchannel\" in dut_port.lower():\n pc_cfg = cfg_facts['PORTCHANNEL_MEMBER']\n pc_members = pc_cfg[dut_port]\n logger.info(\"Portchannel members %s: %s\", dut_port, list(pc_members.keys()))\n port_list = 
list(pc_members.keys())\n else:\n port_list = [dut_port]\n\n ret = []\n for port in port_list:\n ret.append(mg_facts['minigraph_ptf_indices'][port])\n\n return ret", "def getListOfPorts(self):\n return _libsbml.CompModelPlugin_getListOfPorts(self)", "def relayed_ifaces(self):\n if self.is_relayed():\n return self._ifaces\n return None", "def forward_messages(self, message_list):\n\n def find_ports(destination):\n \"\"\"\n Return a list of the ports that according to the forwarding table\n lead to 'destination'.\n\n Arguments:\n destination: an instance of class NetworkDevice or an iterable\n of NetworkDevice instances.\n\n Returns:\n A set of the ports that lead to the devices in 'destination'.\n\n \"\"\"\n output_ports = set()\n if isinstance(destination, collections.Iterable):\n for device in destination:\n # ports leading to device\n ports_towards_device = self.forwarding_table.get(\n device, self.ports)\n output_ports.update(ports_towards_device)\n else:\n output_ports.update(\n self.forwarding_table.get(destination, self.ports))\n return output_ports\n\n for message in message_list:\n destinations = message.destination\n output_ports = find_ports(destinations)\n for port in output_ports:\n new_message = Message.from_message(message)\n self.env.process(\n self.instruct_transmission(new_message, port))", "def get_instances_to_forward(self, definition: Definition) -> List[str]:\n if not definition in self._internal_book:\n return []\n return [\n instance_name\n for instance_name, definitions in self.book.items()\n if definition in definitions\n ]", "def get_transports(self) -> List[TransportPair]:\n with self._lock:\n return self._transport_list", "def get_entities(self):\n return list(self._entities.values())" ]
[ "0.6575624", "0.59077907", "0.5837713", "0.5837713", "0.5837713", "0.5738165", "0.5708386", "0.56922287", "0.5692077", "0.5650982", "0.5595273", "0.55351114", "0.5523203", "0.5486832", "0.5478469", "0.54460037", "0.5389354", "0.5386047", "0.5374901", "0.5352854", "0.5350101", "0.5328229", "0.5317349", "0.52874917", "0.5264472", "0.52456725", "0.5234031", "0.5226465", "0.5224957", "0.52214897" ]
0.60739696
1
Return call deflection data.
def data(self) -> dict[str, Any]: return self.coordinator.data["call_deflections"].get(self.deflection_id, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self):", "def get_data(self):\r\n pass", "def get_fat(self):\n if len(self.records) == 0 or self.records[0].FORMAT is None:\n return self.records, self.var_dict\n fields = [fld for fld in self.records[0].FORMAT.split(':')]\n calldata = vcf.model.make_calldata_tuple(fields)\n args = ['./.']\n args.extend([None for _ in fields[1:]])\n cd_obj = calldata(*args)\n for rec in self.records:\n rec.samples[0].data = cd_obj\n return self.records, self.var_dict", "def get_data():\n return", "def call(self) -> List[Dict]:", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def _get_data(self):\n raise NotImplementedError()", "def data(self):", "def get_definition(self, info):\r\n pass", "def _get_rpc_call_object(self):\n callobj = dict()\n callobj[\"jsonrpc\"] = \"2.0\"\n callobj[\"method\"] = self.command\n callobj[\"id\"] = self.cmd_id\n callobj[\"params\"] = self.arguments\n return callobj", "def __call__(self, data, **kwargs):", "def data(self):\n pass", "def data(self):\n pass", "def request_data(self):\n pass", "def calls(self):\r\n return calls.Calls(self)", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def get_data(self):\n return {\n \"fd\": self.get_fd_j(self.id),\n \"fd_part\": self.get_fd_part_j(self.id),\n \"config\": self.config,\n # \"config\": self.get_config_j(self.id),\n \"prp\": self.get_prp_j(self.id),\n \"alll\": self.my_alll(self.id)\n }", "def __call__(self) -> dict:\n\t\tpass", "def get_data(self):\n\n return super().get_data()", "def data(self) -> dict:\n raise NotImplementedError()", "def getInfo():", "def call(self):", "def info(self) -> dict:", "def get_info(self):\n pass" ]
[ "0.6029236", "0.5850787", "0.5850787", "0.5816029", "0.57775885", "0.568967", "0.5680663", "0.56521213", "0.5634642", "0.556317", "0.556317", "0.556317", "0.5560738", "0.5534589", "0.5529225", "0.5527906", "0.55139613", "0.547582", "0.547582", "0.5412613", "0.54075426", "0.5407448", "0.5397278", "0.53723913", "0.5360844", "0.5349186", "0.5336571", "0.53248745", "0.52952623", "0.5285369" ]
0.82881063
0
Returns None if the file is not locked; otherwise returns the user and the client the file is locked with.
def locked(self): for result in p4run('opened', '-a', self.__path): if '+l' in result['type'] or '+m' in result['type']: user = P4User(result['user']) client = P4Client(result['client']) return user, client
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_lockfile(self):\n try:\n with open(self.lockfile) as f:\n return f.read()\n except EnvironmentError as e:\n if e.errno in self.NOT_EXIST_ERRORS:\n return None\n raise", "def acquire(self):\r\n start_time = time.time()\r\n import getpass\r\n userName = getpass.getuser()\r\n import platform\r\n computerName = platform.uname()[1]\r\n while True:\r\n try:\r\n self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)\r\n os.write(self.fd, userName + '\\n')\r\n os.write(self.fd, computerName + '\\n')\r\n os.write(self.fd, time.ctime(time.time()))\r\n break;\r\n except OSError as e:\r\n if e.errno != errno.EEXIST and e.errno != errno.EACCES:\r\n raise \r\n if (time.time() - start_time) >= self.timeout:\r\n if e.errno == errno.EEXIST:\r\n raise FileLockException(\"Timeout occured.\")\r\n else:\r\n raise FileLockException(\"Access denied.\")\r\n time.sleep(self.delay)\r\n self.is_locked = True", "def get_lock():\n if not os.path.exists(lock_file):\n fl = open(lock_file, 'a+')\n try:\n fcntl.lockf(fl, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as e:\n if e.errno not in (errno.EACCES, errno.EAGAIN):\n # Something else started. This is not likely.\n raise(IOError, 'already locked')\n sys.exit(1)\n else:\n fl = open(lock_file, 'r+')\n try:\n fcntl.lockf(fl, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as e:\n # File is lready locked.\n raise(IOError, 'already locked')\n sys.exit(2)\n return fl", "def locked(self):\n return self._filelock.locked", "def mod_header_user() -> Optional[User]:\n return None", "def get_lock(self):\n function_string = 'IFLOCK'\n self.scpi_comm(function_string)\n function_string = 'IFLOCK?'\n status = int(self.scpi_comm(function_string))\n return_message = \"\"\n if status == 0:\n return_message = \"Not successful\"\n if status == -1:\n return_message = \"Device already locked\"\n if status == 1:\n return_message = \"Lock acquired\"\n return return_message", "def wm_desired_user(self):\n return self.get_par(\"drive\")", "def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()", "def getUser(self, username):\r\n if (self._credCache is None or\r\n os.path.getmtime(self.filename) > self._cacheTimestamp):\r\n self._cacheTimestamp = os.path.getmtime(self.filename)\r\n self._credCache = dict(self._loadCredentials())\r\n return self._credCache[username]", "def svn_fs_get_lock(*args):\r\n return _fs.svn_fs_get_lock(*args)", "def lock_file_name(self):\n return self._pid_file", "def get_lock_file():\n if OPTIONS.pidfile:\n return expanduser(OPTIONS.pidfile)\n\n if os.name == 'posix':\n return '/var/run/pickup.pid'\n elif os.name == 'nt':\n lock_file = join(os.environ['APPDATA'], 'pickup', 'pickup.pid')\n os.makedirs(dirname(lock_file))\n return lock_file\n else:\n LOG.error('Unable to create the lock file on this OS (%r)' % os.name)\n sys.exit(9)", "def get_user(self):\n return None", "def _get_current_user(self):\n\n out, err, exitcode = self.execute('whoami')\n\n if exitcode == 0:\n return User(out[0])\n return None", "def is_locked(self):\n return bool(int(self._fp(self.F_LOCKED).read()))", "def locked(self):\n\t\treturn self.__locked", "def user(self):\n if not flask_login.current_user.is_anonymous():\n return flask_login.current_user._get_current_object()\n else:\n return None", "def get_lock():\n fh = None\n # We don't do anything unless --synchronous_name is set.\n if args.synchronous_name is not None:\n if not 
os.path.isdir(args.synchronization_dir):\n log('--synchronization_dir does not exist, attempting to create')\n os.mkdir(args.synchronization_dir)\n\n lock = os.path.join(args.synchronization_dir, args.synchronous_name)\n fh = open(lock, 'w')\n log('Acquiring lock on %s' % lock)\n if args.nonblocking:\n try:\n fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n log('We did not get the lock but --nonblocking is true; '\n 'exiting successfully')\n fh.close()\n sys.exit(0)\n else:\n # Wait indefinitely. Hopefully there is a timeout on the synchro.py\n # holding the lock.\n fcntl.flock(fh, fcntl.LOCK_EX)\n log('Lock acquired')\n return fh", "def is_locked(self):\n if not os.path.isfile(self.file) or not os.path.isfile(self._lockedFile):\n self._is_locked = False\n else:\n self._is_locked = True", "def is_locked(filepath):\n locked = None\n file_object = None\n if os.path.exists(filepath):\n try:\n #print(\"Trying to open %s.\" % filepath)\n buffer_size = 8\n # Opening file in append mode and read the first 8 characters.\n file_object = open(filepath, 'a', buffer_size)\n if file_object:\n #print(\"%s is not locked.\" % filepath)\n locked = False\n except IOError:\n print(\"File is locked.\")\n locked = True\n finally:\n if file_object:\n file_object.close()\n #print(\"%s closed.\" % filepath)\n else:\n print(\"%s not found.\" % filepath)\n return locked", "def get_current_user(self):\n return None", "def get_current_user(self):\r\n return self.jira.current_user()", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None", "def user(self):\n return self.owner.user", "def locked_get(self):\n credential = self._multistore._get_credential(self._key)\n if credential:\n credential.set_store(self)\n return credential", "def user(self):\n return self._forced_user", "def lock(self):\n return self._lock" ]
[ "0.62164235", "0.6191714", "0.6041426", "0.6023689", "0.56963855", "0.5626865", "0.56259793", "0.5596826", "0.5577915", "0.5561635", "0.55318743", "0.5522971", "0.54924977", "0.54642296", "0.54564303", "0.54481745", "0.5433529", "0.53933084", "0.537763", "0.53640884", "0.53458863", "0.5343063", "0.5341986", "0.5341986", "0.5341986", "0.5333448", "0.53306097", "0.5306101", "0.5302154", "0.5289994" ]
0.67735803
0
Creates the backup folder if it doesn't exist, and moves all data into it.
def create_backup(): if not os.path.exists(backup_directory): print("Creating backup folder as 'FINGER DATA BACKUP'...") os.makedirs(backup_directory) for directory in directories: print("Backing up data for label '{}'...".format(directory)) shutil.copytree('./'+directory, backup_directory+'/'+directory) print("Backup creation complete!") else: print("Backup already exists. If data is missing, it must be manually moved to the backup.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backup_directory(self, source_directory, destination_directory):\n pass", "def reset_backup_folder(self):\n pass", "def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)", "def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")", "def backup(openbazaarInstallationPath,\n backupFolderPath,\n onSucessCallback=None,\n onErrorCallback=None):\n\n dateTime = time.strftime('%Y-%h-%d-%H-%M-%S')\n outputFilePath = os.path.join(\n backupFolderPath,\n \"openbazaar-%s.tar.gz\" % dateTime\n )\n\n # Create the folder for the backup, if it doesn't exist.\n try:\n os.makedirs(backupFolderPath)\n except os.error:\n pass\n\n db_folder = os.path.join(openbazaarInstallationPath, \"db\")\n try:\n with tarfile.open(outputFilePath, \"w:gz\") as tar:\n tar.add(db_folder, arcname=os.path.basename(db_folder))\n except tarfile.TarError as e:\n # TODO: Install proper error logging.\n print \"Error while backing up to:\", outputFilePath\n if onErrorCallback is not None:\n onErrorCallback(e)\n return\n\n if onSucessCallback is not None:\n onSucessCallback(outputFilePath)", "def backup_data():\n\ttry:\n\t\tos.chdir(backup_directory)\n\texcept:\n\t\tprint(\"Backup folder does not exist!\")\n\tfor directory in directories:\n\t\tshutil.rmtree('./'+directory)\n\tos.chdir('..')\n\tfor directory in directories:\n\t\tprint(\"Backing up data for label '{}'...\".format(directory))\n\t\tshutil.copytree('./'+directory, backup_directory+'/'+directory)\n\tprint(\"Backup complete!\")", "def mkdir_with_backup(path, mode=0o777):\n if os.path.isdir(path):\n if len(os.listdir(path)) > 0:\n # directory already exists and is non-empty; backup it and\n # make a new one\n backup(path)\n os.makedirs(path, mode)\n else:\n # keep existing empty directory\n pass\n else:\n os.makedirs(path, mode)", "def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)", "def makeBackUp(portal, portal_objects, temp_dir_path, obj_id):\n\n # Get id of temp folder-object\n durty_path,temp_id = osp.split(temp_dir_path)\n\n if not temp_id:\n durty_path,temp_id = osp.split(durty_path)\n\n # Get temp folder-object\n if temp_id not in portal_objects:\n portal.invokeFactory('Large Plone Folder', id=temp_id)\n print >> import_out, \"! 
Created '%s' backup directory with same-ids \" \\\n \"objects from portal root.\" % temp_id\n temp_dir = getattr(portal, temp_id)\n\n # Move object with same id to temp folder-object\n #get_transaction().commit(1)\n transaction.savepoint()\n obj = portal.manage_cutObjects(ids=[obj_id])\n temp_dir.manage_pasteObjects(obj)\n\n print >> import_out, \"! '%s' Object moved from portal root to '%s' backup directory.\" % (obj_id, temp_id)", "def create_backup_dir(params_dict):\r\n print threading.currentThread().getName(), 'Starting'\r\n backup_location = params_dict['DB2 Installation Backup Directory']\r\n if not os.path.exists(backup_location):\r\n steplog.info(\"Attempting to create the backup directory\")\r\n try:\r\n os.makedirs(backup_location)\r\n steplog.info(\"Backup directory has been created\")\r\n except OSError:\r\n if not os.path.isdir(backup_location):\r\n raise ValueError(\"Backup directory could not be created\")", "def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def move_from_temp_directory(self):", "def __makeBackup(self):\n pass #FIXME!!!", "def backup(self):\n\n if not File.backup_text(self.get_title()): return\n if Settings.get_destination() == \"remote\":\n Remote.upload_file(self)\n elif Settings.get_destination() == \"google\":\n Google.upload_file(file=self)\n else:\n # move file to local backup location\n backupPath = os.path.join(Settings.get_local_path(), \"posted\")\n backupPath = os.path.join(backupPath, self.category, self.get_title())\n shutil.move(self.get_path(), backupPath)", "def prepare(self, dst, options):\n self.checkExisting(dst)\n self.makedirs(dst.parent())", "def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')", "def backup(self):\n if self.url is not None:\n\n # zip backup folder\n zipapp.create_archive(self.logs_directory, self.send_zip)\n\n # then send zipped folder to the URL\n try:\n requests.post(self.url, files={\n 'uploaded_file': (os.path.basename(self.send_zip), open(self.send_zip, 'rb')),\n })\n except requests.exceptions.ConnectionError as error:\n print(error)", "def backup_files(self):\n backup_path = os.path.join(self.backupdir, self.get_timestamp().replace(':', '-'))\n try:\n if not os.path.exists(backup_path):\n self.make_path(backup_path)\n if not os.path.exists(backup_path):\n raise IOError('Path was not made correctly')\n else:\n self.print_to_log('Backup path: %s' % backup_path)\n for item in self.file_list:\n try:\n self.print_to_log('Backing up file: %s' % item)\n shutil.copy(item, backup_path)\n except IOError, why:\n self.error = 2\n self.print_to_log(str(why))\n self.print_to_log('Unable to archive file: %s continuing' % item)\n except IOError, why:\n self.print_to_log(str(why))\n self.print_to_log('Quiting with out archiving')\n self.error = 1", "def 
_backup_meta_data(meta_path: Path) -> None:\n meta_path = meta_path.resolve()\n backup_meta_path = meta_path.parent / (meta_path.name + \".bak\")\n i = 0\n while backup_meta_path.exists():\n backup_meta_path = backup_meta_path.with_suffix(\".bak{}\".format(i))\n i += 1\n shutil.copy(str(meta_path), str(backup_meta_path))", "def backup_csv():\n for file_name in os.listdir():\n if \".csv\" in file_name:\n print(\"There shouldn't be any .csv files in your directory. We found .csv files in your directory.\")\n directory = os.getcwd()\n try:\n os.mkdir(directory + \"/backup/\")\n except:\n print(\"Backup folder exists.\")\n timestamp = datetime.now()\n shutil.move(file_name, directory + \"/backup/\" + str(timestamp) + \"-\" + file_name)", "def move_old_excel():\n timestr = get_time()\n\n try:\n if not os.listdir('old_excel'):\n print('Folder empty no need to remove files')\n except FileNotFoundError:\n os.mkdir('old_excel')\n\n print(\"passing here\")\n try:\n if not os.listdir('excel'):\n print('Folder empty no need to remove files')\n else:\n os.rename('excel', 'old_excel/excel_' + timestr)\n os.mkdir('excel')\n print(\"created folder\")\n except FileNotFoundError:\n os.mkdir('excel')\n print(\"created folder within exception\")", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' % self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def _backup(self, parsed_args):\n if self.backup:\n dep_sys = self.document['deploymentSystem']\n dep_path = self.document['deploymentPath']\n backup_dep_path = dep_path + '.' 
+ str(seconds())\n\n print_stderr('Backing up agave://{}/{}'.format(dep_sys, dep_path))\n start_time = milliseconds()\n self.messages.append(\n ('backup', 'src: agave://{}/{}'.format(dep_sys, dep_path)))\n self.messages.append(\n ('backup', 'dst: agave://{}/{}'.format(dep_sys,\n backup_dep_path)))\n\n try:\n # TODO - only do this if dep_path exists, otherwise an Exception will be raised\n manage.move(dep_path,\n system_id=dep_sys,\n destination=backup_dep_path,\n agave=self.tapis_client)\n print_stderr('Finished ({} msec)'.format(milliseconds() -\n start_time))\n return True\n except Exception as exc:\n if self.ignore_errors:\n self.messages.append(('backup', str(exc)))\n print_stderr('Failed ({} msec)'.format(milliseconds() -\n start_time))\n return False\n else:\n raise\n\n return True", "def test_backup_only(self):\n # Check that by default a backup is performed and a snapshot is created.\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--backup', '--no-sudo',\n '--disable-notifications',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure the backup was created.\n self.verify_destination(latest_directory)\n # Make sure no snapshot was created.\n assert len(find_snapshots(destination)) == 0", "def move_home_pypackage_back():\n\n backups = defaultdict(list)\n home = os.path.expanduser(\"~\")\n for file_name in os.listdir(home):\n if \".pypackage\" in file_name and file_name.endswith(\"~\"):\n file_path = os.path.join(home, file_name)\n backups[os.stat(file_path).st_ctime].append(file_path)\n\n shutil.move(\n max(backups[max(backups)]), # the longest of the lastest created\n os.path.join(home, \".pypackage\"),\n )", "def backup(self):\n logging.info('Executing NCBI Blast backup')\n backup_folder = self.create_backup_dir()\n if not backup_folder:\n logging.error('Failed to create backup folder.')\n return False\n # Copy only README files for future reference\n app_readme_file = self.config['readme_file']\n ncbi_readme_file = self.info_file_name\n try:\n shutil.copy2(app_readme_file, backup_folder)\n shutil.copy2(ncbi_readme_file, backup_folder)\n except Exception as e:\n logging.exception('NCBI Blast Backup did not succeed. Error: {}'\n .format(e))\n return False\n return True", "def prepare_storage(self):\n self.logger.info(\"Preparing storage for your data...\")\n try:\n self.dir.mkdir(exist_ok=True)\n self.full_path_to_file.touch(exist_ok=True)\n except PermissionError:\n logging.error(\n \"Conversion cannot be performed. 
Permission denied for this directory\"\n )\n sys.exit()", "def test_simple_backup(self):\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--no-sudo', '--ionice=idle',\n '--disable-notifications',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure the backup was created.\n self.verify_destination(latest_directory)\n # Make sure a snapshot was created.\n assert len(find_snapshots(destination)) == 1", "def move_tracks_to_music_folder(self):\n home = os.path.expanduser(\"~\")\n dest = home + \"/Music/\"\n for each_file, artist in self.past_songs_db_data:\n sub_folder = artist + \"/\" if artist != \"\" else \"\" \n # possible race condition\n if not os.path.exists(dest + sub_folder):\n os.makedirs(dest + sub_folder)\n\n if os.path.isfile(each_file) and \\\n not os.path.isfile(dest + each_file): \n shutil.move(each_file, dest + sub_folder)" ]
[ "0.65214306", "0.6510049", "0.6464436", "0.63852924", "0.63724446", "0.6301501", "0.62980753", "0.626059", "0.61886007", "0.6152971", "0.6086255", "0.6069988", "0.60538745", "0.60263026", "0.59899116", "0.59500426", "0.5910163", "0.5907927", "0.59050465", "0.5874849", "0.5867928", "0.58677745", "0.57933426", "0.57864046", "0.576968", "0.57688993", "0.5744326", "0.5706894", "0.57048315", "0.56986505" ]
0.74460024
0
Deletes old data folders, and recreates them from the backup folder.
def restore_backup():
    for directory in directories:
        shutil.rmtree('./'+directory)
    for directory in directories:
        print("Restoring data for label '{}'...".format(directory))
        shutil.copytree(backup_directory+'/'+directory, './'+directory)
    print("Data restoration complete!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_all():\n if os.path.exists(DATA_DIR):\n shutil.rmtree(DATA_DIR)", "def reset_backup_folder(self):\n pass", "def backup_data():\n\ttry:\n\t\tos.chdir(backup_directory)\n\texcept:\n\t\tprint(\"Backup folder does not exist!\")\n\tfor directory in directories:\n\t\tshutil.rmtree('./'+directory)\n\tos.chdir('..')\n\tfor directory in directories:\n\t\tprint(\"Backing up data for label '{}'...\".format(directory))\n\t\tshutil.copytree('./'+directory, backup_directory+'/'+directory)\n\tprint(\"Backup complete!\")", "def delete_backup(self):\n for pathname in file_io.get_matching_files(self.write_checkpoint_manager._prefix + '*'):\n _delete_file_or_dir(pathname)\n for pathname in file_io.get_matching_files(os.path.join(self.write_checkpoint_manager.directory, 'checkpoint')):\n _delete_file_or_dir(pathname)", "def delete_old_backup(self):\n print \"### Info ### Delete redundant backups\"\n for i in range(len(self.date_list)-20):\n os.remove(os.path.abspath(self.backup_path + U'/voc2brain_backup_' + str(self.date_list[0])+ \".sdb3\") )", "def clean_directory():\n if os.path.exists('data'):\n shutil.rmtree('data')\n os.makedirs('data')\n\n if os.path.exists('returns'):\n shutil.rmtree('returns')\n os.makedirs('returns')", "def cleanDataDir(self):\n for strFile in os.listdir(self.strDataDir):\n os.remove(os.path.join(self.strDataDir, strFile))", "def tearDown(self):\n shutil.rmtree(self._data_dir_path)", "def clear_data_home(data_home=None):\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "async def delete_raw_data():\n await expire_directories(\".rewrite\", REWRITE_DAYS)\n await expire_directories(\"undelete\", UNDELETE_DAYS)\n\n cutoff = datetime.now(timezone.utc) - timedelta(days=DATA_LAKE_DAYS)\n # wraparound to previous month, just in case\n last_month = cutoff - timedelta(days=cutoff.day + 1)\n for day in (\n last_month,\n cutoff,\n ):\n await expire_directories(\n storage.iothub_data_dir + day.strftime(\"/%Y/%m\"), DATA_LAKE_DAYS,\n )", "def clear_data_home(data_home: str = None):\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)", "def tearDown(self):\n super().tearDown()\n shutil.rmtree(DATA_DIR, ignore_errors=True)", "def create_backup():\n\tif not os.path.exists(backup_directory):\n\t\tprint(\"Creating backup folder as 'FINGER DATA BACKUP'...\")\n\t\tos.makedirs(backup_directory)\n\t\tfor directory in directories:\n\t\t\tprint(\"Backing up data for label '{}'...\".format(directory))\n\t\t\tshutil.copytree('./'+directory, backup_directory+'/'+directory)\n\t\tprint(\"Backup creation complete!\")\n\telse:\n\t\tprint(\"Backup already exists. 
If data is missing, it must be manually moved to the backup.\")", "def recreate():\n from data.seed import Seed\n\n if click.confirm(\"Are you sure you want to lose all your data\"):\n db.drop_all()\n db.create_all()\n Seed().data()", "def eraseDatas(folderToRemove='datas'):\n directoryToRemove = os.path.join(pathtofolder(), folderToRemove)\n for i in os.listdir(directoryToRemove):\n os.remove(os.path.join(directoryToRemove, i))\n os.rmdir(directoryToRemove) # Now the folder is empty of files\n pass", "def force_invalidate_all(self):\r\n safe_mkdir(self._root, clean=True)", "def delete_previous_files():\n def delete(root: Path):\n shutil.rmtree(root / 'output', ignore_errors=True)\n for p in root.iterdir():\n if str(p).endswith(('.log', 'jobs.csv', 'csv.lock', '.yaml')):\n p.unlink()\n\n delete(wt_registration_dir)\n delete(mut_registration_dir)", "def cleanAllArmiTempDirs(olderThanDays: int) -> None:\n from armi.utils.pathTools import cleanPath\n\n gracePeriod = datetime.timedelta(days=olderThanDays)\n now = datetime.datetime.now()\n thisRunFolder = os.path.basename(_FAST_PATH)\n\n for dirname in os.listdir(APP_DATA):\n dirPath = os.path.join(APP_DATA, dirname)\n if not os.path.isdir(dirPath):\n continue\n try:\n fromThisRun = dirname == thisRunFolder # second chance to delete\n _rank, dateString = dirname.split(\"-\")\n dateOfFolder = datetime.datetime.strptime(dateString, \"%Y%m%d%H%M%S%f\")\n runIsOldAndLikleyComplete = (now - dateOfFolder) > gracePeriod\n if runIsOldAndLikleyComplete or fromThisRun:\n # Delete old files\n cleanPath(dirPath, mpiRank=MPI_RANK)\n except: # noqa: bare-except\n pass", "def clean_up(self):\n directory = os.path.join(os.getcwd(), self.TMP_FOLDER)\n if os.path.exists(directory) and os.path.isdir(directory):\n shutil.rmtree(directory)", "def clear_cache(self):\n local_app_data = os.getenv('LOCALAPPDATA')\n edge_root = os.path.join(local_app_data, 'Packages',\n 'Microsoft.MicrosoftEdge_8wekyb3d8bbwe')\n directories = ['AC', 'AppData']\n for directory in directories:\n path = os.path.join(edge_root, directory)\n try:\n shutil.rmtree(path)\n except Exception:\n pass", "def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])", "def delete_old():\n folder = '../build/data/vtk'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path) # subdirs\n except Exception as e:\n print(e)\n folder = '../build/log'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n # elif os.path.isdir(file_path): shutil.rmtree(file_path) #subdirs\n except Exception as e:\n print(e)", "def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')", "def tearDown(self):\n if self.workdir and os.path.exists(self.workdir):\n shutil.rmtree(self.workdir)", "def delete_temp_folder():\n\n tempFolder = os.path.join(os.getenv(\"APPDATA\"), \"GARI\\Temp\")\n\n if 
os.path.exists(tempFolder):\n for file in os.listdir(tempFolder):\n arcpy.Delete_management(os.path.join(tempFolder, file))", "def cleanup(self):\n if self.cleanup_allowed:\n shutil.rmtree(self.out_dir)\n self.train_df, self.valid_df, self.test_df = None, None, None", "def purge_deleted_directories(self):\n registered = {safe_filename(obj.name) for obj in self}\n bad_directories = [\n self._base_data_dir / dirname\n for dirname in os.listdir(self._base_data_dir)\n if (self._base_data_dir / dirname).is_dir() and dirname not in registered\n ]\n\n for fp in bad_directories:\n shutil.rmtree(fp)\n\n return len(bad_directories)", "def tearDown(self):\n # unittest.TestCase.tearDown(self)\n\n root = os.path.join(\".\", \"files\")\n endingList = os.listdir(root)\n rmList = [fn for fn in endingList if fn not in self.startingList]\n\n if self.oldRoot == root:\n for fn in rmList:\n fnFullPath = os.path.join(root, fn)\n if os.path.isdir(fnFullPath):\n os.rmdir(fnFullPath)\n else:\n os.remove(fnFullPath)\n\n os.chdir(self.oldRoot)", "def __removeBackup(self):\n pass #FIXME!!" ]
[ "0.6959087", "0.6932837", "0.67664707", "0.6507157", "0.6495482", "0.63897157", "0.63854295", "0.63776475", "0.6273897", "0.6213164", "0.615991", "0.61418915", "0.6131707", "0.60488254", "0.60122275", "0.6010502", "0.60085917", "0.6005368", "0.5998081", "0.5968873", "0.5965292", "0.5951169", "0.5872175", "0.5852484", "0.58503515", "0.58444166", "0.58410394", "0.58332", "0.58233976", "0.58223176" ]
0.693683
1
Initial stab at the requires_numpy function raises a warning
def requires_numpy(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        if _NUMPY_AVAILABLE:
            return f(*args, **kwargs)
        else:
            raise Warning("Numpy not available. Cannot call %s" % f.__name__)
            return f(*args, **kwargs)
    return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, strict=True):\n self.strict = strict\n self.testwithoutnp = test_without_numpy()", "def with_numpy(func):\r\n return func", "def check_numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.detach().cpu().numpy()\n x = np.asarray(x)\n assert isinstance(x, np.ndarray)\n return x", "def np(self, *args, **kwargs):\n raise NotImplementedError('numpy is unavailable on your system. Please install numpy before calling plist.np().')", "def check_numpy_compatibility():\n def get_digits(string):\n \"\"\"\n Returns digits of string\n by removing all other characters\n \"\"\"\n digit_str= ''.join(filter(lambda x: x.isdigit(), string))\n return digit_str\n\n main, sub, patch = np.__version__.split('.')\n main = int(get_digits(main))*1000\n sub = int(get_digits(sub))*10\n patch = int(get_digits(patch))\n logging.debug(f'Version number of numpy is {main+sub+patch}')\n if main+sub+patch >= 1163: # 1.16.3 --> 1000+160+3\n return False\n else:\n return True", "def with_numpy(func):\r\n def my_func():\r\n raise nose.SkipTest('Test requires numpy')\r\n return my_func", "def use_numpy(request: Any) -> Any:\n return request.param", "def use_numpy(request: Any) -> Any:\n return request.param", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def verify_numpy_type(self, matrix):\n if type(matrix) != np.ndarray and matrix != None:\n return np.asfarray(matrix)\n elif type(matrix) == np.ndarray and matrix != None:\n return matrix", "def _check_array(X):\n return check_array(X,\n accept_sparse=['csr', 'csc'], # Accept sparse csr, csc\n order=None, # Do not enforce C or Fortran\n copy=False, # Do not trigger copying\n force_all_finite=True, # Raise error on np.inf/np.nan\n ensure_2d=True, # Force 'X' do be a matrix\n allow_nd=True, # Allow 'X.ndim' > 2\n warn_on_dtype=False # Mute as 'dtype' is 'None'\n )", "def _check_array(self, X):\n x = np.copy(X)\n if np.isfortran(x) is False:\n # print (\"Array must be in Fortran-order. 
Converting now.\")\n x = np.asfortranarray(x)\n if self.sampling > x.shape:\n raise ValueError(\"'sampling' is greater than the dimensions of X\")\n return x", "def __NDim_restriction_correct_ndarray_number(self):\n\n strTestName = 'The number of dimensions in a Numpy array higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimH('parameter1', 1)\n\n RxCSObject.parameter1 = np.random.rand(3, 4)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __NDim_restriction_incorrect_ndarray_ndarray(self):\n\n strTestName = 'The number of dimensions in a Numpy array lower than the number of dimensions in another Numpy array (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('vRefParameter1', 'Numpy array reference parameter')\n RxCSObject.paramType('vRefParameter1', np.ndarray)\n\n # Now, let us define a Numpy array parameter\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimL('parameter1', 'vRefParameter1', mul=2)\n\n RxCSObject.vRefParameter1 = np.random.rand(4)\n RxCSObject.parameter1 = np.random.rand(2, 1, 9)\n\n self.__parametersCheck_error(RxCSObject, NDimError, strTestName)", "def check_is_numpy(X):\n if isinstance(X, list):\n return np.asarray(X)\n if isinstance(X, np.ndarray):\n return X\n raise ValueError(\n \"Expected an python list or numpy array as input \"\n \"but got {}\".format(str(type(X)))\n )", "def setup_test():\n if LooseVersion(np.__version__) >= LooseVersion('1.14'):\n np.set_printoptions(legacy='1.13')\n\n # Temporary fix until scipy release in October 2018\n # must be removed after that\n # print the first occurrence of matching warnings for each location\n # (module + line number) where the warning is issued\n if (\n LooseVersion(np.__version__) >= LooseVersion('1.15')\n and LooseVersion(scipy.version.short_version) <= '1.1.0'\n ):\n warnings.simplefilter('default')", "def test_numpy_supported_version(self):\r\n min_acceptable_version = (1, 5, 1)\r\n max_acceptable_version = (1, 7, 1)\r\n try:\r\n from numpy import __version__ as numpy_lib_version\r\n version = tuple(map(int, numpy_lib_version.split('.')))\r\n pass_test = (version >= min_acceptable_version and\r\n version <= max_acceptable_version)\r\n version_string = str(numpy_lib_version)\r\n except ImportError:\r\n pass_test = False\r\n version_string = \"Not installed\"\r\n self.assertTrue(pass_test,\r\n \"Unsupported numpy version. 
Must be >= %s and <= %s , but running %s.\"\r\n % ('.'.join(map(str, min_acceptable_version)),\r\n '.'.join(map(str, max_acceptable_version)),\r\n version_string))", "def catch_np_warning(msg, exception=FloatingPointError):\n assert issubclass(exception, Exception)\n\n def wrapper(func):\n def wrapped(e):\n old_settings = np.geterr()\n np.seterr(invalid=\"raise\")\n try:\n return func(e)\n except exception:\n warnings.warn(msg + \" \" + e.fname, exc.EmptyStep)\n np.seterr(**old_settings)\n return wrapped\n return wrapper", "def __NDim_restriction_incorrect_ndarray_number(self):\n\n strTestName = 'The number of dimensions in a Numpy array higher or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimHE('parameter1', 3)\n\n RxCSObject.parameter1 = np.random.rand(3, 4)\n\n self.__parametersCheck_error(RxCSObject, NDimError, strTestName)", "def __NDim_restriction_correct_ndarray_parameter(self):\n\n strTestName = 'The number of dimensions in a Numpy array lower or equal to a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a Numpy array parameter\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimLE('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.parameter1 = np.random.rand(3, 4, 5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def ignore_numpy_warning(test_func):\n def do_test(self, *args, **kwargs):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore',\n message='numpy.ufunc size changed')\n test_func(self, *args, **kwargs)\n return do_test", "def check_np_array_nan(func):\r\n\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n result = func(*args, **kwargs)\r\n if type(result) in [tuple, list]:\r\n count = 0\r\n for an_array in result:\r\n if type(an_array) is dict:\r\n for key in an_array:\r\n if np.isnan(an_array[key]).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th dict are:\\n\"\r\n )\r\n hydro_logger.warning(\"value of \" + key + \":\\n\")\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array[key])))\r\n else:\r\n if np.isnan(an_array).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th array are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array)))\r\n count = count + 1\r\n elif type(result) is np.array:\r\n if np.isnan(result).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! 
It may affect following calculation!!\\n \"\r\n \"The location of NaN values are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(result)))\r\n return result\r\n\r\n return wrapper", "def __NDim_restriction_incorrect_ndarray_parameter(self):\n\n strTestName = 'The number of dimensions in a Numpy array equals a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a Numpy array parameter\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.parameter1 = np.random.rand(3, 4)\n\n self.__parametersCheck_error(RxCSObject, NDimError, strTestName)", "def is_numpy(obj):\n return 'numpy' in str(type(obj))", "def __DimSiz_restriction_incorrect_ndarray_ndarray_pedantic3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher than the size of a dimension of another Numpy array [pedantic] (3) (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 'aParameter1', 1, 'pages', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 4)\n RxCSObject.aParameter1 = np.random.randn(3, 2)\n\n self.__parametersCheck_error(RxCSObject, ValueError, strTestName)", "def __NDim_restriction_correct_ndarray_ndarray(self):\n\n strTestName = 'The number of dimensions in a Numpy array equals the number of dimensions in another Numpy array (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('m4RefParameter1', 'Numpy array reference parameter')\n RxCSObject.paramType('m4RefParameter1', np.ndarray)\n\n # Now, let us define a Numpy array parameter\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimHE('parameter1', 'm4RefParameter1', add=1)\n\n RxCSObject.m4RefParameter1 = np.random.rand(4, 2, 9)\n RxCSObject.parameter1 = np.random.rand(2, 1, 9, 5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_correct_ndarray_ndarray_pedantic3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to the size of a dimension of another Numpy array [pedantic] (3) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 'aParameter1', 'rows', 'columns', pedantic=1, add=1)\n\n RxCSObject.parameter1 = np.random.randn(4, 3, 4)\n RxCSObject.aParameter1 = np.random.randn(3, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_correct_ndarray_number2(self):\n\n strTestName = 'The number of columns of a Numpy array lower or equal to a number 
(correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 3, 'columns')\n\n RxCSObject.parameter1 = np.random.randn(3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def load_numpy_distutils_misc_util(finder, module):\n module.IgnoreName(\"numscons\")" ]
[ "0.6546872", "0.63643485", "0.62574387", "0.6253495", "0.6237235", "0.6213361", "0.6105712", "0.6105712", "0.60214335", "0.5940299", "0.59354603", "0.5872245", "0.5869582", "0.57768196", "0.57631403", "0.57466626", "0.5744809", "0.57393456", "0.57375014", "0.5700972", "0.57002383", "0.5671002", "0.56644255", "0.56523454", "0.562384", "0.5622346", "0.5617496", "0.55986917", "0.5598131", "0.55787903" ]
0.72376585
0
Rate a single value based on linear interpolation.
def rate (self, indep):
    data = self.data
    index = len(data)-2
    for i in xrange(len(data)-1):
        if indep < float(data[i+1][0]):
            index = i
            break
    return self.interpolateValue(float(data[index][0]),float(data[index][2]),float(data[index+1][0]),float(data[index+1][2]),indep)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speed_interpolation(val):\n if val == 0.5:\n return 1.0\n elif val < 0.5:\n return low_interp(val)\n else:\n return hi_interp(val)", "def _set_rate(self):\r\n interval = self.data.iloc[2, 0] - self.data.iloc[1, 0]\r\n self.rate = int(1 / interval)", "def rate(self, t, i):\n return np.real(self._rates[i](t))", "async def rain_rate(self, value):\n if not value:\n return 0\n return await self.rain(value * 60)", "def _approximate_constant(self, value, num, lobatto, use_mp, dps):\n self._approximate(lambda x: value, num, lobatto, use_mp, dps)", "def linear_interpolate_value_change(t0, v0, t1, v1, dt):\n return (v1 - v0)/float(t1-t0) * dt", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def constantLearningRate(rate):\n def function(t):\n return rate\n return function", "def rate(self, t, y):\n if y[1] >= self.parameters.T:\n return super(SFORT, self).rate(t, y)\n else:\n return 0", "def accelerate_value(current, desired, rate, dt):\n if(desired == current):\n return desired\n\n if(desired == 0):\n return 0\n\n if(desired < current):\n rate = -rate\n\n new_value = current + rate * dt /1000\n if(abs(new_value) > abs(desired)):\n new_value = desired\n return new_value", "def intensity(self, value: int, /) -> None:", "def interpolated(self, Any, Any_1, p_float): # real signature unknown; restored from __doc__\n pass", "def rate_density(x, a):\n return a * x", "def reverseRate (self, indep):\n data = self.data\n index = len(data)-2\n for i in xrange(len(data)-1):\n if indep < float(data[i+1][2]):\n index = i\n break\n return self.interpolateValue(float(data[index][2]),float(data[index][0]),float(data[index+1][2]),float(data[index+1][0]),indep)", "def upsample(x):\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")", "def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y", "def interpolate(self, freq, data):\n func = scipy.interpolate.interp1d(\n freq, data,\n kind='nearest',\n copy=False,\n assume_sorted=True,\n bounds_error=False,\n fill_value=np.nan,\n )\n return func(self.freq)", "def x_rate(self):\n values = self._interpolate_table(\"x\", derivative_order=1)\n # values += self._corrections(('ortho_eop', iers.ortho_eop, 0, 1e-6),\n # ('pmsdnut2', iers.pmsdnut2, 0, 1e-6))\n return values", "def linear_interpolate_value_at_time(t0, v0, t1, v1, t):\n return v0 + linear_interpolate_value_change(t0, v0, t1, v1, t - t0)", "def interpolate(self, y, x=None, kind='cubic', num=None, lobatto=True,\n use_mp=False, dps=None):\n if x is None:\n x = np.linspace(float(self.a), float(self.b), len(y))\n f = interp1d(x, y, kind=kind)\n self.approximate(lambda x: float(f(float(x))),\n num=num, lobatto=lobatto, use_mp=use_mp, dps=dps)", "def input_data_sample_rate(self, value):\n self._input_data_sample_rate = value", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def 
func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def _LinearInterpolate(x0, target, x1, y0, y1):\n if x0 == x1:\n return (y0 + y1) / 2\n return (y1 - y0) * (target - x0) / (x1 - x0) + y0", "def Interpolator(X, Y, TimeleftIndex, TimeRightIndex,YValue):\n Y1 = Y[TimeleftIndex]\n Y2 = Y[TimeRightIndex]\n X2 = X[TimeRightIndex]\n X1 = X[TimeleftIndex]\n slope = (Y2 - Y1) / (X2 - X1)\n if slope != 0:\n X0 = (YValue - Y1) / slope + X1\n return X0\n else:\n return 0", "def sample(self, i):\n\t\tif self.nsamp % 2 == 0:\n\t\t\ti0 = i-(self.nsamp/2-1)\n\t\t\ti1 = i+(self.nsamp/2)\n\t\telse:\n\t\t\ti0 = i-((self.nsamp-1)/2.)\n\t\t\ti1 = i+((self.nsamp-1)/2.)\n\t\treturn self.yval[i0:i1]", "def linear_interp(x,y,xi) :\n \n f = interp1d(x,y,kind='linear')\n yi = f(xi)\n \n return yi", "def next(self, dt):\n self.x = self.x + \\\n (self.rate-0.5*self.vola*self.vola)*dt + \\\n sqrt(dt)*self.vola*np.random.normal()\n return exp(self.x)", "def linear(minVal, maxVal, newMin, newMax, value):\n coef = ((float(value) - float(minVal)) * 100) / (float(maxVal) - float(minVal))\n newVal = float(newMin) + ((coef * (float(newMax) - float(newMin))) / 100)\n return newVal", "def extrusionRate(self, value):\n if (not self.freezeExRate):\n self.__extrusionRate = value" ]
[ "0.6729182", "0.6307266", "0.62642294", "0.61200744", "0.59919167", "0.5961092", "0.58677727", "0.5847443", "0.58286476", "0.5828598", "0.5707358", "0.5697539", "0.5661246", "0.5659527", "0.56492877", "0.5610367", "0.56028885", "0.5597788", "0.559511", "0.55597943", "0.5552239", "0.55326843", "0.55326843", "0.5491799", "0.5485289", "0.54842186", "0.54831725", "0.5472207", "0.54712397", "0.5440681" ]
0.6847074
0
Reverse rate a single value based on linear interpolation
def reverseRate (self, indep):
    data = self.data
    index = len(data)-2
    for i in xrange(len(data)-1):
        if indep < float(data[i+1][2]):
            index = i
            break
    return self.interpolateValue(float(data[index][2]),float(data[index][0]),float(data[index+1][2]),float(data[index+1][0]),indep)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reverse_rate(rate_tuple):\n return 1 / rate_tuple[2]", "def rate (self, indep):\n data = self.data\n index = len(data)-2\n for i in xrange(len(data)-1):\n if indep < float(data[i+1][0]):\n index = i\n break\n return self.interpolateValue(float(data[index][0]),float(data[index][2]),float(data[index+1][0]),float(data[index+1][2]),indep)", "def reverseRateTS (self,ts):\n output = []\n for line in ts.data:\n output.append([line[0],self.rate2(line[1]),line[2]])\n return timeseries(output)", "def upsample(x):\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")", "def speed_interpolation(val):\n if val == 0.5:\n return 1.0\n elif val < 0.5:\n return low_interp(val)\n else:\n return hi_interp(val)", "def linear_interpolate_value_change(t0, v0, t1, v1, dt):\n return (v1 - v0)/float(t1-t0) * dt", "def forward(self, x):\n\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x", "def inverseTimeLearningRate(rate, k=1):\n def function(t):\n return float(rate)/t**k\n return function", "def interpolate_inverse(series, **options):\n inverse = pd.Series(series.index, index=series.values)\n interp_func = interpolate(inverse, **options)\n return interp_func", "def rlerp(from_val, to_val, value):\n\tfactor = (value-from_val) / (to_val-from_val)\n\treturn factor", "def lerp(first, second, ratio):\n return first * (1 - ratio) + second * ratio", "def reverse_l_r(self, x_value, z_value):\r\n\t\t#~~~ x-values ~~~\r\n\t\tdiff_temp = np.diff(x_value) \t\t\t\t#step in x-values\r\n\t\tdiff_reverse = diff_temp[::-1]\t\t\t\t#reversing the step values above\r\n\t\ttemp = np.cumsum(diff_reverse)\t\t\t\t#computing cumulative sum on differences\r\n\t\tx_value = np.insert(temp, 0,0)\t\t\t\t#adding the initial zero-value\r\n\t\t\r\n\t\t#~~~ z-values ~~~\r\n\t\tz_value = z_value[::-1]\t\t\t\t\t\t#reversing z-values\r\n\t\t\r\n\t\treturn x_value, z_value", "def y_rate(self):\n values = self._interpolate_table(\"y\", derivative_order=1)\n # values += self._corrections(('ortho_eop', iers.ortho_eop, 0, 1e-6),\n # ('pmsdnut2', iers.pmsdnut2, 0, 1e-6))\n return values", "def Interpolator(X, Y, TimeleftIndex, TimeRightIndex,YValue):\n Y1 = Y[TimeleftIndex]\n Y2 = Y[TimeRightIndex]\n X2 = X[TimeRightIndex]\n X1 = X[TimeleftIndex]\n slope = (Y2 - Y1) / (X2 - X1)\n if slope != 0:\n X0 = (YValue - Y1) / slope + X1\n return X0\n else:\n return 0", "def inv_lerp(x, low, high):\n return (x - low) / (high - low)", "def _set_rate(self):\r\n interval = self.data.iloc[2, 0] - self.data.iloc[1, 0]\r\n self.rate = int(1 / interval)", "def resample_reflectance(self):\n\n if hasattr(self, \"rwl\"):\n p = interp1d(self.rwl, self.rfl, fill_value=\"extrapolate\")\n self.rfl = p(self.wl)", "def reverse_CDF(self):\n self.series_y = 1. - self.series_y\n self.quantile_series_y_lower = 1. - self.quantile_series_y_lower\n self.quantile_series_y_upper = 1. 
- self.quantile_series_y_upper", "def flipper(s, rate, p, whitetile):\r\n if rate > p and whitetile:\r\n #flip spin\r\n return -s\r\n else:\r\n #don't flip spin\r\n return s", "def interpol(x, X, Y):\n \n for idx, xx in enumerate(X):\n if x <= xx:\n break\n \n x2 = xx \n y2 = Y[idx]\n x1 = X[idx-1] \n y1 = Y[idx-1] \n y = (y2-y1)/(x2-x1)*(x-x1) + y1\n \n return y", "def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n x=(float(val)-float(input_interval_start))\n y=(float(input_interval_end)-float(input_interval_start))\n z=x/y\n return output_interval_start+(z*(output_interval_end-output_interval_start))", "def rate(self, t, i):\n return np.real(self._rates[i](t))", "def relu(z: float) -> float:\n return z if z > 0 else 0.01 * z", "def _downsample(f):\n downx, downy = hl.funcs(\"downx downy\")\n downx[x, y, hl._] = (\n f[2 * x - 1, y, hl._]\n + 3.0 * (f[2 * x, y, hl._] + f[2 * x + 1, y, hl._])\n + f[2 * x + 2, y, hl._]\n ) / 8.0\n downy[x, y, hl._] = (\n downx[x, 2 * y - 1, hl._]\n + 3.0 * (downx[x, 2 * y, hl._] + downx[x, 2 * y + 1, hl._])\n + downx[x, 2 * y + 2, hl._]\n ) / 8.0\n return downy", "def resample_sliprate(self, dt, nsamp):\n t_new = np.linspace(0, nsamp * dt, nsamp, endpoint=False)\n t_old = np.linspace(0, self.dt * len(self.sliprate),\n len(self.sliprate), endpoint=False)\n\n self.sliprate = interp(t_new, t_old, self.sliprate)\n self.dt = dt", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def raw():\n return (((.25 * x) + .75) * x - 1.5) * x - 2", "def cur_approx(self):\n return invert_normal_params(self.Q, self.r)", "def inverse_transform(self, Xt):\n return np.clip(\n super(Real, self).inverse_transform(Xt).astype(float), self.low, self.high\n )", "def randomInverseTimeLearningRate(rate):\n def function(t):\n return random() * float(rate)/t\n return function" ]
[ "0.67719895", "0.642719", "0.6261974", "0.6227211", "0.59597015", "0.591231", "0.5859282", "0.5853899", "0.58244973", "0.5791869", "0.5731416", "0.57247967", "0.56826323", "0.5661443", "0.56369877", "0.5628915", "0.56039304", "0.55694044", "0.5539542", "0.5478829", "0.5469235", "0.540209", "0.5374722", "0.5358939", "0.53531265", "0.53413594", "0.53304225", "0.5317372", "0.53159714", "0.531575" ]
0.81297004
0
Generates a new time series with reverse-rated values from another
def reverseRateTS (self,ts):
    output = []
    for line in ts.data:
        output.append([line[0],self.rate2(line[1]),line[2]])
    return timeseries(output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse_compress(self, pieces, start):\n \n time_series = [start]\n # stitch linear piece onto last\n for j in range(0, len(pieces)):\n x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]\n #print(x)\n y = time_series[-1] + x\n time_series = time_series + y[1:].tolist()\n\n return time_series", "def inverse_compress(self, pieces, start):\n \n time_series = [start]\n # stitch linear piece onto last\n for j in range(0, len(pieces)):\n x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]\n #print(x)\n y = time_series[-1] + x\n time_series = time_series + y[1:].tolist()\n\n return time_series", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n if self.fixed:\n return df\n else:\n window = self.window\n if trans_method == 'original':\n staged = self.first_values\n diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(\n len(df.index) - window\n )\n temp_cols = diffed.columns\n for n in range(len(diffed.index)):\n temp_index = diffed.index[n]\n temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[\n n\n ].reset_index(drop=True).astype(float)\n temp_row = pd.DataFrame(\n temp_row.values.reshape(1, len(temp_row)), columns=temp_cols\n )\n temp_row.index = pd.DatetimeIndex([temp_index])\n staged = pd.concat([staged, temp_row], axis=0)\n return staged\n\n # current_inversed = current * window - cumsum(window-1 to previous)\n if trans_method == 'forecast':\n staged = self.last_values\n df = pd.concat([self.last_rolling, df], axis=0)\n diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(\n len(df.index)\n )\n diffed = diffed.tail(len(diffed.index) - 1)\n temp_cols = diffed.columns\n for n in range(len(diffed.index)):\n temp_index = diffed.index[n]\n temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[\n n\n ].reset_index(drop=True).astype(float)\n temp_row = pd.DataFrame(\n temp_row.values.reshape(1, len(temp_row)), columns=temp_cols\n )\n temp_row.index = pd.DatetimeIndex([temp_index])\n staged = pd.concat([staged, temp_row], axis=0)\n staged = staged.tail(len(diffed.index))\n return staged", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n\n if trans_method == 'original':\n df = pd.concat(\n [self.first_values, (df - df.shift(1)).tail(df.shape[0] - 1)], axis=0\n )\n return df\n else:\n df_len = df.shape[0]\n df = pd.concat([self.last_values, df], axis=0)\n df = df - df.shift(1)\n return df.tail(df_len)", "def transform(self, y=None):\n\n df = self.X.copy()\n num_days = (\n int(\n np.timedelta64((max(df[\"date\"]) - min(df[\"date\"])), \"D\")\n / np.timedelta64(1, \"D\")\n )\n + 1\n )\n start = pd.to_datetime(min(df[\"date\"]))\n dates = [(start + np.timedelta64(i, \"D\")) for i in range(num_days)]\n\n seq = pd.DataFrame({\"dt_time\": dates, \"day_seq\": np.arange(num_days)})\n seq[\"date\"] = seq[\"dt_time\"].dt.date\n\n df1 = df.join(seq.set_index(\"date\"), on=\"date\")\n\n df1[\"year\"] = df1[\"dt_time\"].dt.year\n df1[\"month\"] = df1[\"dt_time\"].dt.month\n df1[\"day\"] = df1[\"dt_time\"].dt.day\n df1[\"day_of_week\"] = df1[\"dt_time\"].dt.weekday\n df1[\"month_day\"] = df1[\"dt_time\"].dt.strftime(\"%m/%d\")\n df1[\"month_weekday\"] = df1[\"dt_time\"].dt.strftime(\"%b_%a\")\n df1[\"month\"] = df1[\"dt_time\"].dt.strftime(\"%m/%d\")\n return df1", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n tile_len = len(self.tile_values_lag_1.index)\n df_len = df.shape[0]\n sdf = pd.DataFrame(\n np.tile(self.tile_values_lag_1, (int(np.ceil(df_len / tile_len)), 1))\n )\n 
if trans_method == 'original':\n sdf = sdf.tail(df_len)\n else:\n sdf = sdf.head(df_len)\n sdf.index = df.index\n sdf.columns = df.columns\n return df + sdf", "def time_series(self, length):\n maker = r.Recomposer(self._components, self.bias)\n return maker.time_series(length)", "def series_to_supervised(data, n_in=1, n_out=1, dropnan=True, stride=None, dates=False, leaks=True):\n df = pd.DataFrame(data)\n \n time = None\n if 'date' in df.columns:\n time = 'date'\n elif 'time' in df.columns:\n time = 'time'\n if time != None:\n df = df.drop([time], axis=1)\n \n if 'leak' in df.columns:\n df = df.drop(['leak'], axis=1) \n n_vars = df.shape[1]\n times_column = list()\n if dates and time != None:\n times_column = data[time]\n del data\n \n cols, names, pivots = list(), list(), list()\n \n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n\t# put it all together\n agg = pd.concat(cols, axis=1)\n \n agg.columns = names\n\n #stride - delete windows\n if stride != None:\n indexes_to_drop = list()\n for i in range(stride, agg.shape[0], stride):\n print(\"index\", i)\n pivots += [i]\n \n onset = 0\n offset = pivots[0]\n for i in range(0, len(pivots)):\n print(\"onset\", onset)\n print(\"offset\", offset)\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n try:\n onset = pivots[i] + 1\n offset = pivots[i+1]\n \n except IndexError:\n onset = pivots[i] + 1\n offset = agg.shape[0]\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n \n \n \n print(\"indexes_to_drop\", indexes_to_drop)\n \n agg.drop(df.index[indexes_to_drop], inplace=True)\n \"\"\"\n if dates and time!=None:\n agg[time] = times_column\n \"\"\" \n # drop rows with NaN values \n if dropnan:\n agg.dropna(inplace=True)\n \n\n return agg", "def invert( self ) :\n\n series_ = self.copy( )\n for l in xrange( 1, len( series_ ), 2 ) : series_.coefficients[l] *= -1\n return( series_ )", "def cross_timeseries(series1, series2):\n\n ts_new1 = []\n val_new1 = []\n\n ts_new2 = []\n val_new2 = []\n\n for i in range(len(series1[1])):\n # for j in range(len(series2[1])):\n if series1[1][i] in series2[1]:\n ts_new1.append(series1[1][i])\n val_new1.append(series1[0][i])\n ts_new2.append(series2[1][series2[1].index(series1[1][i])])\n val_new2.append(series2[0][series2[1].index(series1[1][i])])\n\n return [val_new1, ts_new1], [val_new2, ts_new2]", "def getNewDF_Y(self, originalDF):\n new_temps = [x for x in range(-10, 10, 1)]\n for unit in range(-10, 10, 1):\n new_temps[unit] = originalDF\n returnVal = pd.concat(new_temps)\n return returnVal", "def invert_transformation(df_train, df_forecast, second_diff=False):\n df_fc = df_forecast.copy()\n columns = df_train.columns\n for col in columns: \n # Roll back 2nd Diff\n if second_diff:\n df_fc[str(col)+'_1d'] = (df_train[col].iloc[-1]-df_train[col].iloc[-2]) + df_fc[str(col)+'_2d'].cumsum()\n # Roll back 1st Diff\n df_fc[str(col)+'_forecast'] = df_train[col].iloc[-1] + df_fc[str(col)+'_1d'].cumsum()\n return df_fc", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n lag = self.lag\n # add last values, group by lag, cumsum\n if trans_method == 'original':\n df = pd.concat([self.first_values, 
df.tail(df.shape[0] - lag)])\n return df.cumsum()\n else:\n df_len = df.shape[0]\n df = pd.concat([self.last_values, df], axis=0)\n if df.isnull().values.any():\n raise ValueError(\"NaN in DifferencedTransformer.inverse_transform\")\n return df.cumsum().tail(df_len)", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n df = (df + 1).replace([0], np.nan)\n df = df.fillna((df[df != 0]).abs().min()).fillna(0.1)\n\n # add last values, group by lag, cumprod\n if trans_method == 'original':\n df = pd.concat([self.first_values, df.tail(df.shape[0] - 1)], axis=0)\n return df.cumprod()\n else:\n df_len = df.shape[0]\n df = pd.concat([self.last_values, df], axis=0)\n return df.cumprod().tail(df_len)", "def dataTimeSeries(timesteps,df,predictors,target,dropnan,out=2,dropVars=True): \r\n \r\n series = series_to_supervised(df[predictors+[target]].copy(),timesteps,out,dropnan=dropnan)\r\n \r\n if dropnan==False:\r\n series.replace(pd.np.nan,0,inplace=True)\r\n \r\n # Dropping other variables:\r\n if dropVars:\r\n index = list(np.arange(series.shape[1]-2,\r\n series.shape[1]-len(predictors)-2,\r\n -1))\r\n \r\n labels = [item for idx,item in enumerate(series.columns) \r\n if idx in index]\r\n \r\n #print(\"Eliminando variáveis: {}\".format(labels))\r\n series.drop(labels,axis=1,inplace=True) \r\n \r\n return series", "def downsample(time_series,res = '0.2S'):\n\n Nvalues = len(time_series.index)\n samplerate = 1/ ((time_series.timestamp[Nvalues-1] - time_series.timestamp[0]) / Nvalues)\n timestart = dt.datetime(1970, 1, 1, 0, 0, 0, 0) #dt.datetime.now()\n start = pd.Timestamp(timestart)\n end = pd.Timestamp(timestart + dt.timedelta(seconds=Nvalues/samplerate))\n t = np.linspace(start.value, end.value, Nvalues)\n t = pd.to_datetime(t)\n time_series['time'] = t\n time_series = time_series.resample(res,on='time').mean() # downsample to 0.2 second intervals\n time_series.index.name = 'time'\n time_series.reset_index(inplace=True)\n return time_series", "def __sub__(self, other):\n if not isinstance(other, SMTimeSeries):\n raise TypeError(\"NotImplemented Error\")\n ts = self._fsm.get(self._id) - other._fsm.get(other._id)\n return SMTimeSeries(ts._time, ts._value, self._fsm)", "def test_get_drift_timeseries(self):\n # NB inputs need to be to be lists\n ref1 = 1.0\n ref2 = 2.0\n data = {'target': {'reflectance': [ref1,]},\n 'reference': {'reflectance': [ref2,]}\n }\n doublet = ([0, 0], )\n result = librad_drift.RadiometricDrift.get_drift_timeseries(data, doublet)\n self.assertEquals(result, [ref1/ref2, ]) # Output will be a list", "def createIndoorTimeseries(ts, insulation):\r\n result = ts.copy()\r\n result.correct(-INSULATIONS[insulation])\r\n result.addLevel(BACKGROUND)\r\n return result", "def reverse_difference():", "def test_differencer_same_series(y, lags, index_type):\n if index_type == \"int\":\n y = y.reset_index(drop=True)\n transformer = Differencer(lags=lags, na_handling=\"drop_na\")\n y_transform = transformer.fit_transform(y)\n y_reconstructed = transformer.inverse_transform(y_transform)\n\n # Reconstruction should return the reconstructed series for same indices\n # that are in the `Z` timeseries passed to inverse_transform\n _assert_array_almost_equal(y.loc[y_reconstructed.index], y_reconstructed)", "def transform(self, x):\n if self.mode == 'lag':\n shifted = [x.shift(-abs(o), axis=0) for o in self.offsets]\n return pd.concat(shifted, axis=1)", "def transform(self, y=None):\n num_days = (\n int(\n np.timedelta64(\n pd.to_datetime(self.end_date) - 
pd.to_datetime(self.start_date), \"D\"\n )\n / np.timedelta64(1, \"D\")\n )\n + 1\n )\n dates = [\n (pd.to_datetime(self.start_date) + np.timedelta64(i, \"D\"))\n for i in range(num_days)\n ]\n start_seq = int(\n (\n np.timedelta64(\n pd.to_datetime(self.start_date) - pd.to_datetime(self.model_end[0]),\n \"D\",\n )\n + self.model_end[1]\n )\n / np.timedelta64(1, \"D\")\n )\n df = pd.DataFrame(\n {\"dt_time\": dates, \"day_seq\": np.arange(start_seq, start_seq + num_days)}\n )\n df[\"date\"] = df[\"dt_time\"].dt.date\n df[\"year\"] = df[\"dt_time\"].dt.year\n df[\"month\"] = df[\"dt_time\"].dt.month\n df[\"day\"] = df[\"dt_time\"].dt.day\n df[\"day_of_week\"] = df[\"dt_time\"].dt.weekday\n df[\"month_day\"] = df[\"dt_time\"].dt.strftime(\"%m/%d\")\n df[\"month_weekday\"] = df[\"dt_time\"].dt.strftime(\"%b_%a\")\n return df", "def _build_forecast_series(self,\n points_preds: np.ndarray) -> TimeSeries:\n\n time_index = self._generate_new_dates(len(points_preds))\n\n return TimeSeries.from_times_and_values(time_index, points_preds, freq=self.training_series.freq())", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def stepify(times, values):\n new_times = np.empty((2*times.size - 1,))\n new_values = np.empty_like(new_times)\n new_times[::2] = times\n new_times[1::2] = times[1:]\n new_values[::2] = values\n new_values[1::2] = values[:-1]\n return new_times, new_values", "def ts_resample(self):\n try:\n ts_freq = pd.DataFrame(\n index=pd.date_range(self.ts_df.index[0], self.ts_df.index[len(self.ts_df) - 1], freq=self.freq),\n columns=['dummy'])\n except ValueError:\n self._uvts_cls_logger.exception(\"Exception occurred, possibly incompatible frequency!\")\n sys.exit(\"STOP\")\n\n if 
self.fill_method == 'ffill':\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n self.ts_df.y = self.ts_df.y.fillna(method='ffill')\n # if np.isnan ( self.ts_df.y ).any ():\n # self.ts_df.y = self.ts_df.y.fillna ( method='bfill' )\n else: # interp\n xp = np.linspace(0, self.ts_df.size, self.ts_df.size, endpoint=False)\n fp = self.ts_df['y']\n # join\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n # pick new points\n x = np.linspace(0, ts_freq.size, ts_freq.size, endpoint=False)\n x = x[self.ts_df['y'].isna()]\n print(x.size)\n print(x)\n\n # put the values\n self.ts_df.y[self.ts_df['y'].isna()] = np.interp(x, xp, fp)\n\n if np.isnan(self.ts_df.y).any():\n self._uvts_cls_logger.warning(\"Some NaN found, something went wrong, check the data!\")\n sys.exit(\"STOP\")\n\n self._uvts_cls_logger.info(\"Time series resampled at frequency: \" + str(self.ts_df.index.freq) +\n \". New shape of the data: \" + str(self.ts_df.shape))\n self._uvts_cls_logger.info(\"Using time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n return self", "def Series4():\n\n fruits4 = ask() # call function to take default list or execute Series1\n\n fruits4_copy = fruits4[:]\n for i, item in enumerate(fruits4):\n item_copy = list(item)\n item_copy.reverse()\n item_copy = \"\".join(item_copy)\n fruits4_copy[i] = item_copy\n fruits4_copy.pop()\n print('original=', fruits4, '\\n', 'copy=', fruits4_copy)", "def generate_fake_timeseries(fr, to, interval=60000, create=4):\n fr_timestamp = int(datetime.datetime.strptime(fr[:-5]+'+0000', \"%Y-%m-%dT%H:%M:%S%z\").timestamp()*1000)\n to_timestamp = int(datetime.datetime.strptime(to[:-5]+'+0000', \"%Y-%m-%dT%H:%M:%S%z\").timestamp()*1000)\n return_series = []\n for r in range(0,create):\n val = random.random()*100\n series_obj = {'target':'series'+str(r+1), 'datapoints':[]}\n series_obj['datapoints'] = [[val, fr_timestamp]]\n a = fr_timestamp\n while a < to_timestamp:\n a += interval\n val += random.randint(-1,1)\n series_obj['datapoints'].append([val, a])\n return_series.append(series_obj)\n return return_series", "def generate_timeseries(F=F, H=H, stop=2000, x0=np.array([-0.72, -0.64]),\n R_v=np.eye(2)*0, R_n=np.eye(2)*0.001):\n dim = 2 # Number of dimensions for the system\n U, Y = [], []\n\n x = x0\n for k in range(stop):\n U.append(u(k, dim))\n x = F(x, U[-1]) + np.random.multivariate_normal(np.zeros(dim), R_v)\n Y.append(H(x) + np.random.multivariate_normal(np.zeros(dim), R_n))\n\n return U, Y, R_v, R_n" ]
[ "0.56737614", "0.56737614", "0.56249565", "0.5581555", "0.55597603", "0.5551499", "0.5501329", "0.5489588", "0.5466132", "0.5429593", "0.5420211", "0.540473", "0.53927773", "0.5344522", "0.52980465", "0.5276862", "0.5262544", "0.52320635", "0.522053", "0.5171074", "0.51193786", "0.5116662", "0.5094705", "0.5070847", "0.5069973", "0.5069292", "0.5060705", "0.5043752", "0.504172", "0.5040773" ]
0.59851074
0
Tests loading config with a module callable that has no config.
def test_none_in_config(self, mocked_callable_loader, mocked_load_config): config_filename = 'aconfigfile' importer.Finder(config_filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config(self, mocked_callable_loader, mocked_load_config):\n config_filename = 'aconfigfile'\n finder = importer.Finder(config_filename)\n mocked_load_config.assert_called_once_with(config_filename)\n\n module_config = finder.config.get('fake_package.fake_module')\n self.assertTrue(module_config is not None)\n self.assertTrue('callable' in module_config)\n self.assertTrue('config' in module_config)", "def test_load_configuration_is_callable():\n assert callable(config.load_configuration)", "def testLoadConfigs_noConfigs(self):\n config_path = GetTestFilePath('no_config')\n with six.assertRaisesRegex(\n self, lab_config.ConfigError,\n r'.* no lab config files under the path.'):\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()", "def test_config_is_loaded(config):\n assert config[\"DEBUG\"] is False", "def test_module_in_config(self, mocked_callable_loader,\n mocked_loader, mocked_config):\n config_filename = 'aconfigfile'\n finder = importer.Finder(config_filename)\n\n fullname = 'fake_package.fake_module'\n path = None\n finder.find_module(fullname, path)\n mocked_loader.assert_called_once_with(path, 'fake_callable',\n mock_callable_config)", "def test_config_class():\n assert config is not None", "def testLoadConfigs_notExist(self):\n config_path = GetTestFilePath('unified_lab_config/invalid_lab/hosts')\n with six.assertRaisesRegex(\n self, lab_config.ConfigError, r'.* doesn\\'t exist.'):\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()", "def testLoadConfigs_notExist(self):\n config_path = GetTestFilePath('valid/non_exist_config.yaml')\n with six.assertRaisesRegex(\n self, lab_config.ConfigError, r'.* doesn\\'t exist.'):\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()", "def test_load_no_backend(self):\n sample_file = os.path.join(os.path.dirname(__file__), \"broken-config-no-backend.yaml\")\n self.assertTrue(os.path.exists(sample_file), \"Did not found {}\".format(sample_file))\n\n def try_it():\n self.configurator.load_yaml_file(sample_file)\n\n self.assertRaises(ConfigurationError, try_it)", "def test_load_config(self):\n config = copyclipper.LoadConfig()\n self.assertTrue(len(config) > 0)", "def test_no_setting(self):\n with self.assertRaises(ImproperlyConfigured):\n import_from_setting('DOES_NOT_EXIST')", "def check_config(cfg):", "def check_config(config):\n pass", "def test_module_initialisation_method_call_only(app_config):\n bootstrap._initialize_modules(app_config)", "def test_invalid_missing_config(self):\n invalid_config_data = 'test'\n with mock_open(LAMBDA_FILE, invalid_config_data):\n with mock_open(GLOBAL_FILE, invalid_config_data):\n with patch('os.path.exists') as mock_exists:\n mock_exists.return_value = False\n _load_config()", "def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')", "def test_no_config(self):\n exit_code = self.run_beat()\n\n assert exit_code == 1\n assert self.log_contains(\"error loading config file\") is True", "def test_get_is_callable():\n assert callable(config.get)", "def test_hooks_falsy_by_default():\n 
config = util.read_config(\"some-nonexistant-path\")\n assert not config[\"pre_write\"]\n assert not config[\"post_write\"]", "def test_config_file_empty(get_empty_config, monkeypatch, get_root, conf_obj):\n path = os.path.join(get_root, 'res', 'missing.yml')\n\n with pytest.raises(FileNotFoundError):\n get_empty_config(conf_obj, path)", "def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')", "def _validate_pf_config(module, config):\n rc, out, err = module.run_command(['ls', config])\n\n # Fail if no config file is present\n if rc != 0:\n msg_err = f'Error: Config file does not exist: {config}'\n module.fail_json(msg=msg_err)", "def test_config_no_file(self):\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n self.assertRaises(ConfigError, Config, \"test_config.conf\")", "def test_empty_schema_cant_find_module() -> None:\n with patch(\"inspect.getmodule\", return_value=None):\n cv.empty_config_schema(\"test_domain\")({\"test_domain\": {\"foo\": \"bar\"}})", "def test_with_no_cfg(tmp_path):\n # Arange\n cfg = os.path.join(tmp_path, \"gcbo.json\")\n\n # Act\n rm = gcbo.RepoManager(cfg=cfg)\n\n # Assert\n assert rm.has_cfg is False", "def test_find_config_nonexist(self):\n with pytest.raises(scuba.config.ConfigError):\n scuba.config.find_config()", "def test_instantiate_non_existent_module(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"non_existent_module.some_class\"},\n {\"_target_\": \"another_non_existent_module.some_class\", \"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises ModuleNotFoundError for each test config\n for test_conf in test_configs:\n self.assertRaises(ModuleNotFoundError, instantiate, test_conf)", "def _check_config(self):", "def test_required_config_none(self):\n base_config = BaseConfig()\n setattr(base_config, 'required_config', ['TEST_CONF'])\n setattr(base_config, 'TEST_CONF', None)\n\n self.assertRaises(Exception, base_config.check_required_config)", "def test_defaults():\n config = Config(\n env_var='DO_NOT_USE',\n env_prefix='DO_NOT_USE',\n entry_point_name='DO_NOT_USE',\n )\n\n assert not config.keys()" ]
[ "0.7291979", "0.71385074", "0.6929926", "0.67423624", "0.6735557", "0.6493901", "0.6454785", "0.64439315", "0.6443603", "0.64419734", "0.63877636", "0.63867325", "0.6380733", "0.63782287", "0.6355612", "0.6320208", "0.6302732", "0.6293869", "0.62638265", "0.6169299", "0.6167937", "0.61522746", "0.61435884", "0.6123123", "0.61076003", "0.60906875", "0.60905683", "0.607798", "0.6045839", "0.603269" ]
0.80493516
0
Function to abstract output image filename construction. The current approach is to create a 'PROCESSED' subdir inside the subdirectory corresponding to the mid-period of the date range for the full-size images and a 'SPLIT' subdirectory for the sub-images.
def construct_image_savepath(self, date_string, coords_string, image_type="RGB"): if "SUB" in image_type: output_location = os.path.join(self.output_location, date_string, "SPLIT") else: output_location = os.path.join(self.output_location, date_string, "PROCESSED") # filename is the date, coordinates, and image type filename = f'{date_string}_{coords_string}_{image_type}.png' # full path is dir + filename full_path = os.path.join(output_location, filename) return full_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_subdirectory(subdir_path, regex_images, output_folder_if_pickle,\n min_n_leftside_metadata=10, only_ionogram_content_extraction_on_leftside_metadata=True, to_pickle=True):\n # Run segment_images on the subdirectory \n df_img,df_loss,df_outlier = segment_images(subdir_path, regex_images)\n\n # Determine ionogram grid mappings used to map (x,y) pixel coordinates of ionogram trace to (Hz, km) values\n stack = all_stack(df_img)\n col_peaks,row_peaks,mapping_Hz, mapping_km = get_grid_mappings(stack)\n\n # Translate metadata located on the left\n df_img_left = df_img[df_img['metadata_type']== 'left']\n \n if len(df_img_left.index) > min_n_leftside_metadata:\n # Determine leftside metadata grid (pixel coordinates to number, category mappings)\n df_img_left, df_loss_meta,dict_mapping,dict_hist= get_leftside_metadata(df_img_left,subdir_path)\n df_all_loss = df_loss.append(df_loss_meta)\n else:\n df_all_loss = df_loss\n \n # Extract the coordinates of the ionogram trace (black), Map the (x,y) pixel coordinates to (Hz, km) values and Extract select parameters i.e. fmin\n if only_ionogram_content_extraction_on_leftside_metadata:\n df_processed, df_loss_coord = extract_coord_subdir_and_param(df_img_left,subdir_path,col_peaks,row_peaks,mapping_Hz,mapping_km)\n else:\n df_processed, df_loss_coord = extract_coord_subdir_and_param(df_img,subdir_path,col_peaks,row_peaks,mapping_Hz,mapping_km)\n\n df_processed['mapping_Hz'] = [mapping_Hz] * len(df_processed.index)\n df_processed['mapping_km'] = [mapping_km] * len(df_processed.index)\n\n if to_pickle:\n start,subdir_name = ntpath.split(subdir_path[:-1])\n start,dir_name = ntpath.split(start)\n df_processed.to_pickle(os.pardir + '/pickle/' + str(dir_name)+'_'+str(subdir_name)+'.pkl')\n \n df_all_loss = df_all_loss.append(df_loss_coord)\n return df_processed, df_all_loss,df_outlier", "def _out_fn(self, split_index):\n if split_index > 999:\n raise ValueError(\"Too many splitted files to generate: number \" +\n \"of splitted files exceed 1000.\")\n name = self.out_format.format(split_index)\n return op.join(self.out_dir, name)", "def _create_dir_name(date, dir_structure='ymd', is_exif=True):\n if is_exif:\n date_split = date.split(' ')[0].split(':')\n else:\n date_split = date.split(' ')[0].split('-')\n dir_name = '\\\\'\n if 'y' in dir_structure:\n dir_name += date_split[0] + '\\\\'\n if 'm' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:2]) + '\\\\'\n if 'd' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:3]) + '\\\\'\n return dir_name", "def make_processed_directories(zone, region, zoom_level = 19, image_size = 256):\n os.system(f'mkdir ../../data/processed/images-{image_size}-{region}-{zone}-{zoom_level}')\n os.system(f'mkdir ../../data/processed/masks-{image_size}-{region}-{zone}-{zoom_level}')\n img_path = f'../../data/processed/images-{image_size}-{region}-{zone}-{zoom_level}'\n mask_path = f'../../data/processed/masks-{image_size}-{region}-{zone}-{zoom_level}'\n return img_path, mask_path", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last 
image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def name_final_path(out_img_folder):\n if out_img_folder == None:\n return \"./.out_hidden_images\"\n else:\n return out_img_folder", "def calc_filename(name, split='.', include_time=False, dbg=False):\n dt_str, time_str = calc_date_time()\n strt = str(name).split(split)\n res = ''\n\n if len(strt) == 1:\n res = strt[0]\n elif len(strt) > 1:\n append = split.join(strt[1:]) if len(strt) > 2 else strt[1]\n if include_time:\n res = \"_\".join([strt[0], dt_str, time_str])\n else:\n res = \"_\".join([strt[0], dt_str])\n res = res + split + append\n else:\n res = 'FAILED'\n\n dbc.print_helper((\"calc_filename: \" + res), dbg=dbg)\n\n return res", "def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\tsavedir = path_train + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)\n\t\t\tsavedir = path_label + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)", "def split_and_save_sub_images(self, image,\n date_string,\n coords_string,\n image_type,\n npix=50):\n\n coords = [float(coord) for coord in coords_string.split(\"_\")]\n sub_images = crop_image_npix(image, npix,\n region_size=self.region_size,\n coords = coords)\n\n output_location = os.path.dirname(self.construct_image_savepath(date_string,\n coords_string,\n 'SUB_'+image_type))\n for i, sub in enumerate(sub_images):\n # sub will be a tuple (image, coords) - unpack it here\n sub_image, sub_coords = sub\n output_filename = f'sub{i}_'\n output_filename += \"{0:.3f}_{1:.3f}\".format(sub_coords[0],\n sub_coords[1])\n output_filename += \"_{}\".format(image_type)\n output_filename += '.png'\n self.save_image(sub_image, output_location, output_filename, verbose=False)\n return True", "def create_output_directory_for_resized_images():\n\n try:\n if not os.path.isdir(RESIZED_NEGATIVE_PATH):\n return os.makedirs(RESIZED_NEGATIVE_PATH)\n elif not os.path.isdir(RESIZED_POSITIVE_PATH):\n return os.makedirs(RESIZED_POSITIVE_PATH)\n except OSError as e:\n print('Error --> {}'.format(e))", "def filename_generate(image_class, size=12, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):\n\tnew_filename = time.strftime(\"%d-%m-%Y_\")\n\tnew_filename = new_filename + ''.join(random.choice(chars) for _ in range(size))\n\tnew_filename = new_filename + \"_P\" + str(image_class)\n\treturn new_filename", "def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)", "def get_output_filename(item: str, root: str, i: int) -> str:\n element_split = item.split(\"/\")\n item, ext = element_split[-1].split(\".\")\n if i < 0:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}.{ext}\"\n else:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}_aug{i}.{ext}\"", "def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")", "def 
construct_basename(self, row, obstime=None):\n _obstime = self.construct_obstime(row) if obstime is None else obstime\n tiso = time.Time(_obstime, format='isot')\n dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')\n return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],\n self['target'][row].replace(\" \", \"\"),\n self.spectrograph.camera,\n datetime.datetime.strftime(dtime, '%Y%m%dT'),\n tiso.value.split(\"T\")[1].replace(':',''))", "def update_destination_file_name (file_name):\n\tglobal COUNTER \n\tCOUNTER += 1\n\tsplitted = file_name.split('/')\n\treturn file_name[:len(file_name)-len(splitted[-1])] + 'Image%05d' % COUNTER +'_'+splitted[-1]", "def get_image_filename(self, filename):\n path = 'images/{folder}/{filename}'.format(\n folder=self.folder,\n filename=filename\n )\n return path", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def _get_image_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Rectified_rescaled/0.125/\"\n else:\n return \"Rectified_rescaled/0.25/\"\n else:\n return \"Rectified/\"", "def split_dir(dirr, output_dir, dirs=['train', 'validation', 'test'], split=(.5,.25,.25)):\n\n # get all image paths\n image_paths = []\n for filepath in pathlib.Path(dirr).glob('**/*'):\n image_paths.append(filepath.absolute())\n\n # organize into {class_name:[class_image_paths, ...], ...}\n class_dict = {}\n for i in image_paths:\n fname = str(i).split(\"/\")\n file_name = fname[len(fname)-1]\n class_name = fname[len(fname)-2]\n if class_name not in class_dict.keys():\n class_dict[class_name] = []\n class_dict[class_name].append(str(i))\n\n del class_dict['images'] #I don't know why\n\n # organize into {class_name:[[train_paths],[validation_paths],[test_paths]], ...}\n # by given\n for k in class_dict.keys():\n paths = class_dict[k]\n\n train_split = int(len(paths)*split[0])\n validation_split = int(len(paths)*split[1])\n\n train_paths = paths[train_split:]\n validation_paths = paths[train_split:validation_split+train_split]\n test_paths = paths[validation_split+train_split:]\n\n class_dict[k] = [train_paths, validation_paths, test_paths]\n\n # make output dirs\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n os.makedirs(output_dir+\"/\"+dirs[0])\n os.makedirs(output_dir+\"/\"+dirs[1])\n os.makedirs(output_dir+\"/\"+dirs[2])\n\n # move everything\n for k in class_dict.keys():\n for d_i,d in 
enumerate(dirs):\n\n if not os.path.exists(output_dir+\"/\"+d+\"/\"+k):\n os.makedirs(output_dir+\"/\"+d+\"/\"+k)\n\n for path in class_dict[k][d_i]:\n file_name = path.split(\"/\")\n file_name = file_name[len(file_name)-1]\n copyfile(path, output_dir+\"/\"+d+\"/\"+k+\"/\"+file_name)", "def obsfileinfo2filefolder(obsfileinfo):\n filefoldername = '{}_{}'.format(obsfileinfo['station_id'],\n obsfileinfo['filenametime'])\n if obsfileinfo['ldat_type'] == \"bst-357\":\n filefoldername += \"_rcu357\"\n else:\n if obsfileinfo['rcumode'] != []:\n rcumodestr = \\\n ''.join([str(rcumode) for rcumode in obsfileinfo['rcumode']])\n else:\n rcumodestr = str(obsfileinfo['mode'])\n filefoldername += \"_spw\" + rcumodestr\n if obsfileinfo['sb'] != [] and obsfileinfo['sb'] != '':\n filefoldername += \"_sb\"\n filefoldername += seqlists2slicestr(obsfileinfo['sb'])\n if 'integration' in obsfileinfo:\n filefoldername += \"_int\" + str(int(obsfileinfo['integration']))\n if 'duration_scan' in obsfileinfo:\n filefoldername += \"_dur\" + str(int(obsfileinfo['duration_scan']))\n if obsfileinfo['ldat_type'] != 'sst':\n if str(obsfileinfo['pointing']) != \"\":\n filefoldername += \"_dir\" + str(obsfileinfo['pointing'])\n else:\n filefoldername += \"_dir,,\"\n # filefoldername += \"_\" + obsfileinfo['source']\n # ldat_type extension\n filefoldername += \"_\" + obsfileinfo['ldat_type']\n return filefoldername", "def make_image_path(raw_img, input_base, base_path):\n path = os.path.dirname(raw_img)\n relpath = os.path.relpath(path, input_base)\n if relpath == '.':\n dest_folder = base_path\n else:\n dest_folder = os.path.join(base_path, relpath)\n return os.path.normpath(dest_folder)\n # return dest_folder", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def process_directory(working_directory, cc_size, output_directory):\n print \"\\nProcessing directory {0}\".format(working_directory)\n \n for dirpath, dirnames, filenames in os.walk(working_directory):\n for f in filenames:\n if f.split('.')[-1] == 'tif':\n img = load_image(os.path.join(dirpath, f))\n onebitimage = img.to_onebit()\n onebitimage.despeckle(int(cc_size))\n output_path = os.path.join(output_directory, f)\n # print onebitimage\n # print (os.path.join(dirpath, f.split('.')[0]+ '_NEW.' + f.split('.')[-1]))\n # onebitimage.save_tiff(os.path.join(dirpath, f.split('.')[0]+ '_NEW.' + f.split('.')[-1]))\n\n onebitimage.save_tiff(output_path)\n print output_path\n else:\n pass", "def _get_output_filename(dataset_dir, split_name):\n return '%s/%s*.tfrecord' % (dataset_dir, split_name)", "def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 
'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return", "def generate_filename(\r\n filepath,\r\n filestartwith,\r\n fileendwith,\r\n run_date,\r\n filemask):\r\n\r\n filedate = generate_dateformat(run_date, filemask)\r\n if not filedate:\r\n filename = filestartwith\r\n else:\r\n filename = filestartwith + filedate\r\n\r\n if fileendwith:\r\n filename = filename + fileendwith\r\n\r\n if filepath and len(filepath.strip()) > 0:\r\n filename = filepath.strip() + '/' + filename\r\n\r\n return filename", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))" ]
[ "0.63908", "0.6056102", "0.59397084", "0.59102297", "0.5876066", "0.5860729", "0.5819184", "0.58062077", "0.580037", "0.5758858", "0.57563317", "0.5748149", "0.57457376", "0.5732462", "0.56949836", "0.56919485", "0.56668204", "0.5616348", "0.56162333", "0.5608194", "0.5601554", "0.5580091", "0.55794823", "0.55767566", "0.5575614", "0.5566899", "0.5530796", "0.552057", "0.5517063", "0.55158323" ]
0.6616829
0
Function to run the module. Loop over all date sub-ranges and call process_single_date() on each of them.
def run(self): super().run() date_subdirs = sorted(self.list_directory(self.input_location, self.input_location_type)) for date_subdir in date_subdirs: if not re.search("^([\d]{4}-[\d]{2}-[\d]{2})", date_subdir): print("{}: Directory name {} not in YYYY-MM-DD format"\ .format(self.name, date_subdir)) continue date_path = os.path.join(self.input_location, date_subdir, "RAW") if len(self.list_directory(date_path, self.input_location_type)) == 0: continue processed_ok = self.process_single_date(date_path) if not processed_ok: continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n # dates lists for testing\n dates = [\n datetime.date(2010, 1, 15),\n datetime.date(2012, 6, 29)\n ]\n targets = [\n datetime.date(2000, 1, 1),\n datetime.date(2016, 10, 3)\n ]\n #loop through cases\n for d in dates:\n for t in targets:\n #calculate differences\n dayDiff = diffDates(d, t, \"days\")\n monthDiff = diffDates(d, t, \"months\")\n yearDiff = diffDates(d, t, \"years\")\n #create dictionary for printing\n vals = {\"day\":dayDiff, \"month\":monthDiff, \"year\":yearDiff}\n #print out values\n for period in vals:\n diff = vals[period]\n period = str(period) + (\"s\" if diff != 1 else \"\")\n print \"There are {0} {1} between {2} and {3}\".format(diff, period, t, d)", "def main():\n ## The standard way to get arguments from the command line, \n ## make sure they are the right type, and print help messages\n parser = argparse.ArgumentParser(description=\"Compute days from yyyy-mm-dd to next mm-dd.\")\n parser.add_argument('year', type=int, help=\"Start year, between 1800 and 2500\")\n parser.add_argument('start_month', type=int, help=\"Starting month, integer 1..12\")\n parser.add_argument('start_day', type=int, help=\"Starting day, integer 1..31\")\n parser.add_argument('end_month', type=int, help=\"Ending month, integer 1..12\")\n parser.add_argument('end_day', type=int, help=\"Ending day, integer 1..12\")\n args = parser.parse_args() # will get arguments from command line and validate them\n year = args.year\n start_month = args.start_month\n start_day = args.start_day\n end_month = args.end_month\n end_day = args.end_day\n \n print(\"Checking date \", str(year) + \"/\" + str(start_month) + \"/\" + str(start_day))\n \n\n if not is_valid(year, start_month, start_day) : \n sys.exit(\"Must start on a valid date between 1800 and 2500\")\n if not is_valid(2000, end_month, end_day):\n sys.exit(\"Ending month and day must be part of a valid date\")\n count_days(year,start_month,start_day,end_month,end_day)", "def task_run_core():\n\n ## initialize parameters\n if task_get_option('format'):\n fmts = task_get_option('format')\n else:\n fmts = 'HB' # default value if no format option given\n for fmt in fmts.split(','):\n last_updated = fetch_last_updated(fmt)\n write_message(\"last stored run date is %s\" % last_updated)\n\n sql = {\n \"all\" : \"\"\"SELECT br.id FROM bibrec AS br, bibfmt AS bf\n WHERE bf.id_bibrec = br.id AND bf.format = '%s'\"\"\" % fmt,\n \"last\": \"\"\"SELECT br.id FROM bibrec AS br\n INNER JOIN bibfmt AS bf ON bf.id_bibrec = br.id\n WHERE br.modification_date >= '%(last_updated)s'\n AND bf.format='%(format)s'\n AND bf.last_updated < br.modification_date\"\"\" \\\n % {'format': fmt,\n 'last_updated': last_updated.strftime('%Y-%m-%d %H:%M:%S')},\n \"missing\" : \"\"\"SELECT br.id\n FROM bibrec as br\n LEFT JOIN bibfmt as bf\n ON bf.id_bibrec = br.id AND bf.format ='%s'\n WHERE bf.id_bibrec IS NULL\n AND br.id BETWEEN %%s AND %%s\n \"\"\" % fmt,\n }\n sql_queries = []\n cds_query = {}\n if task_has_option(\"all\"):\n sql_queries.append(sql['all'])\n if task_has_option(\"last\"):\n sql_queries.append(sql['last'])\n if task_has_option(\"collection\"):\n cds_query['collection'] = task_get_option('collection')\n else:\n cds_query['collection'] = \"\"\n\n if task_has_option(\"field\"):\n cds_query['field'] = task_get_option('field')\n else:\n cds_query['field'] = \"\"\n\n if task_has_option(\"pattern\"):\n cds_query['pattern'] = task_get_option('pattern')\n else:\n cds_query['pattern'] = \"\"\n\n if task_has_option(\"matching\"):\n cds_query['matching'] = 
task_get_option('matching')\n else:\n cds_query['matching'] = \"\"\n\n if task_has_option(\"recids\"):\n recids = split_cli_ids_arg(task_get_option('recids'))\n else:\n recids = []\n\n ### sql commands to be executed during the script run\n ###\n bibreformat_task(fmt, sql, sql_queries, cds_query, task_has_option('without'), not task_has_option('noprocess'), recids)\n return True", "def run(settings_module: str = \"\", s_start_datetime: str = \"\", s_end_datetime: str = \"\") -> None:\n if not valid_datetime_string(s_start_datetime):\n logger.error(\"Aborting run: Expected format of Start datetime is YYMMDD_HHmm\")\n return\n\n if not valid_datetime_string(s_end_datetime):\n logger.error(\"Aborting run: Expected format of End datetime is YYMMDD_HHmm\")\n return\n\n start_datetime = datetime.strptime(s_start_datetime, MONGO_DATETIME_FORMAT)\n end_datetime = datetime.strptime(s_end_datetime, MONGO_DATETIME_FORMAT)\n fields_set_datetime = datetime.strptime(FILTERED_POSITIVE_FIELDS_SET_DATE, \"%Y-%m-%d\")\n\n if start_datetime > end_datetime:\n logger.error(\"Aborting run: End datetime must be greater than Start datetime\")\n return\n\n if end_datetime > fields_set_datetime:\n logger.error(\"Aborting run: Date range must be prior to the 17th December\")\n return\n\n config, settings_module = get_config(settings_module)\n\n logging.config.dictConfig(config.LOGGING)\n\n logger.info(\"-\" * 80)\n logger.info(\"STARTING FILTERED POSITIVES LEGACY UPDATE\")\n logger.info(f\"Time start: {datetime.now()}\")\n start_time = time.time()\n\n updated_key = \"Updated\"\n time_key = \"Time taken\"\n\n mongo_versions_updated = {\n FILTERED_POSITIVE_VERSION_0: {updated_key: False, time_key: 0.0},\n FILTERED_POSITIVE_VERSION_1: {updated_key: False, time_key: 0.0},\n FILTERED_POSITIVE_VERSION_2: {updated_key: False, time_key: 0.0},\n }\n\n mlwh_versions_updated = {\n FILTERED_POSITIVE_VERSION_0: {updated_key: False, time_key: 0.0},\n FILTERED_POSITIVE_VERSION_1: {updated_key: False, time_key: 0.0},\n FILTERED_POSITIVE_VERSION_2: {updated_key: False, time_key: 0.0},\n }\n\n try:\n continue_migration = pre_migration_filtered_positive_check(config, start_datetime, end_datetime)\n\n if continue_migration:\n logger.info(f\"Selecting legacy samples from Mongo between {start_datetime} and {end_datetime}...\")\n samples = mongo_samples_by_date(config, start_datetime, end_datetime)\n\n legacy_samples_num = len(samples)\n logger.info(f\"{legacy_samples_num} samples found from Mongo\")\n\n root_sample_ids, plate_barcodes = extract_required_cp_info(samples)\n\n logger.info(\"Querying for v0 cherrypicked samples from MLWH\")\n # Get v0 cherrypicked samples\n v0_cp_samples_df = get_cherrypicked_samples_by_date(\n config,\n list(root_sample_ids),\n list(plate_barcodes),\n \"1970-01-01 00:00:01\",\n V0_V1_CUTOFF_TIMESTAMP,\n )\n\n logger.debug(f\"Found {len(v0_cp_samples_df.index)} v0 cherrypicked samples\") # type: ignore\n\n logger.info(\"Querying for cherrypicked samples from MLWH\")\n # Get v1 cherrypicked samples\n v1_cp_samples_df = get_cherrypicked_samples_by_date(\n config,\n list(root_sample_ids),\n list(plate_barcodes),\n V0_V1_CUTOFF_TIMESTAMP,\n V1_V2_CUTOFF_TIMESTAMP,\n )\n\n logger.debug(f\"Found {len(v1_cp_samples_df.index)} v1 cherrypicked samples\") # type: ignore\n\n logger.info(\"Splitting samples by version...\")\n samples_by_version = split_mongo_samples_by_version(samples, v0_cp_samples_df, v1_cp_samples_df)\n\n update_timestamp = datetime.now()\n\n for version, version_samples in 
samples_by_version.items():\n filtered_positive_identifier = filtered_positive_identifier_by_version(version)\n logger.info(f\"Updating {version} filtered positives...\")\n update_filtered_positive_fields(\n filtered_positive_identifier,\n version_samples,\n version,\n update_timestamp,\n )\n\n logger.info(\"Updated filtered positives\")\n\n logger.info(\"Updating Mongo\")\n\n for version, version_samples in samples_by_version.items():\n logger.info(f\"Updating {version} filtered positives in Mongo, total {len(version_samples)} records...\")\n mongo_update_start_time = time.time()\n mongo_updated = update_mongo_filtered_positive_fields(\n config,\n version_samples,\n version,\n update_timestamp,\n )\n if mongo_updated:\n logger.info(f\"Finished updating {version} filtered positives in Mongo\")\n\n mongo_update_end_time = time.time()\n mongo_versions_updated[version][updated_key] = True\n mongo_versions_updated[version][time_key] = round(\n mongo_update_end_time - mongo_update_start_time, 2\n )\n\n logger.info(f\"Updating {version} filtered positives in MLWH...\")\n mlwh_update_start_time = time.time()\n\n mlwh_updated = update_mlwh_filtered_positive_fields_batched(\n config, version_samples, version, update_timestamp\n )\n\n if mlwh_updated:\n logger.info(f\"Finished updating {version} filtered positives in MLWH\")\n\n mlwh_update_end_time = time.time()\n mlwh_versions_updated[version][updated_key] = True\n mlwh_versions_updated[version][time_key] = round(\n mlwh_update_end_time - mlwh_update_start_time, 2\n )\n\n logger.info(\"Finished updating databases\")\n else:\n logger.info(\"Now exiting migration\")\n except Exception as e:\n logger.error(\"---------- Process aborted: ----------\")\n logger.error(f\"An exception occurred, at {datetime.now()}\")\n logger.exception(e)\n raise\n finally:\n end_time = time.time()\n logger.info(\n f\"\"\"\n ---------- Processing status of filtered positive field migration: ----------\n -- Mongo updated with v0 filtered positives: \\\n{mongo_versions_updated[FILTERED_POSITIVE_VERSION_0][updated_key]}, \\\ntime taken: \\\n{mongo_versions_updated[FILTERED_POSITIVE_VERSION_0][time_key]}s\n -- Mongo updated with v1 filtered positives: \\\n{mongo_versions_updated[FILTERED_POSITIVE_VERSION_1][updated_key]}, \\\ntime taken: \\\n{mongo_versions_updated[FILTERED_POSITIVE_VERSION_1][time_key]}s\n -- Mongo updated with v2 filtered positives: \\\n{mongo_versions_updated[FILTERED_POSITIVE_VERSION_2][updated_key]}, \\\ntime taken: \\\n{mongo_versions_updated[FILTERED_POSITIVE_VERSION_2][time_key]}s\n -- MLWH updated with v0 filtered positives: \\\n{mlwh_versions_updated[FILTERED_POSITIVE_VERSION_0][updated_key]}, \\\ntime taken: \\\n{mlwh_versions_updated[FILTERED_POSITIVE_VERSION_0][time_key]}s\n -- MLWH updated with v1 filtered positives: \\\n{mlwh_versions_updated[FILTERED_POSITIVE_VERSION_1][updated_key]}, \\\ntime taken: \\\n{mlwh_versions_updated[FILTERED_POSITIVE_VERSION_1][time_key]}s\n -- MLWH updated with v2 filtered positives: \\\n{mlwh_versions_updated[FILTERED_POSITIVE_VERSION_2][updated_key]}, \\\ntime taken: \\\n{mlwh_versions_updated[FILTERED_POSITIVE_VERSION_2][time_key]}s\n \"\"\"\n )\n\n logger.info(f\"Time finished: {datetime.now()}\")\n logger.info(f\"Migration complete in {round(end_time - start_time, 2)}s\")\n logger.info(\"=\" * 80)", "def process_data(self):\n timer_start = time.time()\n # ensure self.df_events and self.df_locations are not None\n if self.df_events is None or self.df_locations is None:\n print(\"Missing data: either df_events 
or df_locations is None\")\n return\n # set start and end based on self.df_events if not already set\n if not self.start:\n self.start = self.df_events['event_time'].min()\n if not self.end:\n self.end = self.df_events['event_time'].max()\n print(f\"date range for events data is from {self.start} to {self.end}\")\n # create Grid object before processing any data\n grid = self.compute_grid_cells(self.df_locations)\n # clean and combine events and locations data\n df_data = self.combine_events_and_locations(grid)\n print(df_data.shape)\n # df_data.to_csv('../../../data_files/20210506_cleanedInputDataCumSum.csv', index=False)\n # df_data = pd.read_csv('../../../data_files/20210415_cleanedInputDataAprilCumSum.csv')\n # process data within grid class\n df_processed = grid.process_data(df_data, 'weekly')\n # df_processed = self.calculate_demand(df_processed)\n # df_processed.to_csv('../../../data_files/20210506_processedGridCellData.csv')\n # set df_demand to be df_processed\n df_processed.reset_index(inplace=True)\n df_processed = df_processed.astype({'date': 'str', 'avail_count': 'float', 'avail_mins': 'float', 'prob_scooter_avail': 'float', 'trips': 'float', 'adj_trips': 'float'})\n # make sure dates are within start and end dates\n start_date = str(iso8601.parse_date(self.start).date())\n end_date = str(iso8601.parse_date(self.end).date())\n df_processed = df_processed[(df_processed['date'] >= start_date) & (df_processed['date'] <= end_date)]\n self.set_demand(df_processed)\n timer_end = time.time()\n print('Elapsed time to process data:', (timer_end - timer_start)/60.0, 'minutes')", "def main():\n ## The standard way to get arguments from the command line, \n ## make sure they are the right type, and print help messages\n parser = argparse.ArgumentParser(description=\"Compute days from yyyy-mm-dd to next mm-dd.\")\n parser.add_argument('year', type=int, help=\"Start year, between 1800 and 2500\")\n parser.add_argument('start_month', type=int, help=\"Starting month, integer 1..12\")\n parser.add_argument('start_day', type=int, help=\"Starting day, integer 1..31\")\n parser.add_argument('end_month', type=int, help=\"Ending month, integer 1..12\")\n parser.add_argument('end_day', type=int, help=\"Ending day, integer 1..12\")\n args = parser.parse_args() # will get arguments from command line and validate them\n \n year = args.year\n start_month = args.start_month\n start_day = args.start_day\n end_month = args.end_month\n end_day = args.end_day\n \n if not is_valid(year, start_month, start_day):\n sys.exit(\"Must start on a valid date between 1800 and 2500\")\n else:\n print(count_days_main(year, start_month, start_day, end_month, end_day))", "def run(self) -> list:\n logger.debug('Fetching date %s', self._day.strftime('%Y/%m/%d'))\n \n regions = [r() for r in regions_list]\n air_quality = list()\n \n # fetch air quality of each region\n for r in regions:\n r.fetch_air_quality(self._day)\n \n # gather results from all regions\n for r in regions:\n # wait until region has fetched his data\n r.wait_for_quality()\n logging.info('Fetched region:%s for day:%s', r.name, self._day)\n air_quality.append({\n 'name': r.name,\n 'provinces': [\n {'name': x.name, 'short': x.short_name, 'quality': x.quality.asdict()} \n for x in r.provinces]\n })\n\n self._fetcher.fetched_result(self._day, air_quality)", "def main():\n exit_if_already_started()\n while True:\n for timeframe in ['all', 'month', 'week']:\n subreddits = load_list('subs.txt')\n while subreddits:\n # Grab all images/comments from sub, remove 
from list\n parse_subreddit(subreddits.pop(0), timeframe)", "def run(argparser: ArgumentParser) -> None:\n args = argparser.parse_args()\n\n # Parse datetime args\n if getattr(args, \"dtstart\", None):\n args.dtstart = datetime.strptime(args.dtstart, \"%Y-%m-%d\")\n\n if getattr(args, \"dtend\", None):\n args.dtend = datetime.strptime(args.dtend, \"%Y-%m-%d\").replace(\n hour=23, minute=59, second=59\n )\n\n if getattr(args, \"begin\", None):\n args.begin = datetime.strptime(args.begin, \"%Y-%m-%d\")\n\n # Execute selected function\n if args.func:\n args.func(args)\n else:\n argparser.print_help()", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def _run(self, date: datetime = None):\n\n if date is None:\n urls = self._get_csvs()\n else:\n urls = self._get_csvs_date(date)\n\n parsed = self._get_parsed_files()\n\n urls = [url for url in urls if url not in parsed]\n\n download_paths = self._create_local_download_paths(urls)\n\n # Multiprocessingly download/format/insert all csvs\n # using four times # of CPUs\n with utils.Pool(0, 4, self.name) as pool:\n pool.map(utils.download_file, urls, download_paths)\n pool.map(self._reformat_csv, download_paths)\n pool.map(self._db_insert, download_paths)\n\n with Historical_ROAs_Table() as t:\n t.delete_duplicates()\n\n self._add_parsed_files(urls)\n\n utils.delete_files(self.path)", "def loop_through_dates(in_dir,\n out_dir,\n start_date,\n end_date,\n extent,\n temporal_composite=\"monthly\",\n product=\"all_products\",\n area=\"global-extracted\",\n neodaas_name=False):\n\n start_date_obj = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date_obj = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n current_date = start_date_obj\n\n while current_date < end_date_obj:\n\n if temporal_composite.lower() == \"monthly\":\n str_date = current_date.strftime(\"%Y%m\")\n else:\n str_date = current_date.strftime(\"%Y%m%d\")\n\n 
file_path = os.path.join(in_dir, temporal_composite, product,\n \"{:02}\".format(current_date.year),\n \"*{}*nc\".format(str_date))\n in_netcdfs = glob.glob(file_path)\n\n for in_netcdf in in_netcdfs:\n\n print(\"Extracting {}\".format(in_netcdf))\n out_netcdf_dir = os.path.join(out_dir,\n \"{:02}\".format(current_date.year))\n try:\n os.makedirs(out_netcdf_dir)\n except OSError:\n # If already exists continue\n pass\n\n if neodaas_name:\n output_name = \"OC-CCI_{product}_L4_{area}_{period}_{date}.nc\".format(\n product=product,\n area=area,\n period=TEMPORAL_COMPOSITE_NAMES[temporal_composite],\n date=str_date)\n else:\n output_name = os.path.basename(in_netcdf).replace(\".nc\",\n \"_{}.nc\".format(area))\n out_netcdf_file = os.path.join(out_netcdf_dir, output_name)\n\n if os.path.isfile(out_netcdf_file):\n continue\n\n libsubarea.nk_subarea(in_netcdf, out_netcdf_file,\n [\"lon\", \"lat\"], [extent[0], extent[3]],\n [extent[2], extent[1]])\n\n if temporal_composite.lower() == \"monthly\":\n current_date = current_date + relativedelta.relativedelta(months=1)\n # For the daily, 5day and 8day composite itterate a day at a time so get all composites\n # If not then when starting out of sequence keep missing data.\n else:\n current_date = current_date + relativedelta.relativedelta(days=1)", "def main(start, stop):\n print(\"!*\"*50)\n print(\"PROCESS DATA MAIN FUNCTION\")\n print(\"!*\"*50)\n print(\"START= \", start, \"STOP =\", stop)\n\n # create instance of master dept dict\n by_dept_dict = get_master_by_dept_dict()\n\n # populate claims for range\n by_dept_dict['claims_for_range'] = get_range_of_claims(start, stop)\n print(\"CLAIMS FOR RANGE: \", by_dept_dict['claims_for_range'])\n\n # get employees\n employees = get_employees(start, stop)\n # iterate over the list employee queries\n by_dept_dict = iterate_over_employees(by_dept_dict, employees, start, stop)\n\n return by_dept_dict", "def main(config_file, rows, cols):\n # setup paths\n _, _, params = cf.get_ifg_paths(config_file)\n _postprocess_linrate(rows, cols, params)\n if params[cf.TIME_SERIES_CAL]:\n _postprocess_timeseries(rows, cols, params)", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def run(self):\n self.timestamp['start'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n\n for point in self._prepare_grid():\n graph = self._prepare_graph(**point)\n env = self._prepare_env(graph, **point)\n log = self._prepare_logger(graph, env, **point)\n\n try:\n env.run(until=self.runtime)\n except Exception as e:\n print(e)\n log.close()\n\n # self.timestamp[grid.hash_grid_point(point)].append(datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))\n\n self.timestamp['end'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')", "def _loop_daily(config, configfile, topdatadir, startdate, model_forcing):\n\n delta = datetime.timedelta(days=1)\n scriptdir = config['SETUP']['LISFDIR'] + '/lis/utils/usaf/s2s/s2s_modules/s2spost/'\n\n # The very first day may be missing. 
Gracefully handle this\n firstdate = startdate\n if _is_lis_output_missing(firstdate, model_forcing):\n firstdate += delta\n\n if startdate.month == 12:\n enddate = datetime.datetime(year=(startdate.year + 1),\n month=1,\n day=1)\n else:\n enddate = datetime.datetime(year=startdate.year,\n month=(startdate.month + 1),\n day=1)\n\n curdate = firstdate\n while curdate <= enddate:\n cmd = f\"python {scriptdir}/daily_s2spost_nc.py {configfile}\"\n for model in [\"SURFACEMODEL\", \"ROUTING\"]:\n cmd += f\" lis_fcst/{model_forcing}/{model}/\"\n cmd += f\"{curdate.year:04d}{curdate.month:02d}\"\n cmd += \"/LIS_HIST_\"\n cmd += f\"{curdate.year:04d}{curdate.month:02d}{curdate.day:02d}\"\n cmd += \"0000.d01.nc\"\n\n cmd += f\" {topdatadir}/cf_{model_forcing}_\"\n cmd += f\"{startdate.year:04d}{startdate.month:02d}\"\n\n cmd += f\" {curdate.year:04d}{curdate.month:02d}{curdate.day:02d}00\"\n\n cmd += f\" {model_forcing}\"\n\n print(cmd)\n returncode = subprocess.call(cmd, shell=True)\n if returncode != 0:\n print(\"[ERR] Problem running CF conversion!\")\n sys.exit(1)\n\n curdate += delta", "def date_range(all_files,start_year,start_month,start_day,end_year,end_month,\r\n end_day):\r\n\r\n d1 = date(start_year,start_month,start_day)\r\n d_last = date(end_year,end_month,end_day)\r\n day_range = (d_last - d1).days\r\n #print('day range: %s' %day_range)\r\n files = []\r\n for t in range(day_range):\r\n d2 = d1 + timedelta(t)\r\n d2_str1 = str(d2)\r\n d2_str2 = d2.strftime('%Y_%m_%d')\r\n # print(d2)\r\n for f in all_files:\r\n if d2_str1 in str(f) or d2_str2 in str(f):\r\n files.append(f)\r\n return(files)", "def info_date(source_files: AllSourceFilenames = AllSourceFilenames(),\n out_datefirst: OutputCommonData = OutputCommonData(\"cwb.datefirst\"),\n out_datelast: OutputCommonData = OutputCommonData(\"cwb.datelast\"),\n datefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.datefrom\"),\n dateto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.dateto\"),\n timefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timefrom\"),\n timeto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timeto\")):\n first_date = None\n last_date = None\n\n for file in source_files:\n from_dates = sorted((int(x[0]), x[1]) for x in datefrom.read_attributes(file, (datefrom, timefrom)) if x[0])\n if from_dates and (first_date is None or from_dates[0] < first_date):\n first_date = from_dates[0]\n to_dates = sorted((int(x[0]), x[1]) for x in dateto.read_attributes(file, (dateto, timeto)) if x[0])\n if to_dates and (last_date is None or to_dates[-1] > last_date):\n last_date = to_dates[-1]\n\n if not first_date or not last_date:\n raise SparvErrorMessage(\"Corpus is configured as having date information, but no dates were found.\")\n\n # Parse and re-format dates (zero-padding dates with less than 8 digits, needed by strptime)\n first_date_d = datetime.strptime(f\"{str(first_date[0]).zfill(8)} {first_date[1]}\", \"%Y%m%d %H%M%S\")\n first_date_formatted = first_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n last_date_d = datetime.strptime(f\"{str(last_date[0]).zfill(8)} {last_date[1]}\", \"%Y%m%d %H%M%S\")\n last_date_formatted = last_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n out_datefirst.write(first_date_formatted)\n out_datelast.write(last_date_formatted)", "def _run_scheduled_daily_tasks():\n worker.add_task(daily.run)", "def process_data(self, 
df_data, breakdown='weekly'):\n df_data['time'] = df_data['time'].apply(self.remove_time_zone)\n df_data['grid_coord'] = df_data['grid_coord'].astype(str)\n # return self.process_chunk((self.remove_time_zone('2019-04-15T00:00:00-04:00'), self.remove_time_zone('2019-04-16T00:00:00-04:00')), df_data)\n # get weekly/daily time chunks within cleanedInputData\n week_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\n start = min(df_data['time']) #str\n end = max(df_data['time']) #str\n start_date = iso8601.parse_date(start).replace(hour=0, minute=0, second=0)\n end_date = (iso8601.parse_date(end) + timedelta(days=1)).replace(hour=0, minute=0, second=0)\n if breakdown == \"weekly\":\n dates = pd.date_range(start_date, end_date, freq='W-'+week_days[start_date.weekday()])\n dates = [e.isoformat() for e in dates] + [end_date.isoformat()]\n else: # breakdown == \"daily\"\n dates = pd.date_range(start_date, end_date, freq='d')\n dates = [e.isoformat() for e in dates]\n time_chunks = []\n for left, right in zip(dates, dates[1:]):\n time_chunks.append((left, right))\n # return self.process_chunk(time_chunks[0], df_data)\n # parallelize processing between time chunks\n with Pool(cpu_count()) as p:\n ret_list = p.map(partial(self.process_chunk, df_data=df_data), time_chunks)\n return pd.concat(ret_list)", "def generate_dates(self, event):\n dates = []\n dtstart = self.tz_localize(event['dtstart'].dt)\n if 'dtend' in event:\n dtend = self.tz_localize(event['dtend'].dt)\n # DTEND is exclusive, so the real ending date is one day before\n if is_date(dtend):\n dtend -= datetime.timedelta(days=1)\n else:\n dtend = None\n # Normal case: no repetition\n if not 'rrule' in event:\n dates.append(self.format_dateinterval(dtstart, dtend))\n # Handle recurrent events\n else:\n ruleset = rrule.rruleset()\n rule = rrule.rrulestr(event['rrule'].to_ical().decode('utf-8'),\n dtstart=dtstart)\n ruleset.rrule(rule)\n # Parse all types of recurrence constraints\n for prop in ['rdate', 'exdate']:\n if not prop in event:\n continue\n # This can return either a single value or a list, so it's\n # a mess...\n prop_dates = event[prop]\n if not isinstance(prop_dates, list):\n prop_dates = [prop_dates]\n for prop_date in prop_dates:\n # This is a vDDDLists\n for vddd in prop_date.dts:\n dt = vddd.dt\n # EXDATE and RDATE are allowed to be dates,\n # convert them to datetime. 
TODO: should the time\n # be midnight, or the time from DTSTART?\n if is_date(dt):\n dt = datetime.datetime.combine(dt, datetime.time())\n dt = self.tz_localize(dt)\n ruleset.__getattribute__(prop)(dt)\n # We now have a ruleset that expands to a list of starting\n # date or datetime, one for each repetition.\n for dtstart_repeat in itertools.islice(ruleset, MAX_RECURRING_EVENTS):\n # Handle case where dtstart is a date, since rrule always\n # returns datetime objects.\n if is_date(dtstart):\n dtstart_repeat = dtstart_repeat.date()\n # Compute matching dtend if applicable\n if dtend == None:\n dtend_repeat = None\n else:\n dtend_repeat = dtend + (dtstart_repeat - dtstart)\n dates.append(self.format_dateinterval(dtstart_repeat, dtend_repeat))\n return dates", "def run_methods(self):\n try:\n # dictionaries of population time series\n self.batch_exponential()\n except Exception as e:\n print(str(e))", "def test_all(self):\n\n # year = 1980 #unused\n date = datetime.date(1980, 1, 1)\n while date < datetime.date(1981, 1, 1):\n if date.month <= 4:\n mindate, maxdate = datetime.date(1980, 1, 1), datetime.date(1980, 4, 30)\n elif date.month <= 8:\n mindate, maxdate = datetime.date(1980, 5, 1), datetime.date(1980, 8, 31)\n else:\n mindate, maxdate = datetime.date(1980, 9, 1), datetime.date(1980, 12, 31)\n\n startdate, enddate = get_tertialspan(date)\n self.assertTrue(startdate >= mindate)\n self.assertTrue(startdate <= maxdate)\n self.assertTrue(enddate >= mindate)\n self.assertTrue(enddate <= maxdate)\n\n date += datetime.timedelta(days=1)", "def pullGateCountDateRange(start_date, end_date):\n start_date = parser.parse(start_date)\n end_date = parser.parse(end_date) + timedelta(days=1)\n dates = []\n for single_date in daterange(start_date, end_date):\n dates.append(single_date.strftime(\"%Y-%m-%d\"))\n for i in range(len(dates)):\n req = pullGateCount(dates[i], dates[i+1])\n data = req.json()\n if req.status_code >= 400:\n print(\"Error1:\", dates[i], json.dumps(data, indent=0))\n else:\n # Load data\n for itm in data[\"results\"]:\n tmpTZD = {}\n localDT = parser.parse(itm[\"recordDate_hour_1\"]).replace(\n tzinfo=pytz.utc).astimezone(local_tz)\n tmpTZD['local_timestamp'] = localDT.isoformat()\n tmpTZD['year'] = localDT.year\n tmpTZD['month'] = localDT.month\n tmpTZD['day'] = localDT.day\n tmpTZD['hour'] = localDT.hour\n tmpTZD['minute'] = localDT.minute\n tmpTZD['second'] = localDT.second\n tmpTZD['time_zone_name'] = localDT.tzname()\n tmp = itm\n tmp['localDateTime'] = tmpTZD\n saveCybercomData(tmp)\n # print(dates[i])\n if dates[i+1] == dates[-1]:\n break\n return \"Date(s) Imported/Updated: {0}\".format(\",\".join(dates[:-1]))", "def runScheduler(self):\n\n for source in self.sources:\n intervals = [\n int(self.sources[source]['metrics'][x]['interval']) for x\n in range(0, len(self.sources[source]['metrics']))]\n sourceInterval = self.gcd(intervals)\n self.sources[source]['sourceInterval'] = sourceInterval\n self.logger.debug(self.sources[source]['metrics'])\n\n self.scheduler.add_job(\n self.getDriverData, 'interval', args=[\n self.sources[source]['metrics']],\n seconds=sourceInterval)", "def test_date_range():\n year = 2012\n cres_m = get_curtailment(year, curt_fn='curtailment.json')[0]\n cres_dr = get_curtailment(year, curt_fn='curtailment_date_range.json')[0]\n for df_res, site in cres_m:\n gid = int(site.name)\n assert np.allclose(df_res['windspeed'], cres_dr[gid]['windspeed'])", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# 
Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, \"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? {suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. 
Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var", "def execute(self):\n # Set up DB and googlesheets doc access\n self._connect_to_rdr_replica()\n service_key_info = gcp_get_iam_service_key_info(self.gcp_env.service_key_id)\n gs_creds = gspread.service_account(service_key_info['key_path'])\n gs_file = gs_creds.open_by_key(self.doc_id)\n\n # These strings converted to all lowercase when used as SQL query filters\n for origin in ['Vibrent', 'CareEvolution']:\n self._set_origin_value(origin)\n # Retrieve the daily data and build the report. Partial string substitution for the SQL statments is done\n # here; the remaining substitutions occur in the _get_consent_validation_dataframe() method\n self.consent_df = self._get_consent_validation_dataframe(\n self.report_sql.format_map(SafeDict(report_date=self.report_date.strftime(\"%Y-%m-%d\"),\n origin_filter=self.origin_value.lower()))\n )\n self.create_daily_report(gs_file)\n _logger.info(f'{self.origin_value} Daily report complete')\n self._clear_report()", "def run(self, input_time_series=None, num_iter=None, record=False,\n output=False):\n pass" ]
[ "0.598406", "0.58095527", "0.5807055", "0.5773897", "0.57469034", "0.5707404", "0.5694574", "0.56121904", "0.56060857", "0.5522661", "0.54842377", "0.5463781", "0.538987", "0.5376148", "0.5323573", "0.5313236", "0.5301901", "0.52983844", "0.52903414", "0.5290009", "0.5263263", "0.521021", "0.5203728", "0.5196754", "0.5169017", "0.51625013", "0.5162128", "0.5161272", "0.51608795", "0.5154377" ]
0.63575816
0
Check the RGB subimage corresponding to this NDVI image looks OK.
def check_sub_image(self, ndvi_filename, input_path): rgb_filename = re.sub("BWNDVI","RGB",ndvi_filename) rgb_img = Image.open(self.get_file(os.path.join(input_path, rgb_filename), self.input_location_type)) img_ok = check_image_ok(rgb_img, 0.05) return img_ok
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_sub_image(self, ndvi_filename, input_path):\n rgb_filename = re.sub(\"NDVI\",\"RGB\",ndvi_filename)\n rgb_img = self.get_image(os.path.join(input_path, rgb_filename))\n\n img_ok = check_image_ok(rgb_img, 0.05)\n return img_ok", "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def test_check_wrong_image(self):\n result = analyzer.check_image_color(\"tests/test_files/non_exists.jpg\")\n self.assertEqual(result, \"Image not found\")", "def is_rgb(img: np.ndarray) -> bool:\n\n return len(img.shape) >= 1 and img.shape[-1] == 3", "def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid", "def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False", "def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def test_red_blue_filter(subtests):\n white_pixel = 255\n black_pixel = 0\n\n width = 100\n height = 100\n size = (width, height)\n\n # TestCase Red 0 Blue 255\n image = Image.new(\"RGBA\", size, (0, 0, 255))\n with subtests.test(msg=\"Red 0 Blue 255\", image=image):\n assert_color_all_pixels(image, size, black_pixel)\n\n # TestCase Red 255 Blue 0\n image = Image.new(\"RGBA\", size, (255, 0, 0))\n with subtests.test(msg=\"Red 255 Blue 0\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Only White pixels input\n image = Image.new(\"RGBA\", size, (255, 255, 255))\n with subtests.test(msg=\"Only White pixels\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Only black pixels\n image = Image.new(\"RGBA\", size, (0, 0, 0))\n with subtests.test(msg=\"Only black pixels\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Red/Blue > 0.95\n image = Image.new(\"RGBA\", size, (200, 155, 100))\n with subtests.test(msg=\"Red/Blue > 0.95\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Red/Blue < 0.95\n image = Image.new(\"RGBA\", size, (99, 155, 135))\n with subtests.test(msg=\"Red/Blue < 0.95\", image=image):\n assert_color_all_pixels(image, size, black_pixel)\n\n # TestCase Red/Blue > 0.95 Close to 0.95 value\n image = Image.new(\"RGBA\", size, (100, 0, 104))\n with subtests.test(msg=\"R/B > 0.95 Close to 0.95\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Red/Blue < 0.95 Close to 0.95 value\n image = Image.new(\"RGBA\", size, (100, 0, 106))\n with subtests.test(msg=\"R/B < 0.95 Close to 0.95\", image=image):\n assert_color_all_pixels(image, size, black_pixel)\n\n # TestCase Alpha channel of 0\n image = Image.new(\"RGBA\", size, (0, 0, 0, 0))\n with subtests.test(msg=\"Alpha Channel value 0\", image=image):\n assert_color_all_pixels(image, size, black_pixel)", "def test_check_image_color(self):\n result = 
analyzer.check_image_color(\"tests/test_files/sample.jpg\")\n self.assertEqual(result, \"light\")", "def test_check_color_and_image_input(self):\n\n from m3_save_images.m3_save_images import check_color_and_image_input\n valid_path = \"../img\"\n invalid_path = \"../imgfab7841\"\n valid_image = \"citrony.jpg\"\n invalid_image = \"citrony87465.jpg\"\n valid_image_color = \"White\"\n invalid_image_color = \"White45781\"\n\n # self.assertTrue(check_path_and_img-_input(valid_path, valid_image) is None)\n with self.assertRaises(Exception):\n check_color_and_image_input(invalid_path, valid_image, valid_image_color)\n with self.assertRaises(SystemExit):\n check_color_and_image_input(valid_path, invalid_image, valid_image_color)\n with self.assertRaises(SystemExit):\n check_color_and_image_input(valid_path, valid_image, invalid_image_color)\n self.assertTrue(check_color_and_image_input(valid_path, valid_image, valid_image_color) is None)", "def test_scale_image_rgb_identity(self):\n scaled = scale_image(self.rgb_image, 1)\n self.assertEqual(self.rgb_image.shape, scaled.shape)", "def check_color(i, j, k):\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()", "def check_record(record):\n assert isinstance(record, dict)\n assert record[\"image\"].shape == ORIGINAL_IMAGE_SIZE\n assert record[\"image_name\"].endswith(\".jpg\")\n\n # Checking ground truth information\n for gt_box in record[\"gt_bboxes\"]:\n assert isinstance(gt_box, dict)\n check_bbox(gt_box[\"bbox\"])\n assert gt_box[\"class\"] in CLASSES\n\n # Checking foreground rois\n for roi in record[\"rois\"]:\n assert isinstance(roi, dict)\n check_bbox(roi[\"bbox\"])\n assert np.sum(roi[\"class\"]) == 1\n assert roi[\"reg_target\"].shape == (4,)\n check_reg_target(record[\"gt_bboxes\"], roi)\n\n # Checking background rois\n for background_roi in record[\"rois_background\"]:\n assert isinstance(background_roi, dict)\n check_bbox(background_roi[\"bbox\"])\n expected_background_class = np.zeros(21)\n expected_background_class[0] = 1\n np.testing.assert_equal(background_roi[\"class\"], expected_background_class)\n np.testing.assert_equal(background_roi[\"reg_target\"], np.zeros(4))", "def check_img(img):\n\n if isinstance(img, (str, os.PathLike)) and os.path.exists(img):\n img = nib.load(img)\n elif not isinstance(img, nib.spatialimages.SpatialImage):\n raise TypeError('Provided image must be an existing filepath or a '\n 'pre-loaded niimg-like object')\n\n # ensure 3D or squeezable to 3D\n img = nib.funcs.squeeze_image(img)\n if len(img.shape) != 3:\n raise ValueError('Provided image must be 3D')\n\n # check if atlas data is int or castable to int\n # if image is arrayproxy convert it to an array for speed-up\n data = np.asarray(img.dataobj)\n cast = nib.is_proxy(img.dataobj)\n if img.header.get_data_dtype().kind not in ['i', 'u']:\n idata = data.astype('int32')\n cast = np.allclose(idata, data)\n data = idata\n if not cast:\n raise ValueError('Provided image should have integer values or '\n 'be safely castable to int without data loss')\n if cast:\n img = img.__class__(data, img.affine, header=img.header)\n img.header.set_data_dtype(np.int32)\n\n return img", "def test_subimages_errors(self):\n with self.assertRaises(NotImplementedError):\n self.quart.plot_analyzed_subimage(\"sr\")\n with self.assertRaises(NotImplementedError):\n self.quart.save_analyzed_subimage(\"sr\")", "def check_shape(self):\r\n if 
np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape", "def test_scale_image_rgb(self):\n larger = scale_image(self.rgb_image, 1.5)\n self.assertEqual((150, 300, 3), larger.shape)\n\n smaller = scale_image(self.rgb_image, 0.2)\n self.assertEqual((20, 40, 3), smaller.shape)", "def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res", "def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def validate_shape(self):\n if len(self._first_rgb.shape) != 5:\n raise ValueError(f\"Invalid shape: {self._first_rgb.shape}\")", "def check_rgb(image):\n im_yiq = []\n rgb = False\n y = image\n if len(image.shape) > 2 and image.shape[-1] == 3: # The image is RGB\n rgb = True\n im_yiq = rgb2yiq(image) # convert to YIQ format\n y = im_yiq[:, :, 0]\n return rgb, y, im_yiq", "def validate_image(path):\n problems = False\n # Rasterio env is required to make sure that the gdal bindings are setup correctly.\n with rasterio.Env():\n try:\n dataset = rasterio.open(path)\n except Exception as e:\n logging.error(\"Could not open dataset\", e)\n return False\n\n # Check the bands have sort of sensible values\n if dataset.count != args.bands:\n logging.error(f\"There is not the required number of bands. Expected {args.bands} found {dataset.count}\")\n problems = True\n\n if not data_validation.check_data(dataset):\n problems = True\n\n # Validate coordinate box doesn't cover the origin.\n # Also make sure that it has valid coordinates.\n if dataset.transform:\n top_left = dataset.transform * (0, 0)\n bottom_right = dataset.transform * (dataset.width, dataset.height)\n if np.sign(bottom_right[0]) != np.sign(top_left[0]) and np.sign(bottom_right[1]) != np.sign(top_left[1]):\n logging.error(f\"Data set appears to be over the origin of the coordinate space.\")\n problems = True\n else:\n logging.error(f\"Dataset transform is missing.\")\n problems = True\n return not problems # return true if the image is valid", "def _check_size(self, img):\n absdiff = num.abs(num.subtract(img.shape, self.expected_size))\n pctdiff = num.true_divide(absdiff, self.expected_size)\n if not num.all(pctdiff <= self.size_tolerance):\n raise StandardError('image size outside form tolerance {} != {}'\n .format(img.shape, self.expected_size))", "def test_on_skimage_png(self):\n from_skimage = diffread(TEST_PNG)\n\n self.assertTupleEqual(from_skimage.shape, (256, 256))\n self.assertTrue(np.allclose(from_skimage, np.ones_like(from_skimage)))", "def test_color_balance_with_test_images(self):\n for name, img in get_test_images():\n balanced_img = balance_color(img)\n\n assert balanced_img.max() == 255, \\\n \"Maximum of a balanced image should be 255\"\n assert balanced_img.min() == 0, \\\n \"Minimum of a balanced image should be 0\"\n for channel in cv2.split(balanced_img):\n assert channel.max() == 255, \\\n \"Maximum of each channel should be 255\"\n assert channel.max() == 255, \\\n \"Minimum of each channel should be 0\"", "def test_complex(self):\n image = 
self.design.layout.layers[0].images[2]\n assert len(image.shape_instances) == 3", "def check_image(image, depth):\n cols, rows = image.size\n divisor = 2**depth\n n_rows = round(rows/divisor) * divisor\n n_cols = round(cols/divisor) * divisor\n # d = min(n_rows, n_cols)\n image = image.resize((n_cols, n_rows))\n image_array = np.asarray(image)\n return image_array, Fraction(n_rows, n_cols)" ]
[ "0.8038135", "0.6662857", "0.65129346", "0.6423486", "0.62485963", "0.62480164", "0.6097359", "0.60779303", "0.60570514", "0.6055576", "0.6054975", "0.6044617", "0.5984661", "0.597382", "0.5965496", "0.59394634", "0.5909699", "0.59011686", "0.5898578", "0.5880267", "0.58762836", "0.5856243", "0.5854768", "0.5830632", "0.5830406", "0.5793291", "0.5791847", "0.5761952", "0.5731518", "0.5675563" ]
0.8215671
0
Check the RGB subimage corresponding to this NDVI image looks OK.
def check_sub_image(self, ndvi_filename, input_path): rgb_filename = re.sub("NDVI","RGB",ndvi_filename) rgb_img = self.get_image(os.path.join(input_path, rgb_filename)) img_ok = check_image_ok(rgb_img, 0.05) return img_ok
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_sub_image(self, ndvi_filename, input_path):\n rgb_filename = re.sub(\"BWNDVI\",\"RGB\",ndvi_filename)\n rgb_img = Image.open(self.get_file(os.path.join(input_path, rgb_filename),\n self.input_location_type))\n img_ok = check_image_ok(rgb_img, 0.05)\n return img_ok", "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def test_check_wrong_image(self):\n result = analyzer.check_image_color(\"tests/test_files/non_exists.jpg\")\n self.assertEqual(result, \"Image not found\")", "def is_rgb(img: np.ndarray) -> bool:\n\n return len(img.shape) >= 1 and img.shape[-1] == 3", "def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid", "def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False", "def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def test_red_blue_filter(subtests):\n white_pixel = 255\n black_pixel = 0\n\n width = 100\n height = 100\n size = (width, height)\n\n # TestCase Red 0 Blue 255\n image = Image.new(\"RGBA\", size, (0, 0, 255))\n with subtests.test(msg=\"Red 0 Blue 255\", image=image):\n assert_color_all_pixels(image, size, black_pixel)\n\n # TestCase Red 255 Blue 0\n image = Image.new(\"RGBA\", size, (255, 0, 0))\n with subtests.test(msg=\"Red 255 Blue 0\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Only White pixels input\n image = Image.new(\"RGBA\", size, (255, 255, 255))\n with subtests.test(msg=\"Only White pixels\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Only black pixels\n image = Image.new(\"RGBA\", size, (0, 0, 0))\n with subtests.test(msg=\"Only black pixels\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Red/Blue > 0.95\n image = Image.new(\"RGBA\", size, (200, 155, 100))\n with subtests.test(msg=\"Red/Blue > 0.95\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Red/Blue < 0.95\n image = Image.new(\"RGBA\", size, (99, 155, 135))\n with subtests.test(msg=\"Red/Blue < 0.95\", image=image):\n assert_color_all_pixels(image, size, black_pixel)\n\n # TestCase Red/Blue > 0.95 Close to 0.95 value\n image = Image.new(\"RGBA\", size, (100, 0, 104))\n with subtests.test(msg=\"R/B > 0.95 Close to 0.95\", image=image):\n assert_color_all_pixels(image, size, white_pixel)\n\n # TestCase Red/Blue < 0.95 Close to 0.95 value\n image = Image.new(\"RGBA\", size, (100, 0, 106))\n with subtests.test(msg=\"R/B < 0.95 Close to 0.95\", image=image):\n assert_color_all_pixels(image, size, black_pixel)\n\n # TestCase Alpha channel of 0\n image = Image.new(\"RGBA\", size, (0, 0, 0, 0))\n with subtests.test(msg=\"Alpha Channel value 0\", image=image):\n assert_color_all_pixels(image, size, black_pixel)", "def test_check_image_color(self):\n 
result = analyzer.check_image_color(\"tests/test_files/sample.jpg\")\n self.assertEqual(result, \"light\")", "def test_check_color_and_image_input(self):\n\n from m3_save_images.m3_save_images import check_color_and_image_input\n valid_path = \"../img\"\n invalid_path = \"../imgfab7841\"\n valid_image = \"citrony.jpg\"\n invalid_image = \"citrony87465.jpg\"\n valid_image_color = \"White\"\n invalid_image_color = \"White45781\"\n\n # self.assertTrue(check_path_and_img-_input(valid_path, valid_image) is None)\n with self.assertRaises(Exception):\n check_color_and_image_input(invalid_path, valid_image, valid_image_color)\n with self.assertRaises(SystemExit):\n check_color_and_image_input(valid_path, invalid_image, valid_image_color)\n with self.assertRaises(SystemExit):\n check_color_and_image_input(valid_path, valid_image, invalid_image_color)\n self.assertTrue(check_color_and_image_input(valid_path, valid_image, valid_image_color) is None)", "def test_scale_image_rgb_identity(self):\n scaled = scale_image(self.rgb_image, 1)\n self.assertEqual(self.rgb_image.shape, scaled.shape)", "def check_color(i, j, k):\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()", "def check_record(record):\n assert isinstance(record, dict)\n assert record[\"image\"].shape == ORIGINAL_IMAGE_SIZE\n assert record[\"image_name\"].endswith(\".jpg\")\n\n # Checking ground truth information\n for gt_box in record[\"gt_bboxes\"]:\n assert isinstance(gt_box, dict)\n check_bbox(gt_box[\"bbox\"])\n assert gt_box[\"class\"] in CLASSES\n\n # Checking foreground rois\n for roi in record[\"rois\"]:\n assert isinstance(roi, dict)\n check_bbox(roi[\"bbox\"])\n assert np.sum(roi[\"class\"]) == 1\n assert roi[\"reg_target\"].shape == (4,)\n check_reg_target(record[\"gt_bboxes\"], roi)\n\n # Checking background rois\n for background_roi in record[\"rois_background\"]:\n assert isinstance(background_roi, dict)\n check_bbox(background_roi[\"bbox\"])\n expected_background_class = np.zeros(21)\n expected_background_class[0] = 1\n np.testing.assert_equal(background_roi[\"class\"], expected_background_class)\n np.testing.assert_equal(background_roi[\"reg_target\"], np.zeros(4))", "def check_img(img):\n\n if isinstance(img, (str, os.PathLike)) and os.path.exists(img):\n img = nib.load(img)\n elif not isinstance(img, nib.spatialimages.SpatialImage):\n raise TypeError('Provided image must be an existing filepath or a '\n 'pre-loaded niimg-like object')\n\n # ensure 3D or squeezable to 3D\n img = nib.funcs.squeeze_image(img)\n if len(img.shape) != 3:\n raise ValueError('Provided image must be 3D')\n\n # check if atlas data is int or castable to int\n # if image is arrayproxy convert it to an array for speed-up\n data = np.asarray(img.dataobj)\n cast = nib.is_proxy(img.dataobj)\n if img.header.get_data_dtype().kind not in ['i', 'u']:\n idata = data.astype('int32')\n cast = np.allclose(idata, data)\n data = idata\n if not cast:\n raise ValueError('Provided image should have integer values or '\n 'be safely castable to int without data loss')\n if cast:\n img = img.__class__(data, img.affine, header=img.header)\n img.header.set_data_dtype(np.int32)\n\n return img", "def test_subimages_errors(self):\n with self.assertRaises(NotImplementedError):\n self.quart.plot_analyzed_subimage(\"sr\")\n with self.assertRaises(NotImplementedError):\n self.quart.save_analyzed_subimage(\"sr\")", "def check_shape(self):\r\n if 
np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape", "def test_scale_image_rgb(self):\n larger = scale_image(self.rgb_image, 1.5)\n self.assertEqual((150, 300, 3), larger.shape)\n\n smaller = scale_image(self.rgb_image, 0.2)\n self.assertEqual((20, 40, 3), smaller.shape)", "def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res", "def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def validate_shape(self):\n if len(self._first_rgb.shape) != 5:\n raise ValueError(f\"Invalid shape: {self._first_rgb.shape}\")", "def check_rgb(image):\n im_yiq = []\n rgb = False\n y = image\n if len(image.shape) > 2 and image.shape[-1] == 3: # The image is RGB\n rgb = True\n im_yiq = rgb2yiq(image) # convert to YIQ format\n y = im_yiq[:, :, 0]\n return rgb, y, im_yiq", "def validate_image(path):\n problems = False\n # Rasterio env is required to make sure that the gdal bindings are setup correctly.\n with rasterio.Env():\n try:\n dataset = rasterio.open(path)\n except Exception as e:\n logging.error(\"Could not open dataset\", e)\n return False\n\n # Check the bands have sort of sensible values\n if dataset.count != args.bands:\n logging.error(f\"There is not the required number of bands. Expected {args.bands} found {dataset.count}\")\n problems = True\n\n if not data_validation.check_data(dataset):\n problems = True\n\n # Validate coordinate box doesn't cover the origin.\n # Also make sure that it has valid coordinates.\n if dataset.transform:\n top_left = dataset.transform * (0, 0)\n bottom_right = dataset.transform * (dataset.width, dataset.height)\n if np.sign(bottom_right[0]) != np.sign(top_left[0]) and np.sign(bottom_right[1]) != np.sign(top_left[1]):\n logging.error(f\"Data set appears to be over the origin of the coordinate space.\")\n problems = True\n else:\n logging.error(f\"Dataset transform is missing.\")\n problems = True\n return not problems # return true if the image is valid", "def _check_size(self, img):\n absdiff = num.abs(num.subtract(img.shape, self.expected_size))\n pctdiff = num.true_divide(absdiff, self.expected_size)\n if not num.all(pctdiff <= self.size_tolerance):\n raise StandardError('image size outside form tolerance {} != {}'\n .format(img.shape, self.expected_size))", "def test_on_skimage_png(self):\n from_skimage = diffread(TEST_PNG)\n\n self.assertTupleEqual(from_skimage.shape, (256, 256))\n self.assertTrue(np.allclose(from_skimage, np.ones_like(from_skimage)))", "def test_color_balance_with_test_images(self):\n for name, img in get_test_images():\n balanced_img = balance_color(img)\n\n assert balanced_img.max() == 255, \\\n \"Maximum of a balanced image should be 255\"\n assert balanced_img.min() == 0, \\\n \"Minimum of a balanced image should be 0\"\n for channel in cv2.split(balanced_img):\n assert channel.max() == 255, \\\n \"Maximum of each channel should be 255\"\n assert channel.max() == 255, \\\n \"Minimum of each channel should be 0\"", "def test_complex(self):\n image = 
self.design.layout.layers[0].images[2]\n assert len(image.shape_instances) == 3", "def check_image(image, depth):\n cols, rows = image.size\n divisor = 2**depth\n n_rows = round(rows/divisor) * divisor\n n_cols = round(cols/divisor) * divisor\n # d = min(n_rows, n_cols)\n image = image.resize((n_cols, n_rows))\n image_array = np.asarray(image)\n return image_array, Fraction(n_rows, n_cols)" ]
[ "0.8215671", "0.6662857", "0.65129346", "0.6423486", "0.62485963", "0.62480164", "0.6097359", "0.60779303", "0.60570514", "0.6055576", "0.6054975", "0.6044617", "0.5984661", "0.597382", "0.5965496", "0.59394634", "0.5909699", "0.59011686", "0.5898578", "0.5880267", "0.58762836", "0.5856243", "0.5854768", "0.5830632", "0.5830406", "0.5793291", "0.5791847", "0.5761952", "0.5731518", "0.5675563" ]
0.8038135
1
Each date will have a subdirectory called 'SPLIT' with ~400 NDVI subimages.
def process_single_date(self, date_string):
    # see if there is already a ndvi.json file in
    # the output location - if so, skip
    output_location = os.path.join(self.output_location, date_string, "JSON", "NDVI")
    if (not self.replace_existing_files) and \
       self.check_for_existing_files(output_location, 1):
        return True

    input_path = os.path.join(self.input_location, date_string, "SPLIT")
    all_input_files = self.list_directory(input_path, self.input_location_type)
    print("input path is {}".format(input_path))

    # list all the "NDVI" sub-images where RGB image passes quality check
    input_files = [filename for filename in all_input_files \
                   if "_NDVI" in filename and \
                   self.check_sub_image(filename, input_path)]
    if len(input_files) == 0:
        print("{}: No sub-images for date {}".format(self.name, date_string))
        return
    else:
        print("{} found {} sub-images".format(self.name, len(input_files)))

    # if we only want a subset of sub-images, truncate the list here
    if self.n_sub_images > 0:
        input_files = input_files[:self.n_sub_images]

    ndvi_vals = []
    for ndvi_file in input_files:
        coords_string = find_coords_string(ndvi_file)
        ndvi_dict = self.process_sub_image(os.path.join(input_path, ndvi_file),
                                           date_string,
                                           coords_string)
        ndvi_vals.append(ndvi_dict)

    self.save_json(ndvi_vals, "ndvi_values.json", output_location,
                   self.output_location_type)
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\tsavedir = path_train + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)\n\t\t\tsavedir = path_label + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)", "def split_data(src, dst, ratio=0.2):\n dirs = [f for f in os.listdir(src) if os.path.isdir(os.path.join(src, f)) and not f.startswith('.')]\n for d in dirs:\n src_subdir = os.path.join(src, d)\n dst_subdir = os.path.join(dst, d)\n if not os.path.exists(dst_subdir):\n os.makedirs(dst_subdir)\n imgs = [f for f in os.listdir(src_subdir) if os.path.isfile(os.path.join(src_subdir, f)) and not f.startswith('.')]\n for img in imgs:\n if np.random.uniform() <= ratio:\n move(os.path.join(src_subdir, img), dst_subdir)\n print('split done')", "def process_single_date(self, date_string):\n # see if there is already a network_centralities.json file in\n # the output location - if so, skip\n output_location = os.path.join(self.output_location, date_string,\"JSON\",\"NC\")\n if (not self.replace_existing_files) and \\\n self.check_for_existing_files(output_location, 1):\n return True\n\n input_path = os.path.join(self.input_location, date_string, \"SPLIT\")\n all_input_files = self.list_directory(input_path, self.input_location_type)\n print(\"input path is {}\".format(input_path))\n\n # list all the \"BWNDVI\" sub-images where RGB image passes quality check\n input_files = [filename for filename in all_input_files \\\n if \"BWNDVI\" in filename and \\\n self.check_sub_image(filename, input_path)]\n if len(input_files) == 0:\n print(\"{}: No sub-images for date {}\".format(self.name,\n date_string))\n return\n else:\n print(\"{} found {} sub-images\".format(self.name, len(input_files)))\n tmp_json_dir = tempfile.mkdtemp()\n\n # if we only want a subset of sub-images, truncate the list here\n if self.n_sub_images > 0:\n input_files = input_files[:self.n_sub_images]\n\n # create a multiprocessing pool to handle each sub-image in parallel\n with Pool(processes=self.n_threads) as pool:\n # prepare the arguments for the process_sub_image function\n arguments=[(i,\n self.get_file(os.path.join(input_path,filename),\n self.input_location_type),\n tmp_json_dir,\n date_string,\n find_coords_string(filename)) \\\n for i, filename in enumerate(input_files)]\n pool.starmap(process_sub_image, arguments)\n # put all the output json files for subimages together into one for this date\n print(\"\\n Consolidating json from all subimages\")\n all_subimages = consolidate_json_to_list(tmp_json_dir)\n self.save_json(all_subimages, \"network_centralities.json\",\n output_location,\n self.output_location_type)\n shutil.rmtree(tmp_json_dir)\n return True", "def process_subdirectory(subdir_path, regex_images, output_folder_if_pickle,\n min_n_leftside_metadata=10, only_ionogram_content_extraction_on_leftside_metadata=True, to_pickle=True):\n # Run segment_images on the subdirectory \n df_img,df_loss,df_outlier = segment_images(subdir_path, regex_images)\n\n # Determine ionogram grid mappings used to map (x,y) pixel coordinates of ionogram trace to (Hz, km) values\n stack = all_stack(df_img)\n col_peaks,row_peaks,mapping_Hz, mapping_km = get_grid_mappings(stack)\n\n # Translate metadata located on the left\n df_img_left = 
df_img[df_img['metadata_type']== 'left']\n \n if len(df_img_left.index) > min_n_leftside_metadata:\n # Determine leftside metadata grid (pixel coordinates to number, category mappings)\n df_img_left, df_loss_meta,dict_mapping,dict_hist= get_leftside_metadata(df_img_left,subdir_path)\n df_all_loss = df_loss.append(df_loss_meta)\n else:\n df_all_loss = df_loss\n \n # Extract the coordinates of the ionogram trace (black), Map the (x,y) pixel coordinates to (Hz, km) values and Extract select parameters i.e. fmin\n if only_ionogram_content_extraction_on_leftside_metadata:\n df_processed, df_loss_coord = extract_coord_subdir_and_param(df_img_left,subdir_path,col_peaks,row_peaks,mapping_Hz,mapping_km)\n else:\n df_processed, df_loss_coord = extract_coord_subdir_and_param(df_img,subdir_path,col_peaks,row_peaks,mapping_Hz,mapping_km)\n\n df_processed['mapping_Hz'] = [mapping_Hz] * len(df_processed.index)\n df_processed['mapping_km'] = [mapping_km] * len(df_processed.index)\n\n if to_pickle:\n start,subdir_name = ntpath.split(subdir_path[:-1])\n start,dir_name = ntpath.split(start)\n df_processed.to_pickle(os.pardir + '/pickle/' + str(dir_name)+'_'+str(subdir_name)+'.pkl')\n \n df_all_loss = df_all_loss.append(df_loss_coord)\n return df_processed, df_all_loss,df_outlier", "def __init__(self, dataroot, img_size, filelists_path, split):\n self.image_path = dataroot\n self.img_size = img_size\n self.split = split\n self.all_videos = get_image_list(self.image_path, self.split)", "def split_and_save_sub_images(self, image,\n date_string,\n coords_string,\n image_type,\n npix=50):\n\n coords = [float(coord) for coord in coords_string.split(\"_\")]\n sub_images = crop_image_npix(image, npix,\n region_size=self.region_size,\n coords = coords)\n\n output_location = os.path.dirname(self.construct_image_savepath(date_string,\n coords_string,\n 'SUB_'+image_type))\n for i, sub in enumerate(sub_images):\n # sub will be a tuple (image, coords) - unpack it here\n sub_image, sub_coords = sub\n output_filename = f'sub{i}_'\n output_filename += \"{0:.3f}_{1:.3f}\".format(sub_coords[0],\n sub_coords[1])\n output_filename += \"_{}\".format(image_type)\n output_filename += '.png'\n self.save_image(sub_image, output_location, output_filename, verbose=False)\n return True", "def splitDir(dir,split):\n\n # Contents of dir\n dir_list = os.listdir(dir)\n dir_list.sort()\n \n # Split interval\n l = len(dir_list)\n interval = l/split\n \n # Split dir\n for i in range(split - 1):\n new_dir = \"%s_%i\" % (dir,i)\n os.mkdir(new_dir)\n file_list = dir_list[i*interval:(i+1)*interval]\n for f in file_list:\n shutil.copy(os.path.join(dir,f),new_dir)\n\n # Grab last part of directory (even if not divisible by split)\n new_dir = \"%s_%i\" % (dir,i+1)\n os.mkdir(new_dir)\n file_list = dir_list[(i+1)*interval:]\n for f in file_list:\n shutil.copy(os.path.join(dir,f),new_dir)", "def __init__(self,imageDataPath,onlyRGB=False):\r\n self.bands = [] if onlyRGB else [None] * 13\r\n self.bandsNames = [] if onlyRGB else [None] * 13\r\n self.isRgb = onlyRGB\r\n with os.scandir(imageDataPath) as imageSCND:\r\n for imgFolderItem in sorted(imageSCND, key=lambda e: e.name):\r\n if imgFolderItem.is_dir():\r\n self.day = imgFolderItem.name[11:15]\r\n self.month = imgFolderItem.name[15:17]\r\n self.year = imgFolderItem.name[17:19]\r\n self.nameDiscriminator = imgFolderItem.name[-27:-5] #Needed because snap unity\r\n granulePath = imageDataPath + slash + imgFolderItem.name + \\\r\n slash + \"GRANULE\" + slash\r\n #log.debug(\"Granule path is: \" + 
granulePath)\r\n with os.scandir(granulePath) as granuleSCND:\r\n for granuleItem in sorted(granuleSCND, key=lambda e: e.name):\r\n imgDataPath = granulePath + granuleItem.name + \\\r\n slash + \"IMG_DATA\" + slash\r\n #log.debug(\"IMG_DATA path is: \" + imgDataPath)\r\n with os.scandir(imgDataPath) as imgDataSCND:\r\n for band in sorted(imgDataSCND, key=lambda e: e.name):\r\n bandPath = imgDataPath + slash + band.name\r\n if onlyRGB and Bands.TCI.Id in band.name:\r\n #log.debug(\"loading TCI: \" + band.name)\r\n self.bands.append(rasterio.open(bandPath))\r\n self.bandsNames.append(band.name)\r\n return None\r\n elif not onlyRGB and Bands.TCI.Id not in band.name:\r\n #log.debug(\"loading band: \" + band.name)\r\n index = utils.bandIdToIndex( \\\r\n utils.getBandIdFromBandPath(band.name))\r\n self.bands[index] = rasterio.open(bandPath)\r\n self.bandsNames[index] = band.name\r\n return None", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\t# print(path)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\t# print(len(train_imgs))\n\t\t\t# break\n\t\t\tfor imgname in train_imgs:\n\t\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\t\timg = cv2.imread(imgname)\n\t\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\t\timg_label = img[:,:,0]\n\t\t\t\tcv2.imwrite(path_train+\"/\"+midname+\".\"+self.img_type,img_train)\n\t\t\t\tcv2.imwrite(path_label+\"/\"+midname+\".\"+self.img_type,img_label)", "def filelist(basedir):\n day_files = []\n for root, dirs, files in os.walk(basedir):\n for file in files:\n if file.endswith(\".png\"):\n day_files.append(os.path.join(file))\n dates_files = []\n\n for i in day_files:\n year = i.split('_')[1]\n day = i.split('_')[2]\n mounth = i.split('_')[3]\n hour = i.split('_')[4]\n dates_files.append(UTCDateTime(year+'-'+mounth+'-'+day+'T'+hour)-3)\n return sorted(dates_files)", "def lap_split_n(img, n):\n levels = []\n\n print(\"inside lap_split_n function \")\n\n for i in range(n):\n img, hi = lap_split(img)\n levels.append(hi)\n levels.append(img)\n return levels[::-1]", "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "def split_dir(dirr, output_dir, dirs=['train', 'validation', 'test'], split=(.5,.25,.25)):\n\n # get all image paths\n image_paths = []\n for filepath in pathlib.Path(dirr).glob('**/*'):\n image_paths.append(filepath.absolute())\n\n # organize into {class_name:[class_image_paths, ...], ...}\n class_dict = {}\n for i in image_paths:\n fname = str(i).split(\"/\")\n file_name = fname[len(fname)-1]\n class_name = fname[len(fname)-2]\n if class_name not in class_dict.keys():\n class_dict[class_name] = []\n class_dict[class_name].append(str(i))\n\n del class_dict['images'] #I don't know why\n\n # organize into 
{class_name:[[train_paths],[validation_paths],[test_paths]], ...}\n # by given\n for k in class_dict.keys():\n paths = class_dict[k]\n\n train_split = int(len(paths)*split[0])\n validation_split = int(len(paths)*split[1])\n\n train_paths = paths[train_split:]\n validation_paths = paths[train_split:validation_split+train_split]\n test_paths = paths[validation_split+train_split:]\n\n class_dict[k] = [train_paths, validation_paths, test_paths]\n\n # make output dirs\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n os.makedirs(output_dir+\"/\"+dirs[0])\n os.makedirs(output_dir+\"/\"+dirs[1])\n os.makedirs(output_dir+\"/\"+dirs[2])\n\n # move everything\n for k in class_dict.keys():\n for d_i,d in enumerate(dirs):\n\n if not os.path.exists(output_dir+\"/\"+d+\"/\"+k):\n os.makedirs(output_dir+\"/\"+d+\"/\"+k)\n\n for path in class_dict[k][d_i]:\n file_name = path.split(\"/\")\n file_name = file_name[len(file_name)-1]\n copyfile(path, output_dir+\"/\"+d+\"/\"+k+\"/\"+file_name)", "def create_scn_date_imgs(self, start_date, end_date, img_size, out_img_dir, img_format, vec_file, vec_lyr, tmp_dir, order_desc=True):\n out_img_ext = 'png'\n if img_format.upper() == 'PNG':\n out_img_ext = 'png'\n elif img_format.upper() == 'JPEG':\n out_img_ext = 'jpg'\n elif img_format.upper() == 'GTIFF':\n out_img_ext = 'tif'\n else:\n raise EODataDownException(\"The input image format ({}) was recognised\".format(img_format))\n eoddutils = eodatadown.eodatadownutils.EODataDownUtils()\n scn_dates = self.find_unique_scn_dates(start_date, end_date, valid=True, order_desc=order_desc)\n scn_qklks = dict()\n for scn_date in scn_dates:\n print(\"Processing {}:\".format(scn_date[0].strftime('%Y-%m-%d')))\n scns = self.get_scns_for_date(scn_date[0])\n scn_files = []\n for scn in scns:\n ard_file = eoddutils.findFile(scn.ARDProduct_Path, \"*dB*.tif\")\n print(\"\\t{}: {} - {}\".format(scn.PID, scn.Scene_ID, ard_file))\n scn_files.append(ard_file)\n\n # VV, VH, VV/VH\n bands = '1,2,3'\n\n scn_date_str = scn_date[0].strftime('%Y%m%d')\n quicklook_img = os.path.join(out_img_dir, \"sen1_qklk_{}.{}\".format(scn_date_str, out_img_ext))\n import rsgislib.tools.visualisation\n rsgislib.tools.visualisation.createQuicklookOverviewImgsVecOverlay(scn_files, bands, tmp_dir,\n vec_file, vec_lyr,\n outputImgs=quicklook_img,\n output_img_sizes=img_size,\n gdalformat=img_format,\n scale_axis='auto',\n stretch_file=self.std_vis_img_stch,\n overlay_clr=[255, 255, 255])\n scn_qklks[scn_date_str] = dict()\n scn_qklks[scn_date_str]['qkimage'] = quicklook_img\n scn_qklks[scn_date_str]['scn_date'] = scn_date[0]\n return scn_qklks", "def split_folder(data_dir, train_pct, val_pct):\n\n random.seed(1)\n\n IMG_SUFFIX = '*_sat.jpg'\n MASK_SUFFIX = '*_msk.png'\n\n glob_imgs = os.path.join(data_dir,IMG_SUFFIX)\n glob_masks = os.path.join(data_dir, MASK_SUFFIX)\n\n img_paths = np.array(sorted(glob.glob(glob_imgs)))\n mask_paths = np.array(sorted(glob.glob(glob_masks)))\n \n num_imgs = len(img_paths)\n index_lst = list(range(num_imgs))\n\n random.shuffle(index_lst)\n\n train_idx_bound = int(train_pct * num_imgs)\n train_imgs = img_paths[index_lst[:train_idx_bound]]\n train_masks = mask_paths[index_lst[:train_idx_bound]]\n\n val_idx_bound = int((train_pct + val_pct) * num_imgs)\n val_imgs = img_paths[index_lst[train_idx_bound: val_idx_bound]]\n val_masks = mask_paths[index_lst[train_idx_bound: val_idx_bound]]\n\n test_imgs = img_paths[index_lst[val_idx_bound:]]\n test_masks = mask_paths[index_lst[val_idx_bound:]]\n\n # Write the lists to 
their own directories\n copy_list_to_dir(train_imgs, \"train\")\n print(\"Moved images into: train\")\n copy_list_to_dir(train_masks, \"train\")\n print(\"Moved masks into: train\")\n copy_list_to_dir(val_imgs, \"val\")\n print(\"Moved images into: val\")\n copy_list_to_dir(val_masks, \"val\")\n print(\"Moved masks into: val\")\n copy_list_to_dir(test_imgs, \"test\")\n print(\"Moved images into: test\")\n copy_list_to_dir(test_masks, \"test\")\n print(\"Moved masks into: test\")", "def split_image(origindatadir,traindir,overload = False):\n \"\"\"origindatadir: from where to import train_data\"\"\"\n \"\"\"traindir: where to save the split data \"\"\"\n \"\"\"overload: if True and traindir and data already exist, delete traindir and split origin data again\"\"\"\n if not os.path.exists(origindatadir):\n return\n cats_dir = traindir+'/cats'\n dogs_dir = traindir+'/dogs'\n if not os.path.exists(traindir):\n os.mkdir(traindir)\n os.mkdir(cats_dir)\n os.mkdir(dogs_dir)\n else:\n #print(traindir)\n if get_subdir_filenum(traindir) > 0:\n if overload:\n shutil.rmtree(traindir)\n os.mkdir(traindir) \n os.mkdir(cats_dir)\n os.mkdir(dogs_dir)\n else:\n print(\"Destination directory already exist:\",traindir)\n return\n #开始复制\n filenames = os.listdir('train')\n for file in filenames:\n if str(file).startswith('cat'):\n shutil.copyfile(origindatadir+'/'+file, cats_dir+'/'+file) \n elif str(file).startswith('dog'):\n shutil.copyfile(origindatadir+'/'+file, dogs_dir+'/'+file)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def remove_unseasonal_images(data, date_inf=\"15-05\", date_sup=\"15-10\"):\n os.makedirs(os.path.join(data, 'removed'), exist_ok=True)\n for product in os.listdir(data):\n if product.startswith('product') is False:\n continue\n path = os.path.join(data, product)\n if os.path.isdir(path) is False:\n continue\n if seasonal(path, date_inf, date_sup) is False:\n print('\\tRemoving ' + path)\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, 'removed', product + '.tiff'))\n shutil.rmtree(path)", "def B_VER(self, ver_path, dlist): \n for folder in dlist:\n dpath = os.path.join(ver_path, folder)\n #Ensure folder exists\n assert(os.path.exists(dpath)),\"B_VER Error: Folder {folder} cannot be found in {ver_path}\".format(folder=folder, ver_path=ver_path)\n \n csvpath = os.path.join(dpath, 'data.csv')\n #Check for data.csv\n assert(os.path.isfile(csvpath)),\"B_VER Error: data.csv could not be found in {dpath}\".format(dpath=dpath)\n\n #Verify 4 columns in csv\n df = pd.read_csv(csvpath)\n assert(len(df.columns) == 4),\"B_VER Error: Need four columns in data.csv in {csvpath}\".format(csvpath=csvpath)\n\n #The number of jpg files in this directory should equal num rows\n num_jpgs = len(glob.glob1(dpath, \"*.jpg\"))\n assert(len(df) == num_jpgs),\"B_VER Error: num_jpgs in {dpath} must = num_rows in data.csv\".format(dpath=dpath)\n\n #Index the first 20 and last 20, and ensure that those images exist\n for i in range(20):\n img_name = df.iloc[i, 0]\n framepath = os.path.join(dpath, img_name)\n frame = cv2.imread(framepath)\n assert(os.path.isfile(framepath)), \"B_VER Error: frame {framepath} is not a path, but is in the csv\".format(framepath=framepath)\n\n for i in range(20):\n img_name = df.iloc[-i, 0]\n framepath = os.path.join(dpath, img_name)\n frame = cv2.imread(framepath)\n assert(os.path.isfile(framepath)), \"B_VER Error: frame {framepath} is not a path, but is in the 
csv\".format(framepath=framepath)", "def splitTrainAndTest(root_path,split_ratio = 0.8,save_path = \"/usr/\",isWrite = False):\r\n train_file_paths = []\r\n val_file_paths = []\r\n root = pathlib.Path(root_path)\r\n cate_list = root.glob(\"*\")\r\n\r\n # recurrent each a cate\r\n for cate in cate_list:\r\n\r\n file_list = list(map(str,list(cate.glob(\"*\"))))\r\n # no video sequence\r\n np.random.shuffle(file_list)\r\n length = len(file_list)\r\n train_file_paths.extend(file_list[:int(split_ratio*length)])\r\n val_file_paths.extend(file_list[int(split_ratio*length):])\r\n # write file\r\n if(isWrite):\r\n dic = {\"train_file_paths\":train_file_paths,\"val_file_paths\":val_file_paths}\r\n with open(save_path,\"w\") as file:\r\n json.dump(dic,file)\r\n return train_file_paths,val_file_paths", "def split_volume(image_name, output_name):\n nim = nib.load(image_name)\n Z = nim.header['dim'][3]\n affine = nim.affine\n image = nim.get_data()\n\n for z in range(Z):\n image_slice = image[:, :, z]\n image_slice = np.expand_dims(image_slice, axis=2)\n affine2 = np.copy(affine)\n affine2[:3, 3] += z * affine2[:3, 2]\n nim2 = nib.Nifti1Image(image_slice, affine2)\n nib.save(nim2, '{0}{1:02d}.nii.gz'.format(output_name, z))", "def split_test_train(train_folder_path, train_labels, test_folder, n_test_images):\n\n os.makedirs(test_folder, exist_ok=True)\n\n data = read_csv_to_list(train_labels)\n # Prepare test labels and move images to new folder\n labels = []\n for img in data[1:n_test_images]:\n # Input and new image paths\n # print(type(train_folder_path),type(img[0]))\n img_path = train_folder_path / (img[0] + \".dcm\")\n new_img_path = test_folder / (img[0] + \".dcm\")\n if Path(img_path).exists(): # there can be several annotations per image\n shutil.move(img_path, new_img_path)\n labels.append(img)\n\n # Prepare train labels. 
Removes duplicate as we dont need them.\n train_labels = []\n img_list_names = []\n for idx, label in enumerate(data[n_test_images + 1 :]):\n if (label[0] in img_list_names) and (idx != 0):\n continue\n img_list_names.append(label[0])\n train_labels.append(label)\n\n # labels.insert(0, data[0])\n # train_labels.insert(0, data[0])\n return train_labels, labels", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def split_list(self, num_bit = 5):\n jpg_list = listdir(join(self.dataset_dir, 'data/jpg_images'))\n label_list = listdir(join(self.dataset_dir, 'data/label_mat'))\n split_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/split.mat'))\n train_raw = split_raw['trainIds']\n test_raw = split_raw['testIds']\n val_raw = split_raw['valIds']\n pattern_index = \"%0\" + str(num_bit) + \"d\"\n training_list = []\n for i in range(train_raw.shape[0]):\n train_id = pattern_index % train_raw[i, 0]\n if (((train_id + '.jpg') in jpg_list) and ((train_id + '.mat') in label_list)):\n training_list.append(train_id)\n test_list = []\n for i in range(test_raw.shape[0]):\n test_id = pattern_index % test_raw[i, 0]\n if (((test_id + '.jpg') in jpg_list) and ((test_id + '.mat') in label_list)):\n test_list.append(test_id)\n val_list = []\n for i in range(val_raw.shape[0]):\n val_id = pattern_index % val_raw[i, 0]\n if (((val_id + '.jpg') in jpg_list) and ((val_id + '.mat') in label_list)):\n val_list.append(val_id)\n return [training_list, test_list, val_list]", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X_test = []\n y_test = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y_test.append(label)\n X_test.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X_test,y_test", "def testSplitImage(root):\n\n s1, s2 = splitImage(\"vck.tif\")\n v = OR(s1, s2).view(root)\n return v", "def create_image_lists(image_dir):\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in os.walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n print('in sub loop')\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(image_dir)\n print(\"Looking for images in '\" + image_dir + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(glob.glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n testing_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. 
For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n testing_images.append(base_name)\n return testing_images", "def _submodel_images_path(self, i):\n template = self.config['submodel_images_relpath_template']\n return os.path.join(self.data_path, template % i)", "def read_vanhateren_images (n_imgs=5):\n folder_name = r'D:\\VanHateren\\vanhateren_imc' # change this to point to the directory which holds the van hateren data\n # files = listdir(folder_name)\n onlyfiles = [ f for f in listdir(folder_name) if isfile(join(folder_name,f)) ]\n imgs = []\n for i in range(n_imgs):\n filename = join(folder_name, onlyfiles[i])\n with open(filename, 'rb') as handle:\n s = handle.read()\n arr = array.array('H', s)\n arr.byteswap()\n img_i = np.array(arr, dtype='uint16').reshape(1024, 1536)\n imgs.append(img_i) \n return imgs\n #pylab.imshow(img)\n #pylab.show()", "def to_nifti(self,folder_path: str):\n data_path = settings.STORAGE_DIR\n path = folder_path \n nifti=series.get_series_object(path) \n nifti_str=str(nifti)\n nifti_str=nifti_str[1:44]\n if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT': \n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')\n if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':\n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n nifti.set_ExportType('suv')\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')" ]
[ "0.595133", "0.58945966", "0.58325934", "0.5802168", "0.57542825", "0.5715518", "0.5714086", "0.5603562", "0.557431", "0.5570771", "0.5569425", "0.55580586", "0.55430764", "0.5533043", "0.55239487", "0.55221367", "0.5467713", "0.5467154", "0.54571563", "0.53589416", "0.5356229", "0.5350711", "0.5338257", "0.53343195", "0.53028274", "0.5295435", "0.5294542", "0.5292848", "0.529261", "0.5292568" ]
0.6070169
0
Create a tailored SVM classifier to further use to predict
def custom_training(nb_tweet_sample, randomised, equal_pos_neg, language,
                    name_kernel, Resource, keep_null_vector):
    m_features, m_labels = get_characteristic_label_vectors(nb_tweet_sample, randomised,
                                                            equal_pos_neg, Resource,
                                                            keep_null_vector, language)
    kernel = Kernel.get_correct_kernel(name_kernel)
    custom_SVM = SVM(kernel)
    custom_SVM.fit(m_features, m_labels)
    return custom_SVM
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_SVM(data: np.array, labels: np.array)->None:\n print(\"SVM is not implemented yet!\")", "def svm():", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_svm.predict(data)", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n import sklearn\n from sklearn import svm\n\n \"*** YOUR CODE HERE ***\"\n self.sklearn_classifier = svm.SVC(C=2, gamma=0.025, decision_function_shape='ovo', tol=0.015)\n self.sklearn_classifier.fit(trainingData, trainingLabels)", "def train_svm_model(self, X_train, X_test, y_train, y_test):\r\n clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),\r\n ('clf', LinearSVC())])\r\n clf = clf.fit(X_train, y_train)\r\n pred = clf.predict(X_test)\r\n print('Confusion matrix\\n',confusion_matrix(y_test,pred))\r\n print('Classification_report\\n',classification_report(y_test,pred))\r\n return clf", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n from sklearn import svm\n \n \"*** YOUR CODE HERE ***\"\n self.sklearn_svm = svm.SVC(C=5, kernel='rbf', gamma=0.005, decision_function_shape='ovo')\n self.sklearn_svm.fit(trainingData, trainingLabels)", "def __init__(self,training_data,default_kernel=\"rbf\"):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVC(class_weight='auto',cache_size=DEFAULT_CACHE_SIZE, kernel=default_kernel)\n self.classifier.fit(X, Y)", "def svm(xtrain, ytrain, xtest, ytest,labels_mapping, scaled = False):\n if not scaled :\n scaler = StandardScaler()\n xtrain = scaler.fit_transform(xtrain)\n xtest = scaler.transform(xtest)\n\n svm = SVC(C=10, cache_size=1500, class_weight='balanced')\n svm = __train_and_test(svm, xtrain, ytrain, xtest, ytest,labels_mapping)\n return svm", "def svm_classify(train_image_feats, train_labels, test_image_feats, kernel_type):\r\n\r\n categories = np.unique(train_labels)\r\n # [Desc] make 15 different SVM solver (one(each category) vs. 
the other(14 other category))\r\n svc_list = []\r\n num_categories = len(categories)\r\n for cat_i in tqdm(range(num_categories)):\r\n category = categories[cat_i]\r\n if kernel_type == 'RBF':\r\n svc = svm.SVC(kernel='rbf', probability=True)\r\n elif kernel_type == 'linear':\r\n svc = svm.SVC(kernel='linear', probability=True)\r\n new_label_for_svm = np.where(train_labels == category, 1, 0)\r\n\r\n svc.fit(train_image_feats, new_label_for_svm)\r\n svc_list.append(svc)\r\n\r\n # [Desc] get test images' class using trained svm\r\n probability_list = []\r\n for cat_i in range(num_categories):\r\n svc = svc_list[cat_i]\r\n logit = svc.decision_function(test_image_feats)\r\n probability = logit\r\n probability_list.append(probability)\r\n probability_mat = np.array(probability_list)\r\n probability_mat = np.transpose(probability_mat)\r\n # [Desc] get each class to argmax each logit value.\r\n argmax_class = np.argmax(probability_mat, axis=1)\r\n\r\n return categories[argmax_class]", "def setup_svm_classifier(training_data, y_training, testing_data, features, method=\"count\", ngrams=(1,1)):\n # generate x and y training data\n\n if method == \"count\":\n vec, x_training, x_testing = define_features_vectorizer(features, training_data, testing_data,ngramrange=ngrams)\n elif method == \"tfidf\":\n vec, x_training, x_testing = define_features_tfidf(features, training_data, testing_data,ngramrange=ngrams)\n else:\n print(\"Method has to be either count or tfidf\")\n return 1\n\n # train classifier\n\n model = SVMClassifier_scratch()\n model.fit(x_training, y_training)\n\n return model, vec, x_testing", "def setup_svm(self, classifier_name=\"SVM\", **kwargs):\n if not classifier_name in self.classifiers:\n clf = svm.SVC(**kwargs)\n clf.fit(self.X_train, self.y_train)\n self.classifiers[classifier_name] = clf", "def __init__(self, foundVariables, trainingData, trainingClasses, trainingWeights, testingData, testingClasses):\n self.clf = svm.SVC(probability=True)\n self.foundVariables = foundVariables\n self.trainingData = trainingData\n self.trainingClasses = trainingClasses\n self.testingData = testingData\n self.testingClasses = testingClasses\n self.trainingWeights = trainingWeights", "def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.LinearSVC(class_weight='auto',C=1.0)\n self.classifier.fit(X, Y)", "def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n self.n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #pre-processing data\n X = preprocessing.scale(np.hsplit(my_data,[self.n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[self.n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[self.n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVR(kernel='linear', C=1e3, cache_size=DEFAULT_CACHE_SIZE)\n #self.classifier = svm.SVR(kernel='rbf', C=1e3, gamma=0.1, cache_size=DEFAULT_CACHE_SIZE)\n 
self.classifier.fit(X, Y)", "def __init__(self,training_data, outliers_proportion,base_nu=0.95,min_nu=0.05,default_kernel=\"rbf\"):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n\n #preprocessing data\n X = preprocessing.scale(my_data)\n\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(my_data)\n\n #define classifier\n self.classifier = svm.OneClassSVM(nu=((base_nu*outliers_proportion)+min_nu), kernel=default_kernel, gamma=0.1, cache_size=DEFAULT_CACHE_SIZE)\n self.classifier.fit(X)", "def svm_clf_training(max_features, data):\r\n X_train, y_train, X_test, y_test = data\r\n clf = Pipeline([('feature_selection', SelectKBest(score_func=chi2, k=max_features)),\r\n ('clf', svm.SVC(C=1., kernel='linear'))])\r\n\r\n vectorizer = CountVectorizer(ngram_range=(1, 2), lowercase=True) # unigrams and bigrams\r\n X_matrix_tr = vectorizer.fit_transform(X_train)\r\n # parameters = [{'clf__kernel': ['linear'], 'clf__C': [0.1, 1, 10, 100]},\r\n # {'clf__kernel': ['rbf'], 'clf__C': [0.1, 1, 10, 100], 'clf__gamma': [0.001, 0.01, 0.1]},\r\n # {'clf__kernel': ['poly'], 'clf__C': [0.1, 1, 10, 100], 'clf__degree': [2, 3, 4, 5]}]\r\n # clf = GridSearchCV(svc, parameters, scoring='accuracy')\r\n clf.fit(X_matrix_tr, y_train)\r\n # print(\"Best parameters set found on development set:\")\r\n # print()\r\n # print(clf.best_estimator_)\r\n # print()\r\n # print(\"Grid scores on development set:\")\r\n # print()\r\n # for params, mean_score, scores in clf.grid_scores_:\r\n # print(\"%0.3f (+/-%0.03f) for %r\"\r\n # % (mean_score, scores.std() / 2, params))\r\n # print()\r\n voc = vectorizer.get_feature_names()\r\n # vectorizer1 = CountVectorizer(ngram_range=(1, 2), lowercase=True, vocabulary=voc)\r\n # X_matrix_val = vectorizer1.fit_transform(X_test)\r\n # y_pred = clf.predict(X_matrix_val)\r\n\r\n # for i in range(len(X_test)):\r\n # if y_test[i] != y_pred[i]:\r\n # print(X_test[i], y_test[i], y_pred[i])\r\n # print(classification_report(y_test, y_pred))\r\n return clf, voc", "def SVM():\n x1, x2 = generate_training_data_2D()\n Y = np.concatenate([np.zeros(x1.shape[0], dtype=np.int32),\n np.ones(x2.shape[0], dtype=np.int32)])\n X = np.concatenate([x1, x2], axis=0)\n rng = np.random.get_state()\n np.random.shuffle(X)\n # Set the random state back to previous to shuffle X & Y similarly\n np.random.set_state(rng)\n np.random.shuffle(Y)\n\n models, titles = get_fitted_svm(X, Y)\n\n plot_decision_boundary(X, Y, models, titles)", "def SVM(x_train, x_test, y_train, kernel):\n svc_model = SVC(kernel=kernel)\n svc_model.fit(x_train, y_train)\n y_pred = svc_model.predict(x_test)\n return y_pred", "def trainNewModel():\n print \"Creating feature vectors for trainset...\"\n trainDependencies = getDependency(trainDepFilename)\n trainLabel, trainFeatureVectors = \\\n createFeatureVectors(trainFilename, trainDependencies)\n print \"Length of feature vector for trainset: %d\" \\\n % len(trainFeatureVectors[0])\n if not len(addTrainsetList) == 0:\n print \"Combining feature vectors of additional trainset...\"\n trainLabel, trainFeatureVectors = \\\n combineAdditionalTrainset(\n addTrainsetList, trainLabel, trainFeatureVectors)\n print \"Feature vectors of trainset created.\"\n SVMTrain(trainLabel, trainFeatureVectors, modelFilename)", "def __init__(self):\n self.svclassifier = SVC(kernel='linear')", "def _prediction(features, labels, threshold, size_sample, randomised, equal_pos_neg, name_kernel, custom_SVM=None):\n if custom_SVM:\n Classifier = custom_SVM\n else:\n Classifier = 
load_classifier(size_sample, randomised, equal_pos_neg, name_kernel)\n name_file = str(construct_name_file(size_sample, randomised, equal_pos_neg, name_kernel).split(\".json\")[0])\n return name_file, _performance(Classifier, features, labels, threshold)", "def train_model(kernel, label):\n clf = svm.SVC(kernel='precomputed')\n clf.fit(kernel,label)\n return clf", "def train_svm(data: np.ndarray, test_labels: list, test_samples: list, train_labels: list,\n train_samples: list) -> np.ndarray:\n model = svm.SVC(kernel=\"rbf\", C=1024, gamma=2)\n model.fit(get_data_by_indexes(train_samples, data), train_labels)\n prediction = model.predict(get_data_by_indexes(test_samples, data))\n print(\"SVM fitness score {0:5.2f}%\".format(\n model.score(get_data_by_indexes(test_samples, data), test_labels) * float(100)))\n return prediction", "def __init__(self, train_x, train_y, test_x, test_y, Tunning_Cs=[0.001, 0.01, 0.1, 1, 10]): \n self.Cs = Tunning_Cs\n self.train_x = train_x\n self.train_y = train_y\n self.test_x = test_x \n self.test_y = test_y\n self.model = svm.SVR(kernel='rbf', gamma='auto')", "def generate_classifier(templates_path_pattern, hyperparams):\n # Get all paths\n cars, notcars = get_templates(templates_path_pattern)\n # Extract car & non-car features\n car_features = get_features_images(cars, hyperparams)\n notcar_features = get_features_images(notcars, hyperparams)\n # Create an array stack of feature vectors\n X = np.vstack((car_features, notcar_features)).astype(np.float64)\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=rand_state)\n # Use a linear SVC\n X_scaler = StandardScaler()\n svc = LinearSVC()\n clf = Pipeline(steps=[('StandardScaler', X_scaler), ('LinearSVC', svc)])\n clf.fit(X_train, y_train)\n # Check the prediction time for a single sample\n return clf, X_test, y_test", "def classification(self,a_train,a_test,c_train,c_test,classifier):\n le =LabelEncoder()\n le.fit(c_train)\n c_train = le.transform(c_train)\n c_test = le.transform(c_test)\n if classifier==\"GNB\": #Gaussian Naive Bayes\n gnb = GaussianNB()\n gnb.fit(a_train, c_train)\n c_pred = gnb.predict(a_test)\n elif classifier==\"DT\": #Decision Tree\n dt=DecisionTreeClassifier()\n dt.fit(a_train, c_train)\n c_pred = dt.predict(a_test)\n elif classifier==\"KNN\": #K-Next-Neighbors\n kn=KNeighborsClassifier(n_neighbors=5)\n kn.fit(a_train, c_train)\n c_pred = kn.predict(a_test)\n elif classifier==\"RF\": #Random Forest\n rf=RandomForestClassifier()\n rf.fit(a_train, c_train)\n c_pred = rf.predict(a_test)\n elif classifier==\"SVC\": # Support Vector Classifier\n \"\"\"\n SVC needs normalisation of Feature Values to scale of [-1,1] or [0,1] depending on sign of them\n \"\"\"\n if a_train.min()<0:\n mms = MinMaxScaler(feature_range=(-1,1))\n else:\n mms = MinMaxScaler()\n mms.fit(a_train)\n a_train = mms.transform(a_train)\n a_test = mms.transform(a_test)\n svc=SVC(cache_size=2000,C=1, probability=True,kernel='rbf')\n svc.fit(a_train,c_train)\n #c_pred = svc.predict(a_test) did not work, that's why it is predicted manual\n new_prob = svc.predict_proba(a_test)\n samples=new_prob.shape[0]\n c_pred= np.array\n for k in range(samples):\n c_pred=np.append(c_pred,new_prob[k].argmax())\n c_pred = c_pred[1:samples+1]\n elif classifier==\"DC\": #Dummy Classifier\n 
dc=DummyClassifier(strategy=\"uniform\")\n dc.fit(a_train, c_train)\n c_pred = dc.predict(a_test)\n elif classifier==\"GMM\": #Gaussian Mixture Modell\n #number of existing classes get passed to the GMM (n_classes)\n n_classes_train = len(np.unique(c_train))\n n_classes_test = len(np.unique(c_test))\n if n_classes_train>n_classes_test:\n n_classes = n_classes_train\n else:\n n_classes = n_classes_test\n #init_params='', because initial values get calculated manual\n gmm = GMM(n_components=n_classes,init_params='')\n #array of feature values of class i get extracted for further process\n gmm.means_=np.array([a_train[c_train==i,:].mean(axis=0) for i in xrange(n_classes)])\n gmm.weights_=np.array([a_train[c_train==i,:].shape[0]/float(c_train.shape[0]) for i in xrange(n_classes)])\n \n gmm_covars = np.zeros((a_train.shape[1]))\n for i in xrange(n_classes):\n valuesOfClassi = a_train[c_train==i,:]\n valuesOfClassi = np.asarray(valuesOfClassi).T\n matrixOfCov = np.cov(valuesOfClassi)+gmm.min_covar*np.eye(valuesOfClassi.shape[0])\n variance = np.array([matrixOfCov[j,j] for j in xrange(matrixOfCov.shape[0])])\n gmm_covars=np.vstack((gmm_covars,variance))\n gmm_covars=gmm_covars[1:,:] #deletes initial row with zeros\n \n gmm.covars_=gmm_covars\n c_pred = gmm.predict(a_test)\n \n c_pred=le.inverse_transform(c_pred)\n return c_pred", "def svm_train_classifier(self):\n\n # needed because a SVM needs more than 1 class\n if len(self.saved_gestures.keys()) <= 1:\n print(\"Not enough gestures!\")\n else:\n training_data = []\n categories = []\n id = 0\n\n for gesture, value in self.saved_gestures.items():\n id += 1\n # needed to map the id returned from the SVM to a name of a gesture\n self.category_to_gesture[id] = gesture\n categories.append(id)\n\n x = []\n y = []\n z = []\n for elem in value:\n x.append(elem[0][0])\n y.append(elem[1][0])\n z.append(elem[2][0])\n\n training_data.append(self.get_fft(x, y, z))\n\n # normalized length of fft\n self.cutoff_length = min([len(l) for l in training_data])\n\n normalized_fft = []\n for l in training_data:\n normalized_fft.append(l[:self.cutoff_length])\n\n training_data = normalized_fft\n\n self.classifier.fit(training_data, categories)", "def scikit_learn_classifier_comparison_example():\n\n # Code source: Gael Varoqueux\n # Andreas Mueller\n # Modified for Documentation merge by Jaques Grobler\n # Modified to serve as a MinCq example by Jean-Francis Roy\n # License: BSD 3 clause\n\n h = .02 # step size in the mesh\n\n names = [\"Linear SVM\", \"RBF SVM\", \"AdaBoost\", \"Linear MinCq\", \"RBF MinCq\", \"Stumps MinCq\"]\n classifiers = [\n SVC(kernel=\"linear\", C=0.025),\n SVC(gamma=2, C=1),\n AdaBoostClassifier(),\n MinCqLearner(mu=0.01, voters_type=\"kernel\", kernel=\"linear\"),\n MinCqLearner(mu=0.01, voters_type=\"kernel\", kernel=\"rbf\", gamma=2),\n MinCqLearner(mu=0.01, voters_type=\"stumps\"),\n ]\n\n X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,\n random_state=1, n_clusters_per_class=1)\n\n rng = np.random.RandomState(2)\n X += 2 * rng.uniform(size=X.shape)\n linearly_separable = (X, y)\n\n datasets = [make_moons(noise=0.3, random_state=0),\n make_circles(noise=0.2, factor=0.5, random_state=1),\n linearly_separable\n ]\n\n figure = pl.figure(figsize=(27, 9))\n i = 1\n # iterate over datasets\n for ds in datasets:\n # preprocess dataset, split into training and test part\n X, y = ds\n y[y == 0] = -1\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)\n\n x_min, 
x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = pl.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n ax = pl.subplot(len(datasets), len(classifiers) + 1, i)\n # Plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n i += 1\n\n # iterate over classifiers\n for name, clf in zip(names, classifiers):\n ax = pl.subplot(len(datasets), len(classifiers) + 1, i)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,\n alpha=0.6)\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(name)\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\n size=15, horizontalalignment='right')\n i += 1\n\n figure.subplots_adjust(left=.02, right=.98)\n pl.show()", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def make_classifiers(NAMES) :\r\n\r\n# if len(data_shape) != 2:\r\n# raise ValueError(\"Only 2-d data allowed (samples by dimension).\")\r\n\r\n classifiers = {\r\n \"Chance\": DummyClassifier(strategy=\"most_frequent\"),\r\n \"Nearest Neighbors\": KNeighborsClassifier(3),\r\n \"Linear SVM\": LinearSVC(penalty='l2', C=1,# probability=True,\r\n class_weight='balanced'),\r\n # sahil changed the configuration from \"probability\" True to False (probability\r\n # based inference doesn't work well in SVM models from part experiences,\r\n # as SVM original algorithm just split the data with no probablistic notion of inference.)\r\n \"RBF SVM\": SVC(gamma=2, C=1, probability=False),\r\n \"Decision Tree\": DecisionTreeClassifier(max_depth=None,\r\n max_features=\"auto\"),\r\n \"Random Forest\": RandomForestClassifier(max_depth=None,\r\n n_estimators=20,\r\n max_features=\"auto\",\r\n n_jobs=PROCESSORS),\r\n \"Logistic Regression\": LogisticRegression(penalty='l1',\r\n class_weight='balanced'),\r\n \"Naive Bayes\": GaussianNB(),\r\n \"LDA\": LDA(),\r\n \"SGD_logL1\": SGDClassifier(random_state=1952,loss='log', average = 3,\r\n penalty='l1',\r\n alpha=1e-3,\r\n class_weight='balanced'),\r\n \"SGD_log_elastic\": SGDClassifier(random_state=1952,loss='log',\r\n class_weight='balanced',\r\n alpha=1e-3,\r\n average = 3,\r\n penalty='elasticnet'),\r\n \"SGD_SVM_elastic\": SGDClassifier(random_state=1952,loss='log',\r\n class_weight='balanced',\r\n average = 3,\r\n alpha=1e-3,\r\n penalty='elasticnet'),\r\n\r\n # Sahil commented the two classiifer below as not able to install the packages\r\n # \"CGC_log_L1\": CDClassifier(penalty=\"l1\",\r\n # 
loss=\"log\",\r\n # multiclass=False,\r\n # max_iter=200,\r\n # C=1,\r\n # tol=1e-3),\r\n # \"SDCA_SVM_elastic\": SDCAClassifier(\r\n # loss=\"hinge\",\r\n # max_iter=200,\r\n # tol=1e-3)\r\n #\r\n }\r\n\r\n params = {\r\n \"Chance\": {},\r\n # 3, 4, 5\r\n # , 6, 7, 8, 10, 12, 15, 20, 30, 50, 75, 100\r\n #\r\n #\r\n \"Nearest Neighbors\": {\"n_neighbors\": [1, 2, 3, 5, 10, 20, 50, 75, 100, 150, 200, 250]},\r\n \"Linear SVM\": {\"C\": [0.1, 0.2, 0.3, 0.4, 0.5, 1, 1.5, 2],\r\n \"loss\":['hinge', 'squared_hinge']},\r\n \"RBF SVM\": {\"kernel\": [\"rbf\"],\r\n \"gamma\": np.logspace(-2, 0, 6).tolist() + \\\r\n np.logspace(0,1,5)[1:].tolist(),\r\n \"C\": np.logspace(-2, 2, 5).tolist()},\r\n \"Decision Tree\": {},\r\n \"Random Forest\": {\"max_depth\": np.round(np.logspace(np.log10(2), \\\r\n 1.2, 6)).astype(int).tolist()},\r\n \"Logistic Regression\": {\"C\": np.logspace(-2, 3, 6).tolist()},\r\n \"Naive Bayes\": {},\r\n \"LDA\": {},\r\n \"SGD_logL1\": {\"alpha\": np.logspace(-5, 2, 7)},\r\n \"SGD_log_elastic\": {\"alpha\": np.logspace(-5, 2, 6),\r\n \"l1_ratio\": 10**np.array([-2, -1, -.5, -.25,\r\n -.12, -.06, -.01])},\r\n \"SGD_SVM_elastic\": {\"alpha\": np.logspace(-5, 2, 6),\r\n \"l1_ratio\": 10**np.array([-2, -1, -.5, -.25,\r\n -.12, -.06, -.01])},\r\n \"CGC_log_L1\": {\"alpha\": np.logspace(-5, 2, 6)},\r\n \"SDCA_SVM_elastic\": {\"alpha\": np.logspace(-4, 4, 5),\r\n \"l1_ratio\": 10**np.array([-3,-2, -1, np.log10(.5),\r\n np.log10(.9)])}\r\n }\r\n out_classifiers = {cname: classifiers[cname] for cname in NAMES}\r\n out_params = {cname: params[cname] for cname in NAMES}\r\n logging.info(\"Using classifiers %r with params %r\" % (out_classifiers,\r\n out_params))\r\n return classifiers, params" ]
[ "0.74245536", "0.72653663", "0.72029406", "0.71481", "0.71425575", "0.71343595", "0.7121671", "0.71049994", "0.70486885", "0.69977236", "0.6995051", "0.69354665", "0.6934409", "0.69329035", "0.6879862", "0.68783337", "0.6872857", "0.6791274", "0.67844033", "0.677671", "0.67685807", "0.6761286", "0.675765", "0.66792005", "0.6678343", "0.6666634", "0.6640107", "0.66309816", "0.6627829", "0.6613856" ]
0.7278228
1
Checks a single file and reports imbalanced quotes.
def check_file(filename): for line, imbalance in check_file_balance(filename): print '{name}:{line} {imbalance} quotes are imbalanced'.format( name=filename, line=line, imbalance=' and '.join(imbalance))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_duplicate_quotes(self):\n\n # Setup\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes8.txt\")\n\n # Call function being tested\n with self.assertRaisesRegexp(Exception, re.escape(\"a duplicate quote was found on line 5 of '{}'. Quote: \\\"The depressing thing about tennis is that no matter how good I get, I'll never be as good as a wall.\\\"\".format(path))):\n api.read_quotes(path)", "def test_read_quotes(self):\n # Setup\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes1.txt\")\n\n # Call function being tested\n quotes = api.read_quotes(path)\n\n # Check results\n self.assertEqual(4, len(quotes))", "def test_add_quote_but_file_contains_quote_already(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes1.txt\")\n quote = api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"])\n api.add_quote(path, quote)\n\n with self.assertRaisesRegexp(Exception, re.escape(\n 'the quote \"This is an added quote.\" is already in the quote file {0}.'.format(path))):\n api.add_quote(path, quote)", "def test_read_quotes_empty_file(self):\n path = os.path.join(self.tempdir, \"emptyfile.txt\")\n # Create empty file\n open(path, 'a').close()\n quotes = api.read_quotes(path)\n self.assertEqual(0, len(quotes))", "def test_read_quotes_no_final_newline(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes2.txt\")\n quotes = api.read_quotes(path)\n self.assertEqual(4, len(quotes))", "def test_read_quotes_with_double_quote_in_quotefile(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes7.txt\")\n with self.assertRaisesRegexp(Exception, re.escape(\n \"syntax error on line 2 of {0}: the quote included a (\\\") character. Line with error: \\\"They that can give up essential liberty to obtain a little temporary safety deserve neither liberty nor \\\" safety.|Ben Franklin||U\\\"\".format(path))):\n api.read_quotes(path)", "def process_file(file_path):\n\n enc = detect_bom_encoding(file_path)\n if enc is None:\n with open(file_path, 'r') as f:\n result = run_checks(file_path, f)\n else:\n with open(file_path, 'r', encoding=enc) as f:\n result = run_checks(file_path, f)\n print('Finished processing %s\\n' % file_path)\n return result", "def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or corrupted\" % str(filename))", "def test_read_quotes_blank_lines(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes3.txt\")\n quotes = api.read_quotes(path)\n self.assertEqual(4, len(quotes))", "def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. 
if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION", "def test_read_quotes_fnf(self):\n path = os.path.join(self.tempdir, \"fakename.txt\")\n with self.assertRaisesRegexp(Exception, re.escape(\"The quote file '{0}' was not found.\".format(path))):\n api.read_quotes(path)", "def check_file(self, check_expression_file):\n with open(check_expression_file) as fh:\n for check_expression_str in fh:\n self.check_expression(check_expression_str.strip())", "def test_onlyQuotes(self):\n runCommand([\"git\", \"checkout\", \"-b\", \"quotefile\"], cwd=self.repo.path)\n\n fun = self.repo.child(\"docs\").child(\"fun\")\n fun.makedirs()\n quotes = fun.child(\"Twisted.Quotes\")\n quotes.setContent(b\"Beep boop\")\n\n runCommand([\"git\", \"add\", quotes.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"quotes\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(logs[-1], \"Quotes change only; no newsfragment needed.\")", "def test_read_quotes_commented_lines(self):\n\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes4.txt\")\n quotes = api.read_quotes(path)\n self.assertEqual(4, len(quotes))", "def checkQuotes(line):\n\n for x in line:\n if x in ('\\'', '\"'):\n if line.count('\\'') % 2 == 0 and line.count('\"') % 2 == 0:\n return True\n else:\n return False", "def test_add_quote(self):\n\n # Set up for test\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes5.txt\")\n quote = api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"])\n\n # Call method being tested\n api.add_quote(path, quote)\n\n # Check results. Read the resulting text file and verify\n with open(path, 'rb') as file:\n data = file.read()\n text_data = data.decode('utf-8')\n expected = u'They that can give up essential liberty to obtain a little temporary safety deserve neither liberty nor safety. | Ben Franklin | | U' + os.linesep + \\\n u'This is an added quote. 
| Another author | Publication | tag1, tag2' + os.linesep\n self.assertEquals(expected, text_data)", "def check_file(filename):\n f = open(filename, 'r')\n html = f.read()\n f.close()\n\n tip = TIParser(filename, html)\n tip.feed(html)\n tip.finish()", "def scan_file(filename):\n bad_lingo = helpers.multiline_to_words()\n lingo_count = len(bad_lingo)\n scanfile = filename\n bad = 0\n with open(scanfile, 'r') as datafile:\n for line in datafile:\n for bad_word in bad_lingo:\n if bad_word in line:\n bad += 1\n bad_file_estimate = bad / len(bad_lingo) * 100\n helpers.clean_workdir()\n return bad_file_estimate", "def test_add_quote_but_file_not_found(self):\n quote = api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"])\n quotefile = os.path.join(self.tempdir, \"fakename.txt\")\n\n with self.assertRaisesRegexp(Exception, re.escape(\"The quote file '{0}' does not exist.\".format(quotefile))):\n api.add_quote(quotefile, quote)", "def check_file(filename):\n\tfile = open(filename, 'r')\n\tfile_content = file.read()\n\tif len(file_content) < 3 or file_content.isspace():\n\t\tfile.close()\n\t\treturn (0, 'File content must begin with a keyword (HEX, BIN or ASC)!')\n\t# First 3 characters should represent the base of the content.\n\tbase = file_content[0:3]\n\tfile_content = file_content[3:]\n\tforbidden_chars = {'BIN': [None], 'HEX': [None]}\n\n\t# Content is claimed to be hexadecimal:\n\tif base == 'HEX':\n\t\tfile_content = ''.join(file_content.split())\n\t\tfile_content = file_content.upper()\n\t\tif len(file_content) < 2:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File must contain at least 1 byte of data after the keyword!')\n\t\tmod = len(file_content) % 2\n\t\tif mod != 0:\n\t\t\treturn (0, 'File must contain full bytes of data (2 hex digits = 1 byte)!')\n\t\t# Use regular expression for verifying the content.\n\t\tif re.match('[0-9A-F]+$', file_content):\n\t\t\tcontent = ''\n\t\t\tfor start in range(0, len(file_content), 2):\n\t\t\t\tif start + 2 <= len(file_content):\n\t\t\t\t\tcontent += file_content[start:start+2] + ' '\n\t\t\t\telse:\n\t\t\t\t\tcontent += file_content[start:]\t\t# add the remainings\n\t\t\t\n\t\t\tcontent = content.rstrip()\t\t# remove possible whitespace at the end\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor val in content.split():\n\t\t\t\tif val in forbidden_chars['HEX']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File must not contain other control characters than TAB, LF or CR!')\n\t\t\t# Return type indicator and the chopped content.\n\t\t\tfile.close()\n\t\t\treturn (1, content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid hexadecimal data!')\n\t\t\t\n\t# Content is claimed to be binary:\n\telif base == 'BIN':\n\t\tfile_content = ''.join(file_content.split())\n\t\tif len(file_content) < 8:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File must contain at least 1 byte of data after the keyword!')\n\t\tmod = len(file_content) % 8\n\t\tif mod != 0:\n\t\t\treturn (0, 'File must contain full bytes of data (8 bits = 1 byte)!')\n\t\t\t\n\t\t# Use regular expression for verifying the content.\n\t\tre.purge()\t\t# clear regex cache\n\t\tif re.match('[0-1]+$', file_content):\n\t\t\tcontent = ''\n\t\t\tfor start in range(0, len(file_content), 8):\n\t\t\t\tif start + 8 <= len(file_content):\n\t\t\t\t\tcontent += file_content[start:start+8] + ' '\n\t\t\t\telse:\n\t\t\t\t\tcontent += file_content[start:]\t\t# add the remainings\n\t\t\t\t\t\n\t\t\tcontent = 
content.rstrip()\t\t# remove possible whitespace at the end\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor val in content.split():\n\t\t\t\tif val in forbidden_chars['BIN']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File must not contain other control characters than TAB, LF or CR!')\n\t\t\t# Return type indicator and the chopped content.\n\t\t\tfile.close()\n\t\t\treturn (2, content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid binary data!')\n\t\t\t\n\t# Content is claimed to be ASCII:\n\telif base == 'ASC':\n\t\tescape_chars = ['\\a', '\\b', '\\f', '\\n', '\\r', '\\t', '\\v']\n\t\tescape_letters = ['a', 'b', 'f', 'n', 'r', 't', 'v']\n\t\t# Use regular expression for verifying the content.\n\t\tre.purge()\t\t# clear regex cache\n\t\tif re.match('[\\x00-\\x7F]+$', file_content):\t\t# [\\x20-\\x7E]\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor c in file_content:\n\t\t\t\tif binascii.hexlify(c).upper() in forbidden_chars['HEX']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File contains illegal control characters!')\n\t\t\tfor c in escape_chars:\n\t\t\t\tif file_content.count(c) != 0:\n\t\t\t\t\tfile_content = file_content.replace(c, '')\t\t\t\t\t\n\t\t\t# Replace all \"\\\\n\", \"\\\\r\" etc. with \"\\n\", \"\\r\" etc. (i.e. remove\n\t\t\t# the extra backslash) so that the control characters are interpreted\n\t\t\t# correctly into hex values.\n\t\t\tfor c in range(0, len(file_content)):\n\t\t\t\tif file_content[c:c+1] == '\\\\':\n\t\t\t\t\tif file_content[c+1:c+2] in escape_letters:\n\t\t\t\t\t\tfor e in escape_letters:\n\t\t\t\t\t\t\tif file_content[c+1:c+2] == e:\n\t\t\t\t\t\t\t\tfile_content = file_content[:c] + escape_chars[escape_letters.index(e)] + file_content[c+2:]\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn (0, 'File contains illegal control characters!\\n\\n' + \n\t\t\t\t\t\t\t\t'Legal characters after a backslash are: a, b, f, n, r, t, and v.')\n\n\t\t\t# Return type indicator and the file content.\n\t\t\tfile.close()\n\t\t\treturn (3, file_content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid ASCII data!')\n\t\t\n\t# Content is invalid:\n\telse:\n\t\tfile.close()\n\t\treturn (0, 'File content must begin with a keyword (HEX, BIN or ASC)!')", "def test_pep8_conformance(self):\n\n files = self.get_files()\n ignore = [\n \"E501\", # skip line length check because 79 is not enough\n \"W504\" # we choose this from 503 and 504\n ]\n pep8style = pycodestyle.StyleGuide(quiet=True, ignore=ignore)\n file_check = pep8style.check_files(files)\n if file_check.total_errors > 0:\n print(\"GET {} ERRORS\".format(file_check.total_errors))\n pep8style = pycodestyle.StyleGuide(quiet=False, ignore=ignore)\n pep8style.check_files(files)\n\n self.assertEqual(0, file_check.total_errors, \"some file contains errors. To skip line use # noqa\")", "def test_file(self, file: CollectedFile):\n\n return file.filename[-3:].upper() == 'TXT'", "def test_check_for_duplicates_with_duplicates(self):\n quotes = [api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author2\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author3\", \"Publication\", [\"tag1, tag2\"])]\n\n with self.assertRaisesRegexp(Exception, \"a duplicate quote was found on line 2 of 'stdin'. 
\"\n \"Quote: \\\"This is an added quote.\\\".\"):\n\n api._check_for_duplicates(quotes, \"stdin\")", "def test_write_quotes_fnf(self):\n\n # Setup: create a pathname to file that does not exist\n path = os.path.join(self.tempdir, \"fakename.txt\")\n quote = api.Quote(\"Another new quote\", \"author\", None, [])\n quotes = [quote]\n\n # Call function under test, check that exception raised\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote file '{0}' was not found.\".format(path))):\n api.write_quotes(path, quotes)", "def test_md027_bad_block_quote_thematic():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md027\", \"bad_block_quote_thematic.md\"\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:3:3: \"\n + \"MD027: Multiple spaces after blockquote symbol (no-multiple-space-blockquote)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "def fileCheck(file):\n if not os.path.isfile(file):\n print('File : ',file)\n print('E... '+'no file')\n sys.exit()", "def test_check(self):\n\n self.assertTrue(Naive().check(self.file_gitignore))\n self.assertTrue(Naive().check(self.file_tests))\n self.assertTrue(Naive().check(self.file_bin))\n self.assertTrue(Naive().check(self.file_py))\n self.assertTrue(Naive().check(self.file_authors))", "def test_bad_assumption_file(reformfile1, assumpfile_bad1, assumpfile_bad2):\n input_stream = StringIO(RAWINPUTFILE_CONTENTS)\n input_dataframe = pd.read_csv(input_stream)\n taxyear = 2022\n with pytest.raises(ValueError):\n TaxCalcIO(input_data=input_dataframe,\n tax_year=taxyear,\n reform=reformfile1.name,\n assump=assumpfile_bad1.name,\n growdiff_response=None,\n aging_input_data=False,\n exact_calculations=False)\n with pytest.raises(ValueError):\n TaxCalcIO(input_data=input_dataframe,\n tax_year=taxyear,\n reform=reformfile1.name,\n assump=assumpfile_bad2.name,\n growdiff_response=None,\n aging_input_data=False,\n exact_calculations=False)", "def test_read_from_file():\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()", "def correct(filename):\n with open(filename) as f:\n html = f.read()\n lines = html.split('\\n')\n\n for line in lines:\n l = re.findall(r'<[^>]+>', line)\n if len(l) == 1: continue # deal with lines containing only 1 tag\n correct = check_tags(l)\n if not correct:\n print \"Incorrect:\", line" ]
[ "0.64514214", "0.63728905", "0.6251669", "0.61318374", "0.6038755", "0.6014769", "0.59573436", "0.5918087", "0.5877048", "0.57132167", "0.56868976", "0.56821746", "0.5636283", "0.55373394", "0.5532979", "0.55020666", "0.5496599", "0.54439133", "0.5369307", "0.5299016", "0.529695", "0.52961195", "0.52900416", "0.5242057", "0.52401423", "0.5225497", "0.5221437", "0.52181906", "0.51957625", "0.51812345" ]
0.7980774
0
Check all matching files in the directory, recursive or not.
def check_directory(directory, file_exts, recursive): for filename in tree_walker(directory, file_exts, recursive=recursive): check_file(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_files(folder_path):\n for dataset_file in listdir(folder_path):\n verify_file(folder_path + dataset_file)", "def _match_all(abs_dir, matching, not_matching):\n num_not_matching = 0\n\n for expression in matching:\n if not fnmatch.fnmatch(abs_dir, expression):\n num_not_matching += 1\n\n if num_not_matching == len(matching):\n return False\n\n for expression in not_matching:\n if fnmatch.fnmatch(abs_dir, expression):\n return False\n\n return True", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def allfiles(dir, pattern=\"*\"):\n\tdir = uniformpath(dir)\n\tif not os.path.isdir(dir): # must be file\n\t\treturn [dir]\n\tmatching_files = []\n\tfor root, subFolders, files in os.walk(dir):\n\t\tmatching = fnmatch.filter(files, pattern)\n\t\tmatching_files.extend(os.path.join(root, f) for f in matching)\n\treturn matching_files", "def _recursive_file_search(self, path, pattern):\n matches = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n\n return matches", "def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True", "def _findFilesInDir(self, directory, extension=\".py\", foundFiles=None):\n\n #mutable default arguments in Python are evaluated once when the function is defined, not each time the function is called.\n if foundFiles == None:\n foundFiles = []\n \n filenames = os.listdir(directory)\n for filename in filenames:\n #need to verify that the entity is a file (this avoids problems when directory names have file extensions)\n if filename[-len(extension):] == extension and filename[0:1] != '.' 
and os.path.isfile(directory + '/' + filename):\n foundFiles.append(directory + '/' + filename)\n print ('===>' + filename)\n return foundFiles", "def scan_dir(self, directory=\".\"):\n for root, dirs, files in os.walk(directory, topdown=False):\n for name in files:\n for filetype in self.allowed_file_types:\n if name.split(\".\")[-1] == filetype:\n self.song_list.append(os.path.join(root, name))", "def checkfiles(args):\n\n from .query import Database\n db = Database()\n\n r = db.objects(\n protocol=args.protocol,\n support=args.support,\n groups=args.group,\n purposes=args.purposes,\n gender=args.gender,\n clients=args.client,\n )\n\n # go through all files, check if they are available on the filesystem\n good = []\n bad = []\n for f in r:\n if os.path.exists(f.make_path(args.directory, args.extension)):\n good.append(f)\n else:\n bad.append(f)\n\n # report\n output = sys.stdout\n if args.selftest:\n from bob.db.base.utils import null\n output = null()\n\n if bad:\n for f in bad:\n output.write('Cannot find file \"%s\"\\n' % (f.make_path(args.directory, args.extension),))\n output.write('%d files (out of %d) were not found at \"%s\"\\n' % \\\n (len(bad), len(r), args.directory))\n\n return 0", "def recursive_glob(self, rootdir='.', suffix=''):\n\n valid_image_files = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_path = os.path.join(looproot, filename)\n label_path = image_path.replace(\"images\", \"labels\").replace(\"bmp\", \"txt\")\n if os.path.isfile(label_path):\n valid_image_files.append(image_path)\n\n return valid_image_files", "def find_files(directory='.', pattern='.*', recursive=True):\n if recursive:\n return (os.path.join(directory, filename)\n for directory, subdirectories, filenames in os.walk(directory)\n for filename in filenames if re.match(pattern, filename))\n else:\n return (os.path.join(directory, filename)\n for filename in os.listdir(directory)\n if re.match(pattern, filename))", "def iglob_recursive(directory, file_pattern):\n for root, dir_names, file_names in os.walk(directory, followlinks=True):\n files = fnmatch.filter(file_names, file_pattern)\n for filename in files:\n yield os.path.join(root, filename)", "def find_files(directory, patterns):\n for root, dirs, files in os.walk(directory):\n for basename in files:\n if \".pyc\" not in basename and \"__pycache__\" not in basename:\n for pattern in patterns:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename", "def _findFiles(self, topLevelDirectory, extension=\".py\", foundFiles=None):\n \n #mutable default arguments in Python are evaluated once when the function is defined, not each time the function is called.\n if foundFiles == None:\n foundFiles = []\n \n for dirpath, dirnames, filenames in os.walk(topLevelDirectory):\n for filename in filenames:\n #need to verify that the entity is a file (this avoids problems when directory names have file extensions)\n if filename[-len(extension):] == extension and filename[0:1] != '.' 
and os.path.isfile(dirpath+\"/\"+filename):\n foundFiles.append(dirpath+\"/\"+filename)\n #print dirpath+\"/\"+filename\n return foundFiles", "def _files_in_subdir(self, subdir, pattern, regex):\n all_files = glob(join(subdir, (pattern or '**')), recursive=True)\n all_files = [fp for fp in all_files if isfile(fp)]\n\n if pattern and regex:\n raise ValueError(\"Specify pattern OR regex, not both!\")\n elif pattern:\n files = [fn for fn in glob(join(subdir, pattern), recursive=True)]\n elif regex:\n files = [fn for fn in all_files if re.search(regex, fn)]\n else:\n files = all_files\n\n return sorted(files)", "def check_all():\n for name, module in sorted(sys.modules.items()): # module files\n filepath = getattr(module, '__file__', None)\n if filepath is None:\n # we land here when a module is an attribute of another module\n # i.e., it exists twice in the sys.modules table, once as its\n # canonical representation, and again having been imported\n # within another module\n continue\n filepath = filepath.endswith(\".pyc\") and filepath[:-1] or filepath\n check_one(filepath)\n\n for filepath in extras: # additional files\n check_one(filepath)", "def check_file_pattern_dir(filepath, file_patterns):\n dirs_with_session_files = []\n child_dirs = [x[0] for x in os.walk(filepath)]\n patterns_found = 0\n for dir in child_dirs:\n for pat in file_patterns:\n found_file = glob.glob(dir+pat)\n if len(found_file) > 0:\n patterns_found = 1\n break\n\n if patterns_found:\n break\n\n if patterns_found:\n return 1\n else:\n return 0", "def _find_files(directory, dirs_to_look_in, files_to_search_for, \n current_dir, see_files):\n full_name = True\n if see_files:\n full_name = False\n files_to_load = search_directory(directory, \n look_in=dirs_to_look_in,\n search_for=files_to_search_for,\n file_type='files',\n current_dir=current_dir,\n full_name=full_name)\n if not files_to_load:\n raise UserWarning('No files were found matching the search for %s'\\\n ' in the directory(s) %s%s' \\\n % (files_to_search_for, directory, \n dirs_to_look_in))\n return files_to_load", "def find(directory, slash='/', pattern=r'.+\\.out'):\n for directory, subdirectories, files in os.walk(directory):\n for file in files:\n if re.findall(pattern, str(file)):\n yield str(directory + slash + file)", "def open_files(self, path):\n # Strips trailing wildcard\n if path.endswith('*'):\n path = path[:-1]\n\n if os.path.isdir(path):\n return False\n\n self.logger.debug('Opening files matched by {0}'.format(path))\n info = iohelper.get_fileinfo(path)\n ext = iohelper.get_hdu_suffix(info.numhdu)\n files = glob.glob(info.filepath) # Expand wildcard\n paths = ['{0}{1}'.format(f, ext) for f in files]\n\n self.load_paths(paths)\n return True", "def check_programs_in_directory(directory):\n files = [f for f in os.listdir(directory) if f.endswith(DECAF_SUFFIX)]\n files.sort()\n files = [os.path.join(directory, f) for f in files]\n\n all_passed = True\n for f in files:\n if not check_return_value(f):\n all_passed = False\n\n return all_passed", "def walk_recursive(root, pattern='*.py'):\r\n for root, dirnames, filenames in os.walk(root):\r\n for filename in fnmatch.filter(filenames, pattern):\r\n yield os.path.join(root, filename)", "def check_tree(cls, root, require_init_py=True):\n if os.path.basename(root) == '__pycache__':\n return\n\n if not os.path.isfile(os.path.join(root, '__init__.py')):\n # Not a Python package directory\n if require_init_py:\n raise util.UserError(\n f\"No __init__.py file in '{root}'\")\n else:\n return\n\n # First, 
check for the case where a .py file and a directory\n # have the same name (without the extension). This can't be\n # handled, so just raise an exception\n found = set()\n for filename in os.listdir(root):\n path = os.path.join(root, filename)\n if os.path.isfile(path):\n filename, ext = os.path.splitext(filename)\n if ext == '.py':\n found.add(filename)\n\n for dirname in os.listdir(root):\n path = os.path.join(root, dirname)\n if os.path.isdir(path):\n if dirname in found:\n raise util.UserError(\n \"Found a directory and python file with same name in \"\n \"benchmark tree: '{0}'\".format(path))\n cls.check_tree(path, require_init_py=False)", "def _find_files(directory: str, pattern: str) -> Iterator[str]:\n for root, dirs, files in os.walk(directory, topdown=True):\n dirs[:] = [d for d in dirs if _is_file_valid(d)]\n for basename in sorted(files):\n if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename", "def check_files(fileglob, parser=None, detail=1):\n if parser is None:\n parser = get_parser()\n failed = []\n for tcd_file in sorted(glob.glob(fileglob)):\n print(f'### {tcd_file}', end='')\n try:\n parsed = check_file(tcd_file, show=False, parser=None)\n if detail >= 1:\n print(': OK')\n if detail >= 2:\n print(parsed.pretty())\n # Here, we indeed do want to surface any exception whatsoever.\n except Exception as exn: # pylint:disable=broad-except\n if detail >= 1:\n print(f': FAILED {exn!r}')\n failed.append(tcd_file)\n return failed", "def files_matching(self, dt=None):\n #Use os.walk. If descend is False, only continue for matching\n #the re to this point. If True, compare branch to entire re but\n #walk everything\n for d in self.directories:\n for (dirpath, dirnames, filenames) in \\\n os.walk(d, topdown=True, followlinks=True):\n #dirpath is FULL DIRECTORY to this point\n relpath = dirpath[len(d) + 1:]\n if not self.descend:\n if relpath and not \\\n self.file_fmt.match(relpath, dt, 'start'):\n continue\n for i in range(-len(dirnames), 0):\n if not self.file_fmt.match(os.path.join(\n relpath, dirnames[i]), dt, 'start'):\n del dirnames[i]\n for f in filenames:\n if self.file_fmt.match(os.path.join(relpath, f), dt,\n 'end' if self.descend else None):\n yield os.path.join(dirpath, f)", "def scan_directory(self, dirname):\n if not dirname:\n dirname = os.getcwd()\n\n if os.path.exists(dirname):\n for item in os.listdir(dirname):\n item_path = os.path.join(dirname, item)\n if os.path.isfile(item_path):\n self.file_confidence.append(self.confidence(item_path))\n else:\n raise FileNotFoundError('Directory does not exist. 
Change your path and try again')", "def scan_folder(self, folder, results):\n\n logging.info(\"Searching for xml files in the '{0}' folder.\".format(folder))\n\n for child in os.listdir(folder):\n\n pathname = os.path.join(folder, child)\n\n # child is a file\n if os.path.isfile(pathname):\n\n # check if this is a valid tests results html file\n if self.xml_regex.match(child):\n\n self.extract_info(pathname, results)\n\n # child is a folder\n else:\n\n if self.recursive:\n\n self.scan_folder(pathname, results)", "def find_files(base_dir, consumer_q):\n for dirpath, dirs, files in os.walk(base_dir, topdown=False):\n for f in files:\n fullpath = os.path.join(dirpath, f)\n if os.path.isfile(fullpath):\n consumer_q.put(fullpath)", "def find_files(basedir, regexp):\n regexp = re.compile(regexp)\n return sorted(fn for fn in glob.glob(os.path.join(basedir, '**'),\n recursive=True)\n if regexp.match(fn))" ]
[ "0.68475485", "0.6748209", "0.6720091", "0.65586984", "0.64987665", "0.6392444", "0.6379973", "0.6374137", "0.63139343", "0.62979114", "0.62551415", "0.6251213", "0.6240236", "0.6220307", "0.6212094", "0.6209517", "0.6161811", "0.616029", "0.6156458", "0.6141434", "0.61388713", "0.6110766", "0.61025244", "0.60820657", "0.6074527", "0.6061286", "0.6047272", "0.6030858", "0.60303026", "0.6024944" ]
0.7785399
0
Find the start position of the maze
def findStart(maze): start_Position = 0 for i in range(0, len(maze)): for j in range(0, len(maze[0])): if maze[i][j] == 'P': start_Position = i * len(maze[0]) + j return start_Position return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_start_node(self) -> MazeCell:\n return self._start_node", "def backtrack_to_start(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n next_move = lis[-4:-2]\r\n\r\n return next_move", "def get_start_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc", "def maze_position(self):\n pos = self._env.observations()['DEBUG.POS.TRANS']\n x, y = self._to_maze_coord(pos[0], pos[1])\n return np.array([x, y])", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def starting_position(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"starting_position\")", "def start(self) -> pos.Pos:\n return self.__start", "def starting_position(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"starting_position\")", "def __find_start(puzzle):\n for i in range(len(puzzle)):\n for j in range(len(puzzle[0])):\n if puzzle[i][j] == 0:\n return i\n return 0", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def get_start_point(self):\n return self.first_point", "def find_startpos(self, searched_object:str):\r\n fak = 1 #< When the figure needs to be pushed to the right -> fak = 1 else fak = 0\r\n # The main figures spwan position beginns at index 14 and ends at size(self.look_up_table) - 9\r\n start_index = 14\r\n y = start_index \r\n end_index = -9\r\n for x in self.look_up_table[start_index : end_index]:\r\n # When the serached object is in the row then get the index of it\r\n if searched_object in x:\r\n x = x.index(searched_object)\r\n break\r\n y += 1\r\n # Pac-Man does not need to push to the right\r\n if searched_object == 'PACMAN':\r\n fak = 0\r\n return x * self.grid_size + fak * self.grid_size // 2, y * self.grid_size", "def start(self) -> global___Pos:", "def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start", "def get_curpos(self):\n for i in range(len(self.tree)):\n if self.path == self.tree[i][2]:\n return i\n else:\n return -1", "def tinyMazeSearch(problem):\n\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n print \"Start's successors:\", problem.getSuccessors(problem.getStartState()) # delete this later, otherwise the start state\n # will count as expanded twice!\n print 'problem', problem\n\n\n from pac.game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [e, e, w, s, w, w, s, w]", "def get_start_cell(self):\n return (self.st_row, self.st_col)", "def findEnd(maze):\n final_Position = 0\n for i in range(0, len(maze)):\n for j in range(0, len(maze[0])):\n if maze[i][j] == '.':\n final_Position = i * len(maze[0]) + j\n return final_Position\n return -1", "def init_position():\n 
__maze.init_position()", "def start_loc(self) -> str:\n return self._start_loc", "def hgvs_start(self):\n try:\n return self.hp.parse(self.term).posedit.pos.start\n except hgvs.exceptions.HGVSParseError:\n # Log me\n # print(self.term)\n return None", "def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:\n # Shortcut for accessing maze\n maze_idx = lambda p: p[1] * width + p[0]\n\n # Shortcut funcs for surrounding points\n north = lambda p: (p[0] , p[1] -1)\n east = lambda p: (p[0] +1, p[1] )\n south = lambda p: (p[0] , p[1] +1)\n west = lambda p: (p[0] -1, p[1] )\n\n def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n # Points will be added to this list if they havent been traversed yet\n possible_points = dict()\n\n # -- NORTH\n p_pt = north(pt)\n # This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.\n if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"N\"\n\n # -- EAST\n p_pt = east(pt)\n if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"E\"\n\n # -- SOUTH\n p_pt = south(pt)\n if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"S\"\n\n # -- WEST\n p_pt = west(pt)\n if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"W\"\n\n return possible_points\n\n # First, connect to a random neighbour that has been visited.\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n\n step = start\n\n # Walk randomly until out of options\n while possible_n := check_neighbours(step):\n next_step, direction = random.choice(tuple(possible_n.items()))\n\n # Connect the two cells\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n\n # Go to next\n step = next_step", "def start_location(self) -> Point2:\n return self._game_info.player_start_location", "def find_player(self):\n for y, line in enumerate(self.maze):\n for x, character in enumerate(line):\n if character == \"m\":\n return y, x\n return None", "def get_pre_start_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n pre_start_coordinate = (self.__location[0] - 1,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n pre_start_coordinate = (self.__location[0],\r\n self.__location[1] - 1)\r\n return pre_start_coordinate", "def get_pos(self):\n return self.rect.midtop", "def get_min_position(self):\n raise NotImplementedError()", "def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')" ]
[ "0.73054814", "0.70507145", "0.6931038", "0.6863873", "0.66988903", "0.6598483", "0.6526041", "0.6505516", "0.649952", "0.6399859", "0.6399859", "0.6399859", "0.637604", "0.6371917", "0.6324958", "0.6307789", "0.6246911", "0.62361574", "0.6212202", "0.6210481", "0.614648", "0.61449826", "0.61413026", "0.6141174", "0.6134359", "0.6120287", "0.6104251", "0.60525155", "0.60349375", "0.60303026" ]
0.79864275
0
Find the end position of the maze
def findEnd(maze): final_Position = 0 for i in range(0, len(maze)): for j in range(0, len(maze[0])): if maze[i][j] == '.': final_Position = i * len(maze[0]) + j return final_Position return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_end_node(self) -> MazeCell:\n return self._end_node", "def get_end_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc_after(\n self.raw,\n )", "def findStart(maze):\n start_Position = 0\n for i in range(0, len(maze)):\n for j in range(0, len(maze[0])):\n if maze[i][j] == 'P':\n start_Position = i * len(maze[0]) + j\n return start_Position\n return -1", "def backtrack_to_start(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n next_move = lis[-4:-2]\r\n\r\n return next_move", "def last_pos(self):\n return self.locs[self.indices[-1], 2:4]", "def last_position(self):\n return self.visited_positions[-1]", "def end(self) -> pos.Pos:\n return self.__end", "def backtrack_to_start_to_draw_purpose(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n\r\n return path", "def maze_solver_rec(maze, start, end):\r\n def find_path(maze, pos, end):\r\n mark(maze, pos)\r\n if pos == end:\r\n print(pos, end=' ')\r\n return True\r\n for i in range(4):\r\n nextp = pos[0]+dirs[i][0], pos[1]+dirs[i][1]\r\n if passable(maze, nextp):\r\n if find_path(maze, nextp, end):\r\n print(pos, end=' ')\r\n return True\r\n return False\r\n\r\n print(\"If find, print the path from end to start:\")\r\n if find_path(maze, start, end):\r\n print(\"\\n\")\r\n else:\r\n print(\"No path exists.\")", "def get_end_cell(self):\n return (self.end_row, self.end_col)", "def _get_end_index(self):\n return max(self.index + self.source_window,\n self._get_target_index() + self.target_window)", "def _get_end(self):\n return self._end", "def get_rolling_stop_positions(self, maze):\n rows, cols = len(maze), len(maze[0])\n [up, left, down, right] = [[[0 for _ in xrange(cols)] for _ in xrange(rows)] for _ in xrange(4)]\n stop_positions = [[[[0, 0] for _ in xrange(4)] for _ in xrange(cols)] for _ in xrange(rows)]\n for r in xrange(rows):\n for c in xrange(cols):\n up[r][c] = r if r == 0 or maze[r-1][c] == 1 else up[r-1][c]\n left[r][c] = c if c == 0 or maze[r][c-1] == 1 else left[r][c-1]\n\n for r in xrange(rows - 1, -1, -1):\n for c in xrange(cols - 1, -1, -1):\n down[r][c] = r if r == rows - 1 or maze[r+1][c] == 1 else down[r+1][c]\n right[r][c] = c if c == cols - 1 or maze[r][c+1] == 1 else right[r][c+1]\n stop_positions[r][c] = [[up[r][c], c], [r, left[r][c]], [down[r][c], c], [r, right[r][c]]]\n\n return stop_positions", "def position_last(self):\n return self._position_last", "def pathFinder(M, start, end):\r\n point = M[start-1][end-1]\r\n if point != 0:\r\n pathFinder(M, start, point)\r\n print \"V\" + str(point)\r\n pathFinder(M, point, end)", "def _get_end_of_lane(self):\n\n end_of_lanes = np.zeros(len(self.grid.T), dtype=np.int)\n for idx, lane in enumerate(self.grid.T):\n empty_space_in_lanes = np.argwhere(lane != 0)\n if empty_space_in_lanes.size != 0:\n end_of_lanes[idx] = empty_space_in_lanes[-1] + 1\n\n if self.grid.T[idx][-1] != 0:\n end_of_lanes[idx] = -1\n return end_of_lanes", "def maze_position(self):\n pos = self._env.observations()['DEBUG.POS.TRANS']\n x, y = self._to_maze_coord(pos[0], pos[1])\n return np.array([x, y])", "def _generate_end_position(self):\n end_position = []\n new_row = []\n\n for i in range(1, 
self.PUZZLE_NUM_ROWS * self.PUZZLE_NUM_COLUMNS + 1):\n new_row.append(i)\n if len(new_row) == self.PUZZLE_NUM_COLUMNS:\n end_position.append(new_row)\n new_row = []\n\n end_position[-1][-1] = 0\n return end_position", "def solve_maze(self):\n initial_maze_loc = self.maze.location\n curr_coord = initial_maze_loc\n solution_path_directions = []\n #print(\"in solve_maze:\")\n\n # The agent always chooses the next location with the highest Q value.\n # With this strategy, the agent aims to reach the goal using the\n # most optimal path possible.\n while (self.grid[curr_coord[0]][curr_coord[1]] != 'G' and\n self.grid[curr_coord[0]][curr_coord[1]] != 'E'):\n possible_moves = self.maze.moves()\n\n # Find the next best move.\n best_next_move = (0,0)\n best_next_move_q = float('-inf')\n for move in possible_moves:\n if self.qtable[curr_coord[0]+move[0]][curr_coord[1]+move[1]] >= best_next_move_q:\n best_next_move = move\n best_next_move_q = self.qtable[curr_coord[0]+move[0]][curr_coord[1]+move[1]]\n\n direction = self.maze.moves_to_dirs[best_next_move]\n solution_path_directions.append(direction)\n curr_coord = (curr_coord[0]+best_next_move[0], curr_coord[1]+best_next_move[1])\n self.maze.location = curr_coord\n self.maze.location = initial_maze_loc # reset maze location to initial coord.\n\n return solution_path_directions", "def solve_maze(self, maze):\n \"\"\"this means: always follow a given path to a junction and from there try a direction at random\"\"\"\n \"\"\"caution: this algorithms may take longer than anticipated (since it is random)\"\"\"\n self.maze = maze\n self.path = []\n if maze.get_entrance() is None or maze.get_exit() is None:\n self.log.error('Entrance or Exit is missing')\n raise Exception('Entrance or Exit is missing')\n cell = maze.get_entrance()\n self.path.append(cell)\n while self.path[-1] != maze.get_exit():\n self.__decide_next__()\n self.__clean_path__()\n return self.path", "def get_end(self):\n return self.__end", "def getEnd(self) -> long:\n ...", "def get_next_position(self):", "def go_to_exit(self):\n ys = [self.currY]\n xs = [self.currX]\n options = np.zeros((self.h, self.w), np.uint8)\n visited = np.zeros((self.h, self.w), np.bool_)\n visited[self.currY, self.currX] = True\n distance = 1\n while True:\n while len(ys) > 0:\n cur = (ys.pop(), xs.pop())\n for d, m in enumerate(self.__get_map_offsets()):\n if (m[cur[0], cur[1]] > 1) and (\n not visited[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]]):\n options[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]] = distance\n visited[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]] = True\n if (cur[0] + self.directions[d][0] == self.exitY) and (\n cur[1] + self.directions[d][1] == self.exitX):\n return self.__convert_to_path_exit(options)\n yTemp, xTemp = np.where(options == distance)\n ys += yTemp.tolist()\n xs += xTemp.tolist()\n distance += 1", "def mazeTest():\r\n\tmyMaze = 
Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def end(self, finish=None):\n return self.bounds(finish=finish)[1]", "def startEndPoints(mazz):\n for i in range (len(mazz)):\n for j in range (len(mazz[i])):\n if mazz[i][j] == 6:\n startx = i\n starty = j\n elif mazz[i][j] == 7:\n endx = i\n endy = j\n return startx, starty, endx, endy", "def get_end(self):\n return self._end", "def end (self):\n return self._end if self._end != self.inf else self.e", "def last(self):\n return self._make_position(self._trailer._prev)" ]
[ "0.717331", "0.6836621", "0.67375916", "0.662352", "0.6482371", "0.63844573", "0.6366002", "0.6249933", "0.61706", "0.6140208", "0.61023366", "0.6072212", "0.60102415", "0.60079265", "0.60019976", "0.6000571", "0.59502167", "0.5941163", "0.5914859", "0.58956844", "0.5878427", "0.5876593", "0.58733255", "0.5863873", "0.58531314", "0.58529043", "0.58520246", "0.58286357", "0.58009654", "0.5797764" ]
0.82256186
0
Find the number of goals
def num_of_goals(maze): count = 0 for i in range(0, len(maze)): for j in range(0, len(maze[0])): if maze[i][j] == '.': count += 1 return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_goal_done(self, season=None, stage=None):\n cnt = 0\n matches = self.get_matches(season=season, ordered=True)\n for m in matches:\n if util.is_None(m.goal):\n continue\n if not util.is_None(stage) and m.stage >= stage:\n return cnt\n soup = BeautifulSoup(m.goal, \"html.parser\")\n for player1 in soup.find_all('player1'):\n if int(str(player1.string).strip()) == self.player_api_id:\n cnt += 1\n return cnt", "def get_assist_done(self, season=None, stage=None):\n cnt = 0\n for m in self.get_matches(season=season, ordered=True):\n if util.is_None(m.goal):\n continue\n if not util.is_None(stage) and m.stage >= stage:\n return cnt\n soup = BeautifulSoup(m.goal, \"html.parser\")\n for player1 in soup.find_all('player2'):\n if int(str(player1.string).strip()) == self.player_api_id:\n cnt += 1\n return cnt", "def num_trials(self):", "def goals(self):\n return self.problem.goals", "def count():", "def get_goal_received(self, season=None, stage=None):\n cnt = 0\n current_team = self.get_current_team()\n matches = self.get_matches(season=season, ordered=True)\n for m in matches:\n if not util.is_None(stage) and m.stage >= stage:\n return cnt\n if m.home_team_api_id == current_team.team_api_id:\n cnt += m.away_team_goal\n else:\n cnt += m.home_team_goal\n return cnt", "def heuristic(self, state: ODState) -> int:\n h = 0\n if self.assigned_goals is None:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, agent.color)\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, state.agents[j].color)\n else:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, self.assigned_goals[agent.id])\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, self.assigned_goals[state.agents[j].id])\n return h", "def GOAL_TOTAL() -> int:\n return 21", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def howManyGames(p, d, m, budget):\n cost = p\n i = 0\n add = 0\n while budget >= cost:\n budget = budget - cost\n add += cost\n print(add, cost)\n\n i += 1\n\n if (p - i * d) <= m:\n cost = m\n else:\n cost = p - i * d\n print(i)\n return i", "def goals(self):\n return self._goals", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives", "def get_num_goats(self) -> int:\n return len(self.get_all_goat_positions())", "def hits(self):\n return len(self.successes) + len(self.failures)", "def number_moves(game, player):\n return float(len(game.get_legal_moves(player)))", "def distance_from_goal(self, state):\n empty_finder_regex = re.compile('\\{}'.format(state.empty_token))\n possible_goal = ['*'] * len(empty_finder_regex.findall(state.text))\n possible_goal += self.desired_arrangement\n possible_goal += ['*'] * (len(state.text) - len(possible_goal))\n\n distance = 0\n for index, bin in enumerate(state.configuration):\n if bin != possible_goal[index]:\n distance += 1\n\n return distance", "def numberActivities(self):\n if self.use_dic:\n nb_data = self.dic.keys()\n nb_act = (self.dic[nb_data[0]]).keys()\n return len(nb_data)*len(nb_act)\n else:\n return -1", "def count() -> int:\n pass", "def count_gold(pyramid):\n\n #replace this for solution\n return 0", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and 
distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def count(self):\n # TODO not implemented yet\n return 0", "def nbr_tours(self):\n nbr_tours = 0\n for i in range(3):\n for j in range(3):\n if self.grille[i][j] != 0:\n nbr_tours += 1\n return nbr_tours", "def goals():\n rand_nmr = random.random()\n if rand_nmr < 0.5:\n return 1\n elif rand_nmr < 0.8:\n return 2\n elif rand_nmr < 0.97:\n return 3\n else:\n return 4", "def obstacle_count(self):\n found_something = False\n count = 0\n starting_postion = self.get_heading()\n self.right(primary=60, counter=60)\n time.sleep(0.5)\n while self.get_heading() != starting_postion:\n if self.read_distance() < 250 and not found_something:\n found_something = True\n count += 1\n print (\"I found something\")\n elif self.read_distance() > 250 and found_something:\n found_something = False\n print(\"I have a clear view\")\n self.stop()\n\n print(\"I have found this many things: %d\" % count)\n return count", "def count_balls(self, **kwargs):\n return 0", "def projects_count(args):\n session = GithubSession()\n\n print(f\"counting {args.name}\")\n\n board = session.get_project(args.name)\n\n tally = []\n\n columns = session.get_columns(board)\n for column in columns:\n print(column[\"name\"], file=sys.stderr)\n\n cards = list(session.get_cards(column))\n\n total = Decimal(0)\n unpointed = 0\n num_cards = 0\n num_walk_ins = 0\n issues = []\n walk_ins = []\n walk_in_points = 0\n\n for card_data in cards:\n issue_number = utils.get_issue_number_from_card_data(card_data)\n if not issue_number: # must be a note\n continue\n\n issue_data = session.get_issue(issue_number)\n labels = issue_data[\"labels\"]\n\n num_cards += 1\n\n points = get_points(labels)\n if points:\n total += points\n else:\n unpointed += 1\n\n issue_data = {\n \"issue_number\": issue_number,\n \"points\": str(points),\n \"unpointed\": points is None,\n \"walk_in\": False,\n }\n\n if is_walk_in(labels):\n num_walk_ins += 1\n if points:\n walk_in_points += points\n\n issue_data[\"walk_in\"] = True\n\n walk_ins.append(issue_data)\n\n issues.append(issue_data)\n\n tally.append(\n {\n \"column\": column[\"name\"],\n # 'issues': issues,\n \"num_cards\": num_cards,\n \"num_walk_ins\": num_walk_ins,\n \"walk_in_points\": str(walk_in_points),\n # 'walk_ins': walk_ins,\n \"total_points\": str(total),\n \"unpointed\": unpointed,\n }\n )\n\n print(json.dumps(tally, indent=4))", "def get_correct_lap_count(self):" ]
[ "0.7134005", "0.6836646", "0.680982", "0.66096485", "0.66093063", "0.65508413", "0.641527", "0.63940704", "0.63724166", "0.63724166", "0.63724166", "0.63724166", "0.62400764", "0.6205538", "0.61994475", "0.61957324", "0.61829734", "0.6176286", "0.61710906", "0.61667156", "0.61644137", "0.61595845", "0.61594176", "0.61492825", "0.6113684", "0.6097616", "0.60905576", "0.6084457", "0.6078158", "0.60766065" ]
0.712693
1
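A minimal usage sketch for the num_of_goals document above (the maze layout here is hypothetical, not taken from the dataset): the function simply counts the '.' goal cells in a list-of-strings grid.

maze = [
    "%%%%",
    "%.P%",
    "%..%",
    "%%%%",
]
assert num_of_goals(maze) == 3  # one '.' in row 1, two in row 2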
This function finds the nearest fruit from the current position
def closest_fruit(maze, currX, currY, fruit_list): curr_min = sys.maxsize for position in fruit_list: distance = Astar(maze, currX, currY, position[0], position[1]) if distance < curr_min: curr_min = distance return curr_min
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closestFood(pos, food, walls):\n fringe = [(pos[0], pos[1], 0)]\n expanded = set()\n while fringe:\n pos_x, pos_y, dist = fringe.pop(0)\n if (pos_x, pos_y) in expanded:\n continue\n expanded.add((pos_x, pos_y))\n # if we find a food at this location then exit\n if food[pos_x][pos_y]:\n return dist\n # otherwise spread out from the location to its neighbours\n nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)\n for nbr_x, nbr_y in nbrs:\n fringe.append((nbr_x, nbr_y, dist+1))\n # no food found\n return None", "def closestFood(pos, food, walls):\n fringe = [(pos[0], pos[1], 0)]\n expanded = set()\n while fringe:\n pos_x, pos_y, dist = fringe.pop(0)\n if (pos_x, pos_y) in expanded:\n continue\n expanded.add((pos_x, pos_y))\n # if we find a food at this location then exit\n if food[pos_x][pos_y]:\n return dist\n # otherwise spread out from the location to its neighbours\n nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)\n for nbr_x, nbr_y in nbrs:\n fringe.append((nbr_x, nbr_y, dist+1))\n # no food found\n return None", "def FindClosestPoint(self, ):\n ...", "def nearest(items, pivot):\n return min(items, key=lambda x: abs(x - pivot))", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n \"*** YOUR CODE HERE ***\"\n return search.bfs(problem)", "def get_closest(list_of_nearby, favorite_place):\n\tref_rating = float(favorite_place[\"rating\"]) # this is a float\n\tref_price_len = len(favorite_place[\"price\"]) # this is the length of the dollar sign - an int\n\tref_categ = favorite_place[\"categories\"] # this is a string!\n\n\tfor item in list_of_nearby:\n\t\tscore = 0\n\t\tlist_of_cat_words = item[categories].split()\n\t\tfor word in list_of_cat_words:\n\t\t\tif word in ref_categ:\n\t\t\t\tscore += 1\n\t\tscore = score * 5\n\t\tscore = score - 2 * abs(len(item[\"price\"]) - ref_price_len)\n\t\tscore = score - 10 * abs(float(item[\"rating\"]) - ref_rating)\n\t\titem[\"score\"] = score\n\n\tfor item in list_of_nearby:\n\t\treturn_list = []\n\t\treturn_list.append({\"id\": item[\"id\"], \"score\": item[\"score\"]})\n\n\treturn_list = sorted(return_list, key = lambda i: i[\"score\"])\n\treturn return_list", "def nearest(self, value):\n coords = value[:2] # value only has 2 coords (x, y) right now, but it may have theta in the future\n hits = self.idx.nearest(self.make_bounding_box(coords), 1, objects=False)\n for hit in hits:\n # take the first index in the event of any ties\n return self.nodes[hit]\n \n \n \n #assert that value is valid here\n \"\"\"def recur(node, depth=0):\n closest, distance = node, self.cost(node.value, value)\n if depth < self.max_size:\n for child in node.children:\n (child_closest, child_distance) = recur(child, depth+1)\n if child_distance < distance:\n closest = child_closest\n distance = child_distance \n return closest, distance\n return recur(self.root)[0]\"\"\"", "def getClosestPositionOpponent(self, idx, pos, defense=False):\n minD = 10000\n opponentPos = [pos]\n for p in self.beliefs[idx]:\n if defense:\n d = self.getMazeDistanceDefense(pos, p)\n else:\n d = self.getMazeDistance(pos, p)\n if minD > d:\n minD = d\n opponentPos = [p]\n elif minD == d:\n opponentPos.append(p)\n return random.choice(opponentPos)", "def find_closest_move(self, position, valid_moves):\n closest = None\n closest_value = 100\n for move in 
valid_moves:\n if closest:\n x_dist = abs(position[0] - move[0])\n y_dist = abs(position[1] - move[1])\n if x_dist + y_dist < closest_value:\n closest = move\n closest_value = x_dist + y_dist\n else:\n closest = move\n\n return closest", "def nearest_voxel(center, roi):\n nearest=[]\n min_dist = 10000\n for vxl in roi:\n dist = sum(abs(np.subtract(vxl,center)))/3\n if dist < min_dist:\n min_dist=dist\n nearest=[vxl]\n elif dist==min_dist:\n nearest.append(vxl)\n # print(nearest)\n return nearest[random.randint(0,len(nearest)-1)]", "def determine_closest(self, targets):\n min_distance = None\n closest = None\n targets = filter(lambda x: not x.owner or x.owner is self, targets)\n for target in targets:\n # If target currently in use, skip it\n if target.occupied_by:\n print(f\"{target.name}: {target.x},{target.y} occupied by {target.occupied_by.name}\")\n continue\n\n # If target is known to be broken, skip it\n if target in self.memories.broken_items:\n continue\n\n dx = target.x - self.x\n dy = target.y - self.y\n distance = math.sqrt(dx**2 + dy**2)\n if min_distance is None or distance < min_distance:\n min_distance = distance\n closest = target\n\n return closest", "def _closest_front_opponent(self, raw_obs, o, target):\n delta = target - o\n min_d = None\n closest = None\n for p in raw_obs['right_team']:\n delta_opp = p - o\n if np.dot(delta, delta_opp) <= 0:\n continue\n d = self._object_distance(o, p)\n if min_d is None or d < min_d:\n min_d = d\n closest = p\n\n # May return None!\n return closest", "def find_closest(self, cls):\n closest = None\n shortest_dist = None\n for sprite in self.game.entities[ALL_SPRITES]:\n if isinstance(sprite, cls):\n curr_dist = distance((self.x, self.y), (sprite.x, sprite.y))\n if shortest_dist is None or curr_dist < shortest_dist:\n closest = sprite\n shortest_dist = curr_dist\n return closest", "def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. 
\n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx", "def calc_nearest_ind(self, robot_pose):\n pass", "def findPathToClosestDot(self, gameState):\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n return search.bfs(problem)\n util.raiseNotDefined()", "def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return array[idx], idx", "def FindClosestInsertedPoint(self, ):\n ...", "def get_closest_node(data, loc):\n min_dist = None\n closest = None\n for i in data:\n # Standard min-value search loop\n dist = great_circle_distance(get_coords(data, i), loc)\n if closest is None or dist < min_dist:\n closest = i\n min_dist = dist\n return closest", "def _get_nearest_point(self, position):\n nearest_inds = np.round(position / self._maze.grid_side - 0.5)\n return nearest_inds.astype(int)", "def find(self,v):\n for i in range(len(self)):\n if near(self[i],v):\n return i\n return -1", "def closest_point(point, points):\n return points[cdist([point], points).argmin()]", "def find_nearest(value,array):\n idx = numpy.abs(value-array).argmin()\n return idx,array[idx]", "def nearest(source):\n def mycmp(a,b):\n return -cmp(a[1],b[1])\n dmin = 999.999\n smin = 'Unknown'\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n sdlist=[]\n for s in stars_:\n d = distance(s[0],source)\n sdlist.append((s[0],d))\n if d < dmin:\n dmin = d\n smin = s[0]\n sdlist.sort(mycmp)\n for sd in sdlist:\n print \"%s at %g\" % (sd[0],sd[1])\n print \"Nearest object from stars() to %s is %s at %g deg\" % (source,smin,dmin)", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition()\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState)\n\n \"*** YOUR CODE HERE ***\"\n return breadthFirstSearch(problem)\n # util.raiseNotDefined()", "def getNearestPreference(self, myABR):\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1 and (enemyShip.myShipHull.abr in globals.targetPreference[myABR]):\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange 
= range\n closestShip = enemyShip\n return closestShip", "def getNearestTarget(self):\n if self.myShipHull.abr in globals.targetPreference.keys():\n closestShip = self.getNearestPreference(self.myShipHull.abr)\n if closestShip != None:\n return closestShip\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1:\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n if closestShip == None and self.myGalaxy.shipsUnderAssault() == 0:\n try:\n self.myGalaxy.endSimulation(self.empireID)\n except:\n pass\n return closestShip", "def getSafeFoodGoal(self, gameState):\n food = self.safeFood\n # print(food)\n myPos = self.getCurrentObservation().getAgentState(self.index).getPosition()\n if len(food) > 0:\n dis = 9999\n nearestFood = food[0]\n for a in food:\n temp = self.getMazeDistance(myPos, a)\n if temp < dis:\n dis = temp\n nearestFood = a\n return nearestFood, dis\n else:\n return None, None", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n return search.breadthFirstSearch(problem)\n util.raiseNotDefined()", "def find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return idx" ]
[ "0.7051569", "0.7051569", "0.6634373", "0.64470977", "0.6331274", "0.63046014", "0.6298118", "0.6280977", "0.6156889", "0.6088076", "0.60613", "0.6055863", "0.6047264", "0.6038702", "0.60233784", "0.6021113", "0.60130227", "0.60057616", "0.5998253", "0.59963185", "0.59912944", "0.59853625", "0.59762573", "0.59752655", "0.59711623", "0.5968118", "0.59665674", "0.5956562", "0.5931294", "0.5917708" ]
0.7583291
0
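The closest_fruit document above assumes an external Astar(maze, x1, y1, x2, y2) path-length helper and an import of sys; as a purely illustrative stand-in, the same scan-for-minimum idea with Manhattan distance in place of the A* path length looks like this (the names below are hypothetical, not the dataset's code):

import sys

def closest_fruit_manhattan(currX, currY, fruit_list):
    # same structure as closest_fruit, with |dx| + |dy| replacing Astar()
    curr_min = sys.maxsize
    for (fx, fy) in fruit_list:
        distance = abs(fx - currX) + abs(fy - currY)
        if distance < curr_min:
            curr_min = distance
    return curr_min

closest_fruit_manhattan(0, 0, [(2, 3), (1, 1)])  # -> 2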
Assert that env variables are replaced in parsed config.
def test_env_variables_replaced(self): password = "ABC123qwe" parsed_config = self._get_parsed_config("full_config.yml") logger_with_replaced_password = parsed_config.loggers[0] # replaced if env variable is present self.assertEqual( password, logger_with_replaced_password._auth.password, msg="password is not replaced", ) db_backup_item_with_unchaged_password = parsed_config.backup_items[3] # not replaced if there is no such env variable self.assertEqual( "${MYSQL_PASSWORD}", db_backup_item_with_unchaged_password.password, msg="password should not be replaced", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def config_env_var_verify():\n with open('skywalking/config.py', 'r') as config_file:\n data = config_file.read().replace('\\n', '')\n for each in OPTIONS.keys():\n if f'_{each.upper()}' not in data:\n raise Exception(f'Environment variable for {each.upper()} is not found in config.py\\n'\n f'This means you have a mismatch of config.py variable and env var name')", "def test_fromEnv_bad6(self):\n TEST_ENVIRON = dict(BASE_ENVIRON)\n TEST_ENVIRON[\"TOR_PT_SERVER_BINDADDR\"] = \"dummy-lyrical_content,boom-127.0.0.1:6666\"\n os.environ = TEST_ENVIRON\n self.assertRaises(EnvError, self.plugin._loadConfigFromEnv)\n self.assertOutputLinesStartWith(\"ENV-ERROR \")", "def test_fromEnv_bad4(self):\n TEST_ENVIRON = dict(BASE_ENVIRON)\n TEST_ENVIRON[\"TOR_PT_EXTENDED_SERVER_PORT\"] = \"cakez\"\n os.environ = TEST_ENVIRON\n self.assertRaises(EnvError, self.plugin._loadConfigFromEnv)\n self.assertOutputLinesStartWith(\"ENV-ERROR \")", "def test_parse_from_env_vars(mock_os_environ, settings_update, var_content, expected):\n climate = core.Climate()\n os.environ[\"MY_VAR\"] = var_content\n climate.update(settings_update)\n actual = dict(climate.settings)\n assert actual == expected", "def test_fromEnv_bad5(self):\n TEST_ENVIRON = dict(BASE_ENVIRON)\n TEST_ENVIRON[\"TOR_PT_ORPORT\"] = \"lulz\"\n os.environ = TEST_ENVIRON\n self.assertRaises(EnvError, self.plugin._loadConfigFromEnv)\n self.assertOutputLinesStartWith(\"ENV-ERROR \")", "def test_fromEnv_bad7(self):\n TEST_ENVIRON = dict(BASE_ENVIRON)\n TEST_ENVIRON[\"TOR_PT_SERVER_BINDADDR\"] = \"dummy-127.0.0.1:5556,laughs-127.0.0.1:6666\"\n TEST_ENVIRON[\"TOR_PT_SERVER_TRANSPORTS\"] = \"dummy,boom\"\n os.environ = TEST_ENVIRON\n self.assertRaises(EnvError, self.plugin._loadConfigFromEnv)\n self.assertOutputLinesStartWith(\"ENV-ERROR \")", "def test_fromEnv_bad8(self):\n TEST_ENVIRON = dict(BASE_ENVIRON)\n TEST_ENVIRON[\"TOR_PT_SERVER_BINDADDR\"] = \"dummy-127.0.0.1:5556,laughs-127.0.0.1:6666\"\n TEST_ENVIRON[\"TOR_PT_SERVER_TRANSPORTS\"] = \"dummy\"\n os.environ = TEST_ENVIRON\n self.assertRaises(EnvError, self.plugin._loadConfigFromEnv)\n self.assertOutputLinesStartWith(\"ENV-ERROR \")", "def test_env_top_dict(self, monkeypatch):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n environment:\n FOO: This is foo\n FOO_WITH_QUOTES: \"\\\"Quoted foo\\\"\" # Quotes included in value\n BAR: \"This is bar\"\n MAGIC: 42\n SWITCH_1: true # YAML boolean\n SWITCH_2: \"true\" # YAML string\n EMPTY: \"\"\n EXTERNAL: # Comes from os env\n EXTERNAL_NOTSET: # Missing in os env\n \"\"\"\n )\n\n monkeypatch.setenv(\"EXTERNAL\", \"Outside world\")\n monkeypatch.delenv(\"EXTERNAL_NOTSET\", raising=False)\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n expect = dict(\n FOO=\"This is foo\",\n FOO_WITH_QUOTES='\"Quoted foo\"',\n BAR=\"This is bar\",\n MAGIC=\"42\", # N.B. 
string\n SWITCH_1=\"True\", # Unfortunately this is due to str(bool(1))\n SWITCH_2=\"true\",\n EMPTY=\"\",\n EXTERNAL=\"Outside world\",\n EXTERNAL_NOTSET=\"\",\n )\n assert expect == config.environment", "def test_fromEnv_bad9(self):\n TEST_ENVIRON = dict(BASE_ENVIRON)\n TEST_ENVIRON[\"TOR_PT_SERVER_BINDADDR\"] = \"dummy-127.0.0.1:5556\"\n TEST_ENVIRON[\"TOR_PT_SERVER_TRANSPORTS\"] = \"dummy,laughs\"\n os.environ = TEST_ENVIRON\n self.assertRaises(EnvError, self.plugin._loadConfigFromEnv)\n self.assertOutputLinesStartWith(\"ENV-ERROR \")", "def test_env_top_list(self, monkeypatch):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n environment:\n - FOO=This is foo # No quotes\n - FOO_WITH_QUOTES=\"Quoted foo\" # Quotes included in value\n - BAR=This is bar\n - MAGIC=42\n - SWITCH_2=true\n - EMPTY=\n - EXTERNAL # Comes from os env\n - EXTERNAL_NOTSET # Missing in os env\n \"\"\"\n )\n\n monkeypatch.setenv(\"EXTERNAL\", \"Outside world\")\n monkeypatch.delenv(\"EXTERNAL_NOTSET\", raising=False)\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n expect = dict(\n FOO=\"This is foo\",\n FOO_WITH_QUOTES='\"Quoted foo\"',\n BAR=\"This is bar\",\n MAGIC=\"42\", # N.B. string\n SWITCH_2=\"true\",\n EMPTY=\"\",\n EXTERNAL=\"Outside world\",\n EXTERNAL_NOTSET=\"\",\n )\n assert expect == config.environment", "def test_env_var_settings_set(config, environment_vars_set_wowww):\n sms = YesssSMS.YesssSMS()\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"wowww\"\n\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"goood\"\n\n del os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS(\"123456\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"yesss\"\n\n del os.environ[\"YESSSSMS_LOGIN\"]\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"123456\"\n assert sms._logindata[\"login_passwort\"] == \"password\"\n assert sms._provider == \"yesss\"", "def test_volumes_with_invalid_env_vars(self, monkeypatch):\n # Ensure that the entry does not exist in the environment\n monkeypatch.delenv(\"TEST_VAR1\", raising=False)\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n $TEST_VAR1/foo: /host/foo\n \"\"\"\n )\n self._invalid_config(\"TEST_VAR1\")", "def test_envvar(config):\n assert not config.get('TESTVAR_CONFIG_ENVVAR', False)\n try:\n tmp_path = tempfile.mkdtemp()\n # Write config into instance folder.\n with open(os.path.join(tmp_path, 'testapp.cfg'), 'w') as f:\n f.write(\"TESTVAR_CONFIG_ENVVAR = True\\n\")\n\n os.environ['MYPREFIX_TEST_CONFIG'] = os.path.join(\n tmp_path, 'testapp.cfg'\n )\n\n config.from_envvar(variable_name='MYPREFIX_TEST_CONFIG')\n assert config.get('TESTVAR_CONFIG_ENVVAR', False)\n finally:\n shutil.rmtree(tmp_path)", "def test_read_env_config2(config, environment_vars_set_wowww):\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"wowww\"", "def test_config_filename_not_given_envvar_is_empty(monkeypatch):\n monkeypatch.delenv(ENV_CONFIG_FILE)\n with pytest.raises(ValueError):\n Config()", "def test_metadata_cache_uri_set_via_env_vars(monkeypatch, caplog):\n 
ENV_METADATA_CACHE_URI = environ_names_and_sections[NAME_METADATA_CACHE_URI][0]\n ENV_AQUARIUS_URL = deprecated_environ_names[NAME_AQUARIUS_URL][0]\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI, raising=False)\n monkeypatch.delenv(ENV_AQUARIUS_URL, raising=False)\n config = Config()\n metadata_cache_uri = config.metadata_cache_uri\n assert metadata_cache_uri == \"https://aquarius.marketplace.oceanprotocol.com\"\n\n monkeypatch.setenv(ENV_METADATA_CACHE_URI, \"https://custom-aqua.uri\")\n config = Config()\n assert config.metadata_cache_uri == \"https://custom-aqua.uri\"\n\n monkeypatch.setenv(ENV_AQUARIUS_URL, \"https://another-aqua.url\")\n with pytest.raises(ValueError):\n Config()\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI)\n config = Config()\n assert config.metadata_cache_uri == \"https://another-aqua.url\"\n assert (\n \"Config: AQUARIUS_URL envvar is deprecated. Use METADATA_CACHE_URI instead.\"\n in caplog.text\n )", "def verify_environment():\n reqs = ['NAME', 'RECIPIENT', 'SUBJECT', 'MESSAGE',\n 'MAILGUN_API_KEY', 'MAILGUN_DOMAIN']\n for req in reqs:\n if not os.getenv(req):\n logging.error('Environment variable ' + req + ' is not set')\n sys.exit(2)", "def test_env_alias(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n aliases:\n al:\n script: Don't care\n environment:\n FOO: Overridden\n MORE: Hello world\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n assert config.aliases[\"al\"].environment == dict(\n FOO=\"Overridden\",\n MORE=\"Hello world\",\n )", "def test_read_env_config4(config, environment_vars_set):\n del os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"yesss\"", "def test_defaults():\n config = Config(\n env_var='DO_NOT_USE',\n env_prefix='DO_NOT_USE',\n entry_point_name='DO_NOT_USE',\n )\n\n assert not config.keys()", "def test_read_env_config3(config, environment_vars_set):\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"goood\"", "def checkEnvVar(self):\n for path in self.config.options('ENV'):\n if (self.config.get('ENV', path)).startswith('/'):\n print (\"Checking path for \"+path).ljust(65, '.'),\n if not os.path.exists(self.config.get('ENV', path)):\n print \"[ Failed ]\"\n print \"\\n***ERROR: %s not found. Check the config file.\" % path\n sys.exit()\n else:\n print \"[ OK ]\"", "def check_user_environment(config):\n if not config.has_section('user_env_vars'):\n return\n\n for env_var in config.keys('user_env_vars'):\n if env_var in os.environ:\n msg = '{} is already set in the environment. 
'.format(env_var) +\\\n 'Overwriting from conf file'\n config.logger.warning(msg)", "def test_environ(run_nait) -> None: # type: ignore\n result = run_nait(['--environ-shell', '--environ', 'foo=bar']).stdout.decode('utf-8')\n assert result.find('export NANAIMO_UNITTEST=\"This is a nanaimo unittest environment.\"') != -1\n assert result.find('export foo=\"bar\"') != -1", "def test_load_config_from_environment(env_config):\n args = argparse.Namespace()\n cfg = configure(args)\n\n assert cfg[CFG_CLOUD_PROVIDER][CFG_CP_GCP_PROJECT] == env_config['ELB_GCP_PROJECT']\n assert cfg[CFG_CLOUD_PROVIDER][CFG_CP_GCP_REGION] == env_config['ELB_GCP_REGION']\n assert cfg[CFG_CLOUD_PROVIDER][CFG_CP_GCP_ZONE] == env_config['ELB_GCP_ZONE']\n assert cfg[CFG_BLAST][CFG_BLAST_BATCH_LEN] == env_config['ELB_BATCH_LEN']\n assert cfg[CFG_CLUSTER][CFG_CLUSTER_NAME] == env_config['ELB_CLUSTER_NAME']\n assert cfg[CFG_CLUSTER][CFG_CLUSTER_USE_PREEMPTIBLE] == env_config['ELB_USE_PREEMPTIBLE']\n assert cfg[CFG_CLUSTER][CFG_CLUSTER_BID_PERCENTAGE] == env_config['ELB_BID_PERCENTAGE']", "def test_remove_envvar():\n client = TestClient()\n client.run('config set env.MY_VAR=MY_VALUE')\n conf_file = load(client.cache.conan_conf_path)\n assert 'MY_VAR = MY_VALUE' in conf_file\n client.run('config rm env.MY_VAR')\n conf_file = load(client.cache.conan_conf_path)\n assert 'MY_VAR' not in conf_file", "def test_settings_env_file_and_env(mock_env_settings_file, tmpdir):\n settings_map = settings_parser.Settings(prefix='TEST_STUFF')\n assert isinstance(settings_map, Mapping)\n assert dict(settings_map) == {\n 'testgroup': {\n 'testvar': 7,\n 'test_var': 6\n }, 'othergroup': {\n 'blabla': 555\n },\n 'testgroup_test_var': 9\n }", "def test_env_vars():\n # Create a variable with the file system encoding and save it\n # in our PYTHONPATH\n env_var = to_fs_from_unicode(u'ñññ')\n CONF.set('main', 'spyder_pythonpath', [env_var])\n\n # Create a kernel spec\n kernel_spec = SpyderKernelSpec()\n\n # Assert PYTHONPATH is in env vars and it's not empty\n assert kernel_spec.env['PYTHONPATH'] != ''\n\n # Assert all env vars are binary strings\n assert all([is_binary_string(v) for v in kernel_spec.env.values()])\n\n # Remove our entry from PYTHONPATH\n CONF.set('main', 'spyder_pythonpath', [])", "def test_environment_patchtest(self):\n self.env = patch.dict('os.environ', {'hello': 'world'})\n with self.env:\n self.assertEqual(os.environ['hello'], 'world')", "def test_env_params_are_defined_in_template(yaml_file):\n\n bad = []\n template_pair = get_environment_pair(yaml_file)\n\n if not template_pair:\n pytest.skip(\"No yaml/env pair could be determined\")\n\n template = template_pair.get(\"yyml\").get(\"parameters\", {})\n environment = template_pair.get(\"eyml\").get(\"parameters\", {})\n\n if not isinstance(template, dict) or not isinstance(environment, dict):\n pytest.skip(\"No parameters defined in environment or template\")\n\n template = template.keys()\n environment = environment.keys()\n\n for parameter in environment:\n if parameter not in template:\n bad.append(\n (\n \"{} is defined in the environment file but not in \"\n + \"the template file \"\n ).format(parameter)\n )\n msg = (\n \"All parameters defined in an environment file must \"\n + \"be defined in the template file. \"\n + \". \".join(bad)\n )\n\n assert not bad, msg" ]
[ "0.73749894", "0.73658586", "0.7352894", "0.7328954", "0.7326987", "0.73115546", "0.7239576", "0.7118701", "0.70504314", "0.6948533", "0.6660295", "0.6642592", "0.6521425", "0.6511922", "0.6486655", "0.64748263", "0.64657277", "0.64488757", "0.6378391", "0.63549876", "0.6346314", "0.6343382", "0.63375777", "0.6333335", "0.6333231", "0.63251483", "0.63134307", "0.6311593", "0.62948674", "0.62931436" ]
0.79440945
0
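The test_env_variables_replaced document above asserts that ${VAR}-style placeholders in the parsed config are expanded only when the variable exists in the environment. Python's string.Template.safe_substitute gives exactly that behaviour, so a minimal sketch of the substitution step (the project's real config machinery is not shown here, and the helper name below is hypothetical) could be:

import os
from string import Template

def expand_env_placeholders(raw: str) -> str:
    # ${NAME} becomes os.environ["NAME"]; names missing from the environment stay untouched
    return Template(raw).safe_substitute(os.environ)

os.environ["BACKUP_PASSWORD"] = "ABC123qwe"
os.environ.pop("MYSQL_PASSWORD", None)
assert expand_env_placeholders("${BACKUP_PASSWORD}") == "ABC123qwe"
assert expand_env_placeholders("${MYSQL_PASSWORD}") == "${MYSQL_PASSWORD}"  # not set, left as-is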
Exports the current board to an FEN format meant for exporting
def export_to_FEN(self): final_FEN_array = [] # Output state of pieces final_FEN_array.append(self._export_board()) final_FEN_array.append('w' if self.white_to_move else 'b') final_FEN_array.append(self.available_castles if self.available_castles else "-") final_FEN_array.append(self.en_passants if self.en_passants else "-") final_FEN_array.append(str(self.half_moves)) final_FEN_array.append(str(self.full_moves)) return " ".join(final_FEN_array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toFEN(self):\n row_strs = []\n for row in range(0,8):\n row = self.board[row]\n \n counter = 0\n row_str = \"\"\n for cell in row:\n if cell:\n if counter == 0:\n row_str += pieces_s[cell]\n else:\n row_str += \"%d%s\"%(counter,pieces_s[cell]) \n counter = 0\n else:\n counter += 1\n \n if counter:\n row_str += str(counter)\n \n row_strs.append(row_str)\n \n toplay = \"w\" if self.toplay == \"WHITE\" else \"b\"\n boardrep = \"/\".join(row_strs) \n return \"%s %s\"%(boardrep,toplay)", "def _export_board(self):\n final_board = []\n\n for row in self.board:\n final_row = []\n\n # Tracks the number of continguous empty spaces in the current place\n spaces = 0\n\n for square in row:\n # we hit an empty space\n if square == EMPTY_SPACE:\n spaces += 1\n else:\n final_row.append(str(spaces) + square)\n spaces = 0\n\n final_row.append(str(spaces))\n final_row = \"\".join(final_row).replace('0', '')\n\n final_board.append(final_row)\n\n return \"/\".join(final_board)", "def print_final_board(self, board):\n\t\tinterpreted = self.interpret_board(board)\n\t\tself.print_board(interpreted)", "def write_to_file(board, output_file = \"solution.sud\"):\n with open(output_file, \"w\") as f:\n for i in range(n):\n if i and i%3==0:\n f.write(\"------+-------+------\\n\")\n for j in range(n): \n if j and j%3==0:\n f.write(\"| \")\n if len(board[i][j]) == 1:\n f.write(str(board[i][j][0]) + \" \")\n else:\n f.write(\". \")\n elif j==8:\n if len(board[i][j]) == 1:\n f.write(str(board[i][j][0]) + \"\\n\")\n else:\n f.write(\".\\n\")\n else:\n if len(board[i][j]) == 1:\n f.write(str(board[i][j][0]) + \" \")\n else:\n f.write(\". \")\n return 0", "def _print_board(board):\r\n pass", "def exportFBX(self, selected = None):\r\n # TODO include logic for skeleton export\r\n pm.mel.FBXExport(f= self.FilePath + '\\{}'.format(selected), s=True)\r\n print '{} has been exported'.format(selected)", "def exportTOUGH2(self, fname):\r\n STR = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) # - 1 #\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename, 'w', newline='\\r\\n') as f:\r\n f.write(\"ELEME\")\r\n # debug\r\n f.write(\r\n \"\"\"\r\n 1 10 20 30 40 50 60 70 80\r\n |--------|---------|---------|---------|---------|---------|---------|---------|\r\n 12345678901234567890123456789012345678901234567890123456789012345678901234567890\r\n \"\"\")\r\n\r\n ii = 0\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # f.write(str(iy)+str(ix)+\"\\n\")\r\n # first base\r\n b2 = ii // (len(STR) * len(STR))\r\n b1 = (ii - len(STR) * b2) // len(STR)\r\n b0 = ii % len(STR)\r\n\r\n f.write(STR[b2] + STR[b1] + STR[b0] + \"\\t\" + str(ii) + \"\\n\")\r\n ii += 1", "def printBoard(self):", "def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')", "def write_hex_dump(machine, cols=32):\n fname = \"game\"\n with open(fname + \".hexdmp\", \"w+\") as f:\n f.write(hex_dump(machine, cols))", "def print_to_file(self, board, filename, header=None):\n if not os.path.exists(self.folder):\n os.makedirs(self.folder)\n #filename = board.id if board.id else \"board.txt\"\n file = os.path.join(self.folder, filename)\n header_only = False #os.path.isfile(file)\n with 
(open(file,'a+')) as f:\n if header:\n print(\" === \", header, \" === \", file=f)\n else:\n print(\" === ??? === \", file=f)\n if header_only:\n print(\"board.id:\", board.id, file=f)\n print(\"rotation:\", board.rotation, \", pieces:\", board.count_pieces(), \", halfMoves:\", board.halfMoves, \", no-progress:\", board.noProgressCount, file=f)\n else:\n board.display(f)\n \n self.print_legal_moves(board, f)\n #print(\"executed_moves:\", board.executed_moves, file=f)\n f.closed \n return file", "def print_board(self):\n self.board.print()", "def api_print_board(self):\n print(self.board)", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def display_board(self, board):\r\n print(\" 0 1 2 3 4 5 6 7\")\r\n for x, row in enumerate(board):\r\n sys.stdout.write(str(x))\r\n for val in row:\r\n if val == 1:\r\n sys.stdout.write(\"|b\")\r\n elif val == -1:\r\n sys.stdout.write(\"|w\")\r\n elif val == 2:\r\n sys.stdout.write(\"|B\")\r\n elif val == -2:\r\n sys.stdout.write(\"|W\")\r\n else:\r\n sys.stdout.write(\"| \")\r\n print(\"|\")", "def _write_foft(parameters):\n # Formats\n fmt = block_to_format[\"FOFT\"]\n fmt = str2format(fmt[5])\n\n values = [x for x in parameters[\"element_history\"]]\n out = write_record(values, fmt, multi=True)\n\n return out", "def print_board(self):\n print('Board:')\n print('\\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in self.board]))", "def printBoard(self):\n if self.side == self.WHITE or self.side == None:\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r) # print a8 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n else:\n for r in [1,2,3,4,5,6,7,8]:\n for c in 'hgfedcba':\n p = self.getPiece(c,r) # print h1 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r)\n #if p != None and p.header.frame_id == \"chess_board\":\n # print \"Warning, frame is chess_board:\", c+str(r)", "def print_current_board(self):\n\n # iterate through the range in reverse order\n for r in range(9, -2, -1):\n output = \"\"\n if r == 9 or r == 0:\n # then the top or bottom of the board\n output = \" +------------------------+\"\n elif r == -1:\n # then show the ranks\n output = \" a b c d e f g h\"\n else: # board\n output = \" \" + str(r) + \" |\"\n # fill in all the files with pieces at the current rank\n for file_offset in range(0, 8):\n # start at a, with with file offset increasing the char\n f = chr(ord(\"a\") + file_offset)\n current_piece = None\n for piece in self.game.pieces:\n if piece.file == f and piece.rank == r:\n # then we found the piece at (file, rank)\n current_piece = piece\n break\n\n code = \".\" # default \"no piece\"\n if current_piece:\n # the code will be the first character of their type\n # e.g. 
'Q' for \"Queen\"\n code = current_piece.type[0]\n\n if current_piece.type == \"Knight\":\n # 'K' is for \"King\", we use 'N' for \"Knights\"\n code = \"N\"\n\n if current_piece.owner.id == \"1\":\n # the second player (black) is lower case.\n # Otherwise it's uppercase already\n code = code.lower()\n\n output += \" \" + code + \" \"\n\n output += \"|\"\n print(output)", "def save(self, game):\n try:\n with open(self.filename, mode='w+') as file:\n # First char in the file is the next player\n file.write(game.next_player)\n # Then the board as a string of 64 characters\n file.write(str(game.board))\n\n except IOError as err:\n print(f\"Error saving file: {err}\")", "def __str__(self):\n str = '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n for row in self.boards:\n for i in range(self.SIZE):\n str += '|'\n for board in row:\n for square in board.export_grid()[i]:\n str += square.value\n str += '|'\n str += '\\n'\n str += '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n return str", "def export_to_file(self):\r\n return True", "def write_fr_cards(bc_file, bc_class):\n fr = bc_class.friction_controls\n if not fr.empty:\n bc_file.write('! Friction Controls\\n')\n bc_file.write(fr.to_csv(sep=' ', na_rep='', index=False, header=False,).replace('\\r\\n', '\\n'))\n bc_file.write('\\n') # blank line after Friction Controls", "def print_board(self):\n board = \"\"\n for i in range(3):#need to change this in the future\n for j in range(3):#need to change this in the future\n board += self.board[i][j]\n if j != 2:#need to change this in the future\n board += \" | \"\n board += \"\\n\"\n return board", "def show_board(board) -> None:\n for line in board:\n print('|'.join(line))", "def export(self, file: TextIO) -> None:\n file.write(f'\"{self.name}\"\\n\\t{{\\n')\n file.write(f'\\tchannel {self.channel}\\n')\n file.write(f'\\tsoundlevel {join_float(self.level)}\\n')\n\n if self.volume != (1, 1):\n file.write(f'\\tvolume {join_float(self.volume)}\\n')\n if self.pitch != (100, 100):\n file.write(f'\\tpitch {join_float(self.pitch)}\\n')\n\n if len(self.sounds) != 1:\n file.write('\\trndwave\\n\\t\\t{\\n')\n for wav in self.sounds:\n file.write(f'\\t\\twave \"{wav}\"\\n')\n file.write('\\t\\t}\\n')\n else:\n file.write(f'\\twave \"{self.sounds[0]}\"\\n')\n\n if self.force_v2 or self.stack_start or self.stack_stop or self.stack_update:\n file.write(\n '\\t' 'soundentry_version 2\\n'\n '\\t' 'operator_stacks\\n'\n '\\t\\t' '{\\n'\n )\n if self.stack_start:\n file.write(\n '\\t\\t' 'start_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_start:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n if self.stack_update:\n file.write(\n '\\t\\t' 'update_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_update:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n if self.stack_stop:\n file.write(\n '\\t\\t' 'stop_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_stop:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n file.write('\\t\\t}\\n')\n file.write('\\t}\\n')", "def print_board(self):\n print(*self._board, sep=\"\\n\")", "def print_board(self):\n print(self.board)", "def show_board(self):\n\n for s in self.board[1:-1]:\n print(''.join(x.symbol for x in s[1:-1]))", "def export_bom(self):\n path = self.export_dir.joinpath(self.partcode).joinpath('bom.xlsx')\n bom = self.doc.ComponentDefinition.BOM\n bom.StructuredViewFirstLevelOnly = False\n 
bom.StructuredViewEnabled = True\n bom.BOMViews.Item(\"Structured\").Export(path, 74498)" ]
[ "0.681017", "0.6483553", "0.61728805", "0.59665763", "0.5963671", "0.5932981", "0.5874577", "0.58599985", "0.58199704", "0.56737715", "0.5647533", "0.55947274", "0.55381477", "0.5481513", "0.54759187", "0.5461627", "0.5449034", "0.5441245", "0.54395163", "0.54376006", "0.542368", "0.54047865", "0.539751", "0.5391908", "0.5354061", "0.53522354", "0.534647", "0.5338476", "0.53328544", "0.5332379" ]
0.7740351
0
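For reference, the export_to_FEN document above joins six space-separated FEN fields: piece placement, side to move, castling availability, en-passant target, half-move clock and full-move number. A small self-contained check of that joining step, using the standard chess starting position as the (assumed) board state:

fields = [
    "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR",  # piece placement, as _export_board() would emit it
    "w",     # side to move
    "KQkq",  # castling availability
    "-",     # en-passant target square
    "0",     # half-move clock
    "1",     # full-move number
]
assert " ".join(fields) == "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"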
converts an EAN move to internal coordinates on the board
def _EAN_coords_to_board_coords(EAN_move: str) -> (int, int): assert EAN_move[0] in "abcdefgh" and EAN_move[1] in "12345678", "failed to get " + EAN_move col = ord(EAN_move[0]) - ord('a') row = 8 - int(EAN_move[1]) return row, col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _AN_to_coords(self, move: str):\n\n orig_move = move\n\n extra_info = \"\"\n\n # remove all characters that don't matter when parsing\n for pointless_char in \"x+#\":\n move = move.replace(pointless_char, \"\")\n\n # Handle castling\n if CASTLE_QUEENSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 2), CASTLE_QUEENSIDE\n elif CASTLE_KINGSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 6), CASTLE_KINGSIDE\n\n # Pawn promotion\n if move[-2] == \"=\":\n extra_info = move[-1] if self.white_to_move else move[-1].lower()\n move = move[:-2]\n\n # Destination of move, this is the only guaranteed substring in the move\n dest_str = move[-2:]\n dest = State._EAN_coords_to_board_coords(dest_str)\n move = move[:-2]\n\n # Deduce what piece actually made the move, if there is no shown there is no pawn\n # Note in AN pieces are always uppercase and location is lowercase,\n # so this makes it simple to check if we have a piece or a location\n piece = \"P\"\n if move and move[0].isupper():\n piece = move[0]\n move = move[1:]\n if not self.white_to_move:\n piece = piece.lower()\n\n # At this point the only info the move should contain is a hint on where the piece is coming from\n loc_hint = move\n\n possible_moves = self.get_all_moves()\n possible_moves = filter(lambda x: dest_str in x, possible_moves) # Filter to only moves that land on the right destination\n possible_moves = list(filter(lambda x: loc_hint in x[0:2], possible_moves)) # Filter to only moves that match the hint in the algebraic notation\n for possible_move in possible_moves:\n row, col = State._EAN_coords_to_board_coords(possible_move[0:2])\n if self.board[row][col] == piece:\n return (row, col), dest, extra_info\n\n raise ValueError(\"Algebraic notation parsing failed, no valid move found matching the given move \" + orig_move\n + \" with board state\\n\" + str(self))", "def _coord_to_EAN(coords):\n row, col = coords\n col = chr(col + ord('a'))\n row = str(8 - row)\n return col + row\n return col + row", "def parse_EAN(self, EAN: str):\n\n if EAN == CASTLE_KINGSIDE:\n row = self._get_castling_row()\n return (row, 4), (row, 6), CASTLE_KINGSIDE\n elif EAN == CASTLE_QUEENSIDE:\n row = self._get_castling_row()\n return (row, 4), (row, 2), CASTLE_QUEENSIDE\n\n assert 4 <= len(EAN) <= 5, \"Invalid EAN\"\n\n start = State._EAN_coords_to_board_coords(EAN[0:2])\n dest = State._EAN_coords_to_board_coords(EAN[2:4])\n\n # used to decide what piece to promote to when pawn reaches the end\n extra_info = \"\" if len(EAN) == 4 else EAN[4]\n\n return start, dest, extra_info", "def get_ai_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_ai_move(board, player):\n row, col = 0, 0\n return row, col", "def get_ai_move(board):\n return Connect4MiniMax.get_move(board)", "def toAN(board, move, short=False, castleNotation=CASTLE_SAN):\n\n fcord = (move >> 6) & 63\n tcord = move & 63\n flag = move >> 12\n\n if flag in (KING_CASTLE, QUEEN_CASTLE):\n if castleNotation == CASTLE_SAN:\n return flag == KING_CASTLE and \"O-O\" or \"O-O-O\"\n elif castleNotation == CASTLE_KR:\n rooks = board.ini_rooks[board.color]\n tcord = rooks[flag == KING_CASTLE and 1 or 0]\n # No treatment needed for CASTLE_KK\n\n if flag == DROP:\n if board.variant == SITTUYINCHESS:\n s = \"%s@%s\" % (reprSignSittuyin[fcord], reprCord[tcord])\n else:\n s = \"%s@%s\" % (reprSign[fcord], reprCord[tcord])\n else:\n s = reprCord[fcord] + reprCord[tcord]\n\n if flag in PROMOTIONS:\n if short:\n if board.variant in (CAMBODIANCHESS, 
MAKRUKCHESS):\n s += reprSignMakruk[PROMOTE_PIECE(flag)].lower()\n elif board.variant == SITTUYINCHESS:\n s += reprSignSittuyin[PROMOTE_PIECE(flag)].lower()\n else:\n s += reprSign[PROMOTE_PIECE(flag)].lower()\n else:\n if board.variant in (CAMBODIANCHESS, MAKRUKCHESS):\n s += \"=\" + reprSignMakruk[PROMOTE_PIECE(flag)]\n elif board.variant == SITTUYINCHESS:\n s += \"=\" + reprSignSittuyin[PROMOTE_PIECE(flag)]\n else:\n s += \"=\" + reprSign[PROMOTE_PIECE(flag)]\n return s", "def op_move_neast(self,piece):\n\n # Check common preconditions\n if(not self.op_move_preconditions()):\n return False\n\n # Check particular preconditions\n if(not self.op_move_neast_pre(piece)):\n return False\n\n # Variable extraction\n piece_x = piece[0]\n piece_y = piece[1]\n\n\n dir_ = self.get_direction()\n pieceCoords = (piece_x - (1 * dir_),piece_y + (1 * dir_))\n pieceValue = self.board.get_element(*pieceCoords)\n\n # End of pre conditions\n\n return self.op_move_postconditions(piece, pieceCoords)", "def undo_move(self):\r\n if len(self.moveLog) != 0:\r\n move = self.moveLog.pop()\r\n self.board[move.sr][move.sc] = move.pieceMoved\r\n self.board[move.er][move.ec] = move.pieceCaptured\r\n self.turn_white = not self.turn_white\r\n\r\n # king pos\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.sr, move.sc)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.sr, move.sc)\r\n\r\n # enpassant\r\n if move.isEnpassantMove:\r\n self.board[move.er][move.ec] = \"--\"\r\n self.board[move.sr][move.ec] = move.pieceCaptured\r\n self.enpas_pos = (move.er, move.ec)\r\n\r\n # pawn x2\r\n if move.pieceMoved[1] == \"p\" and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ()\r\n\r\n # castle rights\r\n self.castleRightsLog.pop()\r\n self.cr_castle_r = self.castleRightsLog[-1]\r\n\r\n # castle moves\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 1]\r\n self.board[move.er][move.ec - 1] = '--'\r\n else:\r\n self.board[move.er][move.ec - 2] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'", "def intf_ENTMOVE(E):\n input1ok= False\n if E.The.StackSize() >= 2: \n # CHECK INPUT #1\n # Check that next ready stack item is a LST of 3 VALs.\n check= E.The.StackCopyItemLast() # Input verification. Next item on stack now.\n # Probably should use inc.point_formatted_LST here. 
See ENTPGRAM.\n if check.whatami == \"LST\":\n if len(check.val)==3:\n #if not filter(lambda x:x.whatami!=\"VAL\",check.val):\n if all([x.whatami==\"VAL\" for x in check.val]):\n input1ok= True\n if not input1ok or not inc.entid_or_LST_of_entids(E.The,2):\n print(\"Input Error: move\")\n print(intf_ENTMOVE.__doc__)\n return # Without doing much of anything.\n myoffset= [ xyz.val for xyz in E.The.StackPop().val ] # A list [3.5 -2 0].\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n myent= MMEL.El[myeid]\n myent.translate(myoffset)\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n OUT.default(MMEL,E) # AUTODUMP ", "def computePosition(self, state):\n d = 0\n if state[5] == \"East\":\n d = 0\n elif state[5] == \"West\":\n d = 1\n elif state[5] == \"North\":\n d = 2\n else:\n d = 3\n return state[0]*64+state[1]*32+state[2]*16+state[3]*8+state[4]*4+d", "def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)", "def state_to_position(self, state):\r\n dir = state % 4\r\n col = ((state - dir) / 4) % self.width\r\n row = (state - dir - col * 4) / (4 * self.width)\r\n return row, col, dir", "def move_enemy(self, hero, pad, graph, msg):\r\n hr,hc = hero.position\r\n\r\n er, ec = self.position\r\n\r\n distance = math.sqrt((math.pow(hc - ec, 2)) + math.pow(hr - er, 2))\r\n\r\n if distance <= 12 and distance > 1:\r\n if not line_of_sight(pad, hero.position, self.position, graph):\r\n return\r\n\r\n if hr > er:\r\n temp_er = er + 1\r\n elif hr < er:\r\n temp_er = er - 1\r\n else:\r\n temp_er = er\r\n\r\n if hc < ec:\r\n temp_ec = ec - 1\r\n elif hc > ec:\r\n temp_ec = ec + 1\r\n else:\r\n temp_ec = ec\r\n\r\n if math.sqrt((math.pow(hc - temp_ec, 2)) + math.pow(hr - er, 2)) <= math.sqrt((math.pow(hc - ec, 2)) + math.pow(hr - temp_er, 2)):\r\n if pad.instr(er, temp_ec, 1) in p.valid_moves:\r\n temp_tile = self.tile\r\n self.tile = pad.instr(er, temp_ec, 1) #update tile under enemy\r\n pad.addstr(er, temp_ec, self.character)\r\n pad.addstr(er, ec, temp_tile)\r\n self.position = (er, temp_ec)\r\n elif pad.instr(temp_er, ec, 1) in p.valid_moves:\r\n temp_tile = self.tile\r\n self.tile = pad.instr(temp_er, ec, 1)\r\n pad.addstr(temp_er, ec, self.character)\r\n pad.addstr(er, ec, temp_tile)\r\n self.position = (temp_er, ec)\r\n else:\r\n if pad.instr(temp_er, ec, 1) in p.valid_moves:\r\n temp_tile = self.tile\r\n self.tile = pad.instr(temp_er, ec, 1)\r\n pad.addstr(temp_er, ec, self.character)\r\n pad.addstr(er, ec, temp_tile)\r\n self.position = (temp_er, ec)\r\n elif pad.instr(er, temp_ec, 1) in p.valid_moves:\r\n temp_tile = self.tile\r\n self.tile = pad.instr(er, temp_ec, 1)\r\n pad.addstr(er, temp_ec, self.character)\r\n pad.addstr(er, ec, self.tile)\r\n self.position = (er, temp_ec)\r\n\r\n er, ec = self.position\r\n\r\n distance = math.sqrt((math.pow(hc - ec, 2)) + math.pow(hr - er, 2))\r\n\r\n if distance == 1:\r\n hero.defend_attack(self.attack, msg, self.name)", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def AeroMove(self, pos):\r\n\r\n pass", "def _ai_move(self):\n move = 
self.AI_MOVES[self.game_board.get_string_board()][0]\n self.game_board.move_pieces(start=move[\"start\"], end=move[\"end\"])\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self.selected_move = -1\n\n self._sync_gui()", "def parseAN(board, an):\n\n if not 4 <= len(an) <= 6:\n raise ParsingError(an, \"the move must be 4 or 6 chars long\", board.asFen())\n\n if \"@\" in an:\n tcord = cordDic[an[-2:]]\n if an[0].islower():\n # Sjeng-ism\n piece = chr2Sign[an[0]]\n else:\n piece = chrU2Sign[an[0]]\n return newMove(piece, tcord, DROP)\n\n try:\n fcord = cordDic[an[:2]]\n tcord = cordDic[an[2:4]]\n except KeyError as e:\n raise ParsingError(an, \"the cord (%s) is incorrect\" % e.args[0], board.asFen())\n\n flag = NORMAL_MOVE\n\n if len(an) > 4 and not an[-1] in \"QRBNMSFqrbnmsf\":\n if (\n (board.variant != SUICIDECHESS and board.variant != GIVEAWAYCHESS)\n or (board.variant == SUICIDECHESS or board.variant == GIVEAWAYCHESS)\n and not an[-1] in \"Kk\"\n ):\n raise ParsingError(an, \"invalid promoted piece\", board.asFen())\n\n if len(an) == 5:\n # The a7a8q variant\n flag = chr2Sign[an[4].lower()] + 2\n elif len(an) == 6:\n # The a7a8=q variant\n flag = chr2Sign[an[5].lower()] + 2\n elif board.arBoard[fcord] == KING:\n if fcord - tcord == 2:\n flag = QUEEN_CASTLE\n if board.variant == FISCHERRANDOMCHESS:\n tcord = board.ini_rooks[board.color][0]\n elif fcord - tcord == -2:\n flag = KING_CASTLE\n if board.variant == FISCHERRANDOMCHESS:\n tcord = board.ini_rooks[board.color][1]\n elif board.arBoard[tcord] == ROOK:\n color = board.color\n friends = board.friends[color]\n if bitPosArray[tcord] & friends:\n if board.ini_rooks[color][0] == tcord:\n flag = QUEEN_CASTLE\n else:\n flag = KING_CASTLE\n else:\n flag = NORMAL_MOVE\n elif (\n board.arBoard[fcord] == PAWN\n and board.arBoard[tcord] == EMPTY\n and FILE(fcord) != FILE(tcord)\n and RANK(fcord) != RANK(tcord)\n ):\n flag = ENPASSANT\n elif board.arBoard[fcord] == PAWN:\n if an[3] in \"18\" and board.variant != SITTUYINCHESS:\n flag = QUEEN_PROMOTION\n\n return newMove(fcord, tcord, flag)", "def edwin_transform(self, coordinates):\n edwinx = int(3.459 * coordinates[0] - 1805)\n edwiny = int(6.167 * coordinates[1] - 2400)\n edwinz = int(37.50 * coordinates[2] + 2667)\n\n if edwinx > 4000:\n edwinx = 4000\n elif edwinx < 0:\n edwinx = 0\n\n if edwiny > 4000:\n edwiny = 4000\n elif edwiny < -400:\n edwiny = -400\n\n if edwinz > 3500:\n edwinz = 3500\n elif edwinz < -600:\n edwinz = -600\n\n return edwinx, edwiny, edwinz", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and 
on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def ConvertToCoordinate(pos):\n\n return pos // BOARD_DIMENSION, pos % BOARD_DIMENSION", "def apply_move(cell, x, y):\r\n x2 = (co_ords[cell])[0] + x\r\n y2 = (co_ords[cell])[1] + y\r\n return (x2, y2)", "def parse_move_to_square(self, uci_move: str):\n chars = utils.split_string_to_chars(uci_move)\n square_from = ''.join(chars[0] + chars[1])\n square_to = ''.join(chars[2] + chars[3])\n return square_from, square_to", "def ConvertToPosition(coor):\n\n return coor[0] * BOARD_DIMENSION + coor[1]", "def getMove(self, board):\n pass", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def node_to_coords(self,node_num):\n row = (node_num - 1) / self.cols\n col = (node_num - 1) % self.cols\n return (row,col)", "def move(self, AN_str):\n self._move(*self._AN_to_coords(AN_str))", "def domove(self, depart, arrivee, promote):\n\n # Debugging tests\n # if(self.cases[depart].isEmpty()):\n # print('domove() ERROR : asked for an empty square move : ',depart,arrivee,promote)\n # return \n # if(int(depart)<0 or int(depart)>63):\n # print('domove() ERROR : incorrect FROM square number : ',depart)\n # return \n # if(int(arrivee)<0 or int(arrivee)>63):\n # print('domove() ERROR : incorrect TO square number : ',arrivee)\n # return\n # if(not(promote=='' or promote=='q' or promote=='r' or promote=='n' or promote=='b')):\n # print('domove() ERROR : incorrect promote : ',promote)\n # return\n\n # Informations to save in the history moves\n pieceDeplacee = self.cases[depart] # moved piece\n piecePrise = self.cases[arrivee] # taken piece, can be null : Piece()\n isEp = False # will be used to undo a ep move\n histEp = self.ep # saving the actual ep square (-1 or square number TO)\n hist_roque_56 = self.white_can_castle_56\n hist_roque_63 = self.white_can_castle_63\n hist_roque_0 = self.black_can_castle_0\n hist_roque_7 = self.black_can_castle_7\n flagViderEp = True # flag to erase ep or not : if the pawn moved is not taken directly, it can't be taken later\n\n # Moving piece\n self.cases[arrivee] = self.cases[depart]\n self.cases[depart] = Piece()\n\n self.ply += 1\n\n # a PAWN has been moved -------------------------------------\n if (pieceDeplacee.nom == 'PION'):\n\n # White PAWN\n if (pieceDeplacee.couleur == 'blanc'):\n\n # If the move is \"en passant\"\n if (self.ep == arrivee):\n piecePrise = self.cases[arrivee + 8] # take black pawn\n self.cases[arrivee + 8] = Piece()\n isEp = True\n\n # The white pawn moves 2 squares from starting square\n # then blacks can take \"en passant\" next move\n elif (self.ROW(depart) == 6 and self.ROW(arrivee) == 4):\n self.ep = arrivee + 8\n flagViderEp = False\n\n # Black PAWN\n else:\n\n if (self.ep == arrivee):\n piecePrise = self.cases[arrivee - 8]\n self.cases[arrivee - 8] = Piece()\n isEp = True\n\n elif (self.ROW(depart) == 1 and self.ROW(arrivee) == 3):\n self.ep = arrivee - 8\n flagViderEp = False\n\n # a ROOK has been moved--------------------------------------\n # update 
castle rights\n\n elif (pieceDeplacee.nom == 'TOUR'):\n\n # White ROOK\n if (pieceDeplacee.couleur == 'blanc'):\n if (depart == 56):\n self.white_can_castle_56 = False\n elif (depart == 63):\n self.white_can_castle_63 = False\n\n # Black ROOK\n else:\n if (depart == 0):\n self.black_can_castle_0 = False\n elif (depart == 7):\n self.black_can_castle_7 = False\n\n # a KING has been moved-----------------------------------------\n\n elif (pieceDeplacee.nom == 'ROI'):\n\n # White KING\n if (pieceDeplacee.couleur == 'blanc'):\n\n # moving from starting square\n if (depart == 60):\n # update castle rights\n self.white_can_castle_56 = False\n self.white_can_castle_63 = False\n\n # If castling, move the rook\n if (arrivee == 58):\n self.cases[56] = Piece()\n self.cases[59] = Piece('TOUR', 'blanc')\n\n elif (arrivee == 62):\n self.cases[63] = Piece()\n self.cases[61] = Piece('TOUR', 'blanc')\n\n # Black KING\n else:\n\n if (depart == 4):\n self.black_can_castle_0 = False\n self.black_can_castle_7 = False\n\n if (arrivee == 6):\n self.cases[7] = Piece()\n self.cases[5] = Piece('TOUR', 'noir')\n\n elif (arrivee == 2):\n self.cases[0] = Piece()\n self.cases[3] = Piece('TOUR', 'noir')\n\n # End pieces cases-----------------------------------------------\n\n # Any move cancels the ep move\n if (flagViderEp == True):\n self.ep = -1\n\n # Promote : the pawn is changed to requested piece\n if (promote != ''):\n if (promote == 'q'):\n self.cases[arrivee] = Piece('DAME', self.side2move)\n elif (promote == 'r'):\n self.cases[arrivee] = Piece('TOUR', self.side2move)\n elif (promote == 'n'):\n self.cases[arrivee] = Piece('CAVALIER', self.side2move)\n elif (promote == 'b'):\n self.cases[arrivee] = Piece('FOU', self.side2move)\n\n # Change side to move\n self.changeTrait()\n\n # Save move to the history list\n self.history.append((depart, \\\n arrivee, \\\n pieceDeplacee, \\\n piecePrise, \\\n isEp, \\\n histEp, \\\n promote, \\\n hist_roque_56, \\\n hist_roque_63, \\\n hist_roque_0, \\\n hist_roque_7))\n\n # If the move lets king in check, undo it and return false\n if (self.in_check(self.oppColor(self.side2move))):\n self.undomove()\n return False\n\n return True" ]
[ "0.77504724", "0.6948253", "0.64844483", "0.6003651", "0.59367543", "0.5791499", "0.5699505", "0.5690667", "0.5684613", "0.56577367", "0.5653698", "0.563426", "0.5631644", "0.5626147", "0.56250554", "0.55736893", "0.5568672", "0.55596614", "0.55477536", "0.5547573", "0.55429846", "0.55373436", "0.5534924", "0.5510446", "0.54956424", "0.54897064", "0.54878825", "0.54760337", "0.5473871", "0.5473019" ]
0.7544965
1
Invalidates castling if either the king or rook was moved
def _invalidate_castles(self): if self.board[0][0] != 'r': # Black queenside self.available_castles = self.available_castles.replace('q', '') if self.board[0][7] != 'r': # Black kingside self.available_castles = self.available_castles.replace('k', '') if self.board[7][0] != 'R': # White queenside self.available_castles = self.available_castles.replace('Q', '') if self.board[7][7] != 'R': # White kingside self.available_castles = self.available_castles.replace('K', '') if self.board[0][4] != 'k': self.available_castles = self.available_castles.replace('k', '') self.available_castles = self.available_castles.replace('q', '') if self.board[7][4] != 'K': self.available_castles = self.available_castles.replace('K', '') self.available_castles = self.available_castles.replace('Q', '')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_valid_moves(self):\r\n # castling and en-passant rights are stored, because move affects these values\r\n temp_enpassant_possible = self.enpas_pos\r\n temp_castle = CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs)\r\n\r\n # for validating a possible move\r\n #1 all possibile moves are generated\r\n #2 each pos moves are made\r\n #3 generate opponent move\r\n #4 check if any of those moves let the king attacked\r\n #5 moves which let the king in chess are eliminated\r\n #6 the moves are undone\r\n moves = self.get_all_possible_moves() # 1\r\n\r\n # castle moves are directly introduced in valid moves\r\n if not self.turn_white:\r\n self.get_castle_moves(self.bKingPos[0], self.bKingPos[1], moves)\r\n else:\r\n self.get_castle_moves(self.wKingPos[0], self.wKingPos[1], moves)\r\n\r\n for i in range(len(moves) - 1, -1, -1): # 2\r\n self.make_move(moves[i])\r\n # 3 #4\r\n self.turn_white = not self.turn_white\r\n if self.in_check():\r\n moves.remove(moves[i]) # 5\r\n self.turn_white = not self.turn_white\r\n self.undo_move()\r\n\r\n # game ending possibilities\r\n if len(moves) == 0:\r\n if self.in_check():\r\n self.checkMate = True\r\n print(\"Checkmate !\")\r\n else:\r\n self.staleMate = True\r\n print(\"Stalemate !\")\r\n else:\r\n self.checkMate = False\r\n self.staleMate = False\r\n\r\n # the rigths are restored, and the values are not affected\r\n self.enpas_pos = temp_enpassant_possible\r\n self.cr_castle_r = temp_castle\r\n\r\n return moves", "def unmakeMove(self, move):", "def cancel_move(self):\n self.should_move = False", "def is_king_move_valid(self, from_row, from_col, to_row, to_col):\n\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n\n if abs(to_row - from_row) <= 1 and abs(to_col - from_col) <= 1:\n if piece_color == \"white\":\n self.whiteCanCastleKside = False\n self.whiteCanCastleQside = False\n else:\n self.blackCanCastleKside = False\n self.blackCanCastleQside = False\n return True\n\n # TODO Castling implementation\n # if king and rook have not been moved yet this game, and no space between\n # the king and the rook are occupied or threatened, then the king can\n # move 2 spaces towards the rook, and the rook will be placed adjacent to the\n # king on the side closer to the center column.\n\n # TODO need function which returns squares being threatened which takes a piece position and board as a param\n\n if (piece_color == \"white\"):\n if self.whiteCanCastleKside and (from_row == 7 and from_col == 4) and (to_row == from_row) and (to_col == 6):\n # White kingside Castle\n if (self.board.squares[7][5] == None and self.board.squares[7][6] == None):\n if not self.testing:\n self.whiteCanCastleKside = False\n self.whiteCanCastleQside = False\n self.board.move_piece(7, 7, 7, 5)\n return True\n\n if self.whiteCanCastleQside and (from_row == 7 and from_col == 4) and (to_row == from_row) and (to_col == 2):\n # White queenside Castle\n if (self.board.squares[7][3] == None and self.board.squares[7][2] == None and self.board.squares[7][1] == None):\n\n if not self.testing:\n self.whiteCanCastleKside = False\n self.whiteCanCastleQside = False\n self.board.move_piece(7, 0, 7, 3)\n return True\n\n elif piece_color == \"black\":\n if self.blackCanCastleKside and (from_row == 0 and from_col == 4) and (to_row == from_row) and (to_col == 6):\n # black kingside Castle\n if (self.board.squares[0][5] == None and self.board.squares[0][6] == None):\n if not self.testing:\n 
self.blackCanCastleKside = False\n self.blackCanCastleQside = False\n self.board.move_piece(0, 7, 0, 5)\n return True\n\n if self.blackCanCastleQside and (from_row == 0 and from_col == 4) and (to_row == from_row) and (to_col == 2):\n # black queenside Castle\n if (self.board.squares[0][3] == None and self.board.squares[0][2] == None and self.board.squares[0][1] == None):\n if not self.testing:\n self.blackCanCastleKside = False\n self.blackCanCastleQside = False\n self.board.move_piece(0, 0, 0, 3)\n return True\n\n return False", "def castling(self, turn, ai):\n\n if self.board[self.coords.index(piece_class.KING_LOCATION[turn])].move_track == True:\n return None\n \n \n castling_queenside = [self.coords.index(piece_class.KING_LOCATION[turn]), self.coords.index(piece_class.KING_LOCATION[turn]) - 1,\n self.coords.index(piece_class.KING_LOCATION[turn]) - 2, self.coords.index(piece_class.KING_LOCATION[turn]) - 3,\n self.coords.index(piece_class.KING_LOCATION[turn]) - 4]\n castling_kingside = [self.coords.index(piece_class.KING_LOCATION[turn]), self.coords.index(piece_class.KING_LOCATION[turn]) + 1,\n self.coords.index(piece_class.KING_LOCATION[turn]) + 2, self.coords.index(piece_class.KING_LOCATION[turn]) + 3]\n \n if self.castling_valid(turn, castling_kingside):\n self.board[self.coords.index(piece_class.KING_LOCATION[turn])].possible_moves.append(self.coords[castling_kingside[2]])\n \n if self.castling_valid(turn, castling_queenside):\n self.board[self.coords.index(piece_class.KING_LOCATION[turn])].possible_moves.append(self.coords[castling_queenside[2]])", "def black_kingside_castling(self):\n return (self.castling[2] and self.empty((1, 7)) and self.empty((1, 8))\n and not self.attacked((1, 6), WHITE) and not\n self.attacked((1, 7), WHITE))", "def update_castle_rights(self, move):\r\n #deleting the right to castle on moving k/r\r\n #King\r\n if move.pieceMoved == 'wk':\r\n self.cr_castle_r.wks = False\r\n self.cr_castle_r.wqs = False\r\n elif move.pieceMoved == 'bk':\r\n self.cr_castle_r.bks = False\r\n self.cr_castle_r.bqs = False\r\n\r\n #Rook\r\n elif move.pieceMoved == 'wR':\r\n if move.sr == 7:\r\n if move.sc == 0:\r\n self.cr_castle_r.wqs = False\r\n elif move.sc == 7:\r\n self.cr_castle_r.wks = False\r\n elif move.pieceMoved == 'bR':\r\n if move.sr == 0:\r\n if move.sc == 0:\r\n self.cr_castle_r.bqs = False\r\n elif move.sc == 7:\r\n self.cr_castle_r.bks = False", "def check_illegal_move(self, player, action):\n available_actions = self.filter_actions(player)\n if action not in available_actions:\n print('Illegal move! Please choose another move!')\n return False\n return True", "def report_invalid_move(self, move: BotMove):\n self.invalid_moves.append((self.round, self.turn, move))", "def _unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]", "def king_adjust(self, turn):\n\n opposite_turn = next_turn(turn)\n\n original_location_index = (piece_class.KING_LOCATION[turn][0] + piece_class.KING_LOCATION[turn][1] * 8)\n \n# if self.board[original_location_index] == self.empty:\n# print(\"yo\")\n \n self.board[original_location_index].possible_moves = [i for i in self.board[original_location_index].possible_moves if i not in self.long_dict[opposite_turn]]\n \n\n\n #king_path = [i for i in self.board[original_location_index].possible_moves if i not in self.path_dict[opposite_turn]]\n\n #removes moving into check from king path. 
Can remove this functionality from the move method now.\n #self.board[original_location_index].possible_moves = king_path\n #king_path_index = [(i[0] + i[1]*8) for i in self.board[original_location_index].possible_moves]\n\n \n #for i in king_path:\n # if i in self.long_dict[opposite_turn] or self.check_dict[opposite_turn]:\n # print(\"king path is: \", king_path)\n # king_path.remove(i)\n # print(\"king path is now: \", king_path)\n \n\n\n\n\n #for i in king_path_index:\n # enemy_piece = self.board[i]\n # self.board[i] = self.board[original_location_index]\n # self.board[original_location_index] = self.empty\n # self.loads_pathways(turn)\n # if self.coords[i] in self.path_dict[opposite_turn]:\n # print(\"yo\")\n \n #self.board[original_location_index] = self.board[i]\n #self.board[i] = enemy_piece", "def untuck(self):\n self.move_to_neutral()", "def move_valid(move):\n return True", "def white_kingside_castling(self):\n return (self.castling[0] and self.empty((10, 7))\n and self.empty((10, 8)) and not self.attacked((10, 6), BLACK)\n and not self.attacked((10, 7), BLACK))", "def test_move_knight_illegally(self):\n self.c.board = [[(0, 0) for i in range(8)] for i in range(8)]\n for piece in [('N', True), ('N', False)]:\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n dests = [col + row for col in 'abcdefgh' for row in '12345678']\n for dest in dests:\n if dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n continue\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def make_move(self, move, check_valid=True):\r\n self.board[move.sr][move.sc] = \"--\"\r\n self.board[move.er][move.ec] = move.pieceMoved\r\n self.moveLog.append(move)\r\n self.turn_white = not self.turn_white\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.er, move.ec)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.er, move.ec)\r\n\r\n if move.isEnpassantMove:\r\n self.board[move.sr][move.ec] = \"--\"\r\n\r\n if move.pieceMoved[1] == 'p' and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ((move.er + move.sr) // 2, move.ec)\r\n else:\r\n self.enpas_pos = ()\r\n\r\n if move.isPawnPromotion and not check_valid:\r\n promoted_piece = \"a\"\r\n while promoted_piece not in ('q', 'r', 'b', 'n'):\r\n promoted_piece = input(\"Promote to q, r, b, or n: \")\r\n self.board[move.er][move.ec] = move.pieceMoved[0] + promoted_piece\r\n\r\n # castle\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec - 1] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'\r\n else:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 2]\r\n self.board[move.er][move.ec - 2] = '--'\r\n\r\n # castle rights on rook, king move\r\n self.update_castle_rights(move)\r\n self.castleRightsLog.append(CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs))", "def castling_valid(self, turn, direction):\n \n opposite_colour = next_turn(turn)\n\n \n if self.board[direction[0]] and self.board[direction[-1]] != self.empty:\n if ((self.board[direction[0]].graphic) == piece_class.PIECEDICT[turn][piece_class.King] and \n (self.board[direction[-1]].graphic) == piece_class.PIECEDICT[turn][piece_class.Rook]):\n if self.board[direction[0]].move_track == False and self.board[direction[-1]].move_track == False:\n for i in self.path_dict[opposite_colour]:\n if i in self.coords:\n if self.coords.index(i) == direction[0]:\n \n return False\n \n if self.coords.index(i) 
== direction[1]:\n \n return False\n \n if self.coords.index(i) == direction[2]:\n \n return False\n \n if len(direction) == 4:\n if self.board[direction[1]] == self.empty:\n if self.board[direction[2]] == self.empty:\n \n return True\n \n if len(direction) == 5:\n if self.board[direction[1]] == self.empty:\n if self.board[direction[2]] == self.empty:\n if self.board[direction[3]] == self.empty:\n \n return True\n \n return False", "def is_rook_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same column or row\n if ((from_row != to_row and from_col != to_col) or\n (from_row == to_row and from_col == to_col)):\n return False\n\n # check if any pieces are in the way of destination\n if from_row != to_row:\n dc = 0\n dr = 1 if to_row - from_row > 0 else -1\n if from_col != to_col:\n dr = 0\n dc = 1 if to_col - from_col > 0 else -1\n dm = abs(to_row - from_row)\n\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm, toRow=to_row, toCol=to_col)\n\n # Casting: Rook invalidation\n if retVal and (from_row == 0 or from_row == 7):\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n if piece_color == \"white\":\n if from_col == 0:\n self.whiteCanCastleQside = False\n elif from_col == 7:\n self.whiteCanCastleKside = False\n else:\n if from_col == 0:\n self.blackCanCastleQside = False\n elif from_col == 7:\n self.blackCanCastleKside = False\n\n return retVal", "def unaway(self):\n self.away()", "def test_move_knight_legally_blocked(self):\n for piece in [('N', True), ('N', False)]:\n self.c.board = \\\n [[('K', piece[1]) for i in range(8)] for i in range(8)]\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n for dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def is_valid(self, layer: int, index: int, tower) -> bool:\r\n tower = copy.deepcopy(tower)\r\n tower.move_piece(layer, index)\r\n \r\n if tower.will_fall():\r\n del tower\r\n return False\r\n else:\r\n del tower\r\n return True", "def move2(self):\n\n options = self.location.exits.keys()\n for key in options:\n if self.location.exits[key] == p.location:\n self.location.objects.remove(a)\n self.location = p.location\n self.location.objects.append(a)\n print('fred entered the room')\n self.attack(['attack', str(p.name)])\n break\n else:\n self.move1()", "def death(self):\n logging.debug(\"Wolf died.\")\n self.pack = False\n self.model.grid.remove_agent(self)\n self.model.schedule.remove(self)", "def test_move_over_terrain(self):\n # move over Water (0 extra)\n b1 = board.Board(self.small_ter)\n start = np.array((0, 1), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 1\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 1)\n #\n # move over Lava (0 extra)\n start = np.array((5, 4), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 6\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 1)\n #\n # move over Barrier (illegal)\n start = np.array((2, 3), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 0\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertFalse(isvalid)\n #\n # move over Rock (0 extra)\n start = np.array((2, 3), dtype='int')\n k1 = 
knight.Knight(b1, start)\n # set move choice\n move_choice = 2\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 1)", "def test_bad_turn(self):\n board = Board()\n player1 = LegitPlayer()\n player2 = BadTurnPlayer()\n player1.start_of_game()\n player2.start_of_game()\n player_guard1 = PlayerGuard(player1)\n player_guard2 = PlayerGuard(player2)\n\n # set ids\n p1id = uuid.uuid4() \n p2id = uuid.uuid4() \n player_guard1.set_id(p1id)\n player_guard2.set_id(p2id)\n\n board.place_worker(*player_guard1.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard1.place_worker(board))\n\n self.assertRaises(PlayerInvalidTurn, player_guard2.play_turn, board)", "def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.rocket.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.rocket.moving_left = False\n elif event.key == pygame.K_UP:\n self.rocket.moving_up = False\n elif event.key == pygame.K_DOWN:\n self.rocket.moving_down = False", "def move(self, action):\n self.time += 1\n\n # If ship is destroyed ship can only contemplate sadness and despair\n if not action or not self.is_playable():\n return None\n\n self.actualise = False\n\n if self.leroy_time == 1:\n self.back_to_normal()\n if self.leroy_time > 0:\n self.leroy_time -= 1\n\n # there is a chance that the ia enter in leroy mode\n # the ia goes mad for some time, acting randomly\n # added to allow the ships to explore the possible actions and not stay passive\n if not self.player and self.leroy_time == 0 and self.agent.behavior == \"network\" and random() < LEROY_RATE:\n self.leroy_jenkins()\n\n # training reward depending on position\n # self.agent.reward = self.go_bottom_reward()\n\n if isinstance(action, ActionOneHot):\n if action.pointing:\n self.pointing = Point(randint(0, DEFAULT_WIDTH-1), randint(0, DEFAULT_HEIGHT-1))\n elif isinstance(action, Action):\n if action.pointing:\n self.pointing = action.pointing\n # print(\"action.pointing\", action.pointing)\n # print(\"turn \", self.direction)\n\n if action.thrust:\n self.thrust()\n if action.shoot:\n self.shoot()", "def interaction_hole(self) -> None:\n x_dead_char = self.moving_character.x_obj\n y_dead_char = self.moving_character.y_obj\n void = ob.Void(x_dead_char, y_dead_char)\n # Replacing character by a Void\n self.grid.obj_list[self.moving_character] = void\n del self.grid.character_list[self.index_character]\n self.grid.character_just_died = True", "def _isvalidmove(self, from_, to_):\n if self.board[from_].occupant is None:\n print(\"Moving from empty square\")\n return False\n piece = self.board[from_].occupant\n\n if piece.color != self.to_move:\n print(\"Wrong color\")\n return False\n\n if self.is_checked:\n if piece.notation != 'K':\n print(\"King is checked!\")\n return False\n\n diff = (\n to_cartesian(to_)[0] - to_cartesian(from_)[0],\n to_cartesian(to_)[1] - to_cartesian(from_)[1]\n )\n if not piece.hopping:\n if self.board.isblocked(from_, to_):\n print(\"Move blocked by other pieces\")\n return False\n\n if self.board[to_].occupant is not None:\n if piece.color == self.board[to_].occupant.color:\n print(\"Cannot capture friendly\")\n return False\n\n if diff not in piece.get_captures():\n print(\"Invalid piece capture\")\n return False\n\n if diff not in piece.get_moves():\n print(\"Invalid piece move\")\n return False\n\n return True", "def 
test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)" ]
[ "0.6392645", "0.621743", "0.6217085", "0.61273366", "0.608021", "0.60771215", "0.60581195", "0.60546935", "0.6036227", "0.5978879", "0.5968202", "0.59250015", "0.5924627", "0.5899445", "0.58952796", "0.5890645", "0.58730435", "0.58223784", "0.5810963", "0.5798069", "0.57728237", "0.5772606", "0.57612365", "0.5756435", "0.57485723", "0.5747448", "0.5744258", "0.572601", "0.5714607", "0.5699179" ]
0.6648356
0
Converts an algebraic notation move to internal coordinates
def _AN_to_coords(self, move: str): orig_move = move extra_info = "" # remove all characters that don't matter when parsing for pointless_char in "x+#": move = move.replace(pointless_char, "") # Handle castling if CASTLE_QUEENSIDE in move: row = self._get_castling_row() return (row, 4), (row, 2), CASTLE_QUEENSIDE elif CASTLE_KINGSIDE in move: row = self._get_castling_row() return (row, 4), (row, 6), CASTLE_KINGSIDE # Pawn promotion if move[-2] == "=": extra_info = move[-1] if self.white_to_move else move[-1].lower() move = move[:-2] # Destination of move, this is the only guaranteed substring in the move dest_str = move[-2:] dest = State._EAN_coords_to_board_coords(dest_str) move = move[:-2] # Deduce what piece actually made the move, if there is no shown there is no pawn # Note in AN pieces are always uppercase and location is lowercase, # so this makes it simple to check if we have a piece or a location piece = "P" if move and move[0].isupper(): piece = move[0] move = move[1:] if not self.white_to_move: piece = piece.lower() # At this point the only info the move should contain is a hint on where the piece is coming from loc_hint = move possible_moves = self.get_all_moves() possible_moves = filter(lambda x: dest_str in x, possible_moves) # Filter to only moves that land on the right destination possible_moves = list(filter(lambda x: loc_hint in x[0:2], possible_moves)) # Filter to only moves that match the hint in the algebraic notation for possible_move in possible_moves: row, col = State._EAN_coords_to_board_coords(possible_move[0:2]) if self.board[row][col] == piece: return (row, col), dest, extra_info raise ValueError("Algebraic notation parsing failed, no valid move found matching the given move " + orig_move + " with board state\n" + str(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_move_to_square(self, uci_move: str):\n chars = utils.split_string_to_chars(uci_move)\n square_from = ''.join(chars[0] + chars[1])\n square_to = ''.join(chars[2] + chars[3])\n return square_from, square_to", "def test_move_initialization():\r\n m = Move('A1', 'B2')\r\n assert m.get_from_str() == 'A1'\r\n assert m.get_to_str() == 'B2'\r\n assert m.get_from_xy() == (7, 0)\r\n assert m.get_to_xy() == (6, 1)", "def parse_move(move):\n if not (len(move) == 2):\n return None, None\n try:\n row = ord(move[0].upper()) - 65\n col = int(move[1])\n except:\n return None, None\n return row, col", "def _EAN_coords_to_board_coords(EAN_move: str) -> (int, int):\n assert EAN_move[0] in \"abcdefgh\" and EAN_move[1] in \"12345678\", \"failed to get \" + EAN_move\n\n\n col = ord(EAN_move[0]) - ord('a')\n row = 8 - int(EAN_move[1])\n return row, col", "def _parse_move(origin, destination, axis):\n # If only one set of coordinates is defined, make sure it's used to move things\n if destination is None:\n destination = origin\n origin = [0, 0]\n\n d = _parse_coordinate(destination)\n o = _parse_coordinate(origin)\n if axis == \"x\":\n d = (d[0], o[1])\n if axis == \"y\":\n d = (o[0], d[1])\n dx, dy = np.array(d) - o\n\n return dx, dy", "def algebraic_to_index(move: str) -> tuple[int, int]:\r\n return (RANK_TO_INDEX[move[1]], FILE_TO_INDEX[move[0]])", "def state_to_coords(self, state):\n x, _, theta, _ = state\n cart_coords = (x, self.y)\n pole_coords = ([x, x + 2*self.L*math.sin(theta)],\n [self.y, self.y + 2*self.L*math.cos(theta)])\n return cart_coords, pole_coords", "def respond_to_move(self, move):\n\n # this will get the piece at the queried position,\n # will notify user if there is no piece there\n current_algebraic, new_algebraic = move\n row, column = self.algebraic_mapped_to_position[current_algebraic]\n if self.board[row][column] == empty_square:\n print(\"There is no piece at %s\" % (current_algebraic,))\n return\n piece, location = self.board[row][column]\n\n # this will get all possible moves from this position\n # and will make the move if the new position is a\n # valid move\n piece_name = self.piece_names[piece]\n moves = self.moves[piece_name]((row, column))\n \n new_row, new_column = self.algebraic_mapped_to_position[new_algebraic]\n print(\"old position %s, %s\" % (row, column))\n print(\"new algebraic %s\" % new_algebraic)\n print(\"new position %s, %s\" % (new_row, new_column))\n print(\"moves %s\" % moves)\n if (new_row, new_column) in moves:\n # this will change the game board to reflect the move\n self.board[row][column] = empty_square\n self.board[new_row][new_column] = piece+location", "def move(self, move):\n out = ''\n for val in self.moves[move]:\n out += self.state[val]\n self.state = out", "def get_move_positions(move):\n move_positions = []\n for (xi, yi) in move.orientation:\n (x, y) = (xi + move.x, yi + move.y)\n move_positions.append((y, x))\n return move_positions", "def get_move(moves):\n pass", "def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)", "def computePosition(self, state):\n d = 0\n if state[5] == \"East\":\n d = 0\n elif state[5] == \"West\":\n d = 1\n elif state[5] == \"North\":\n d = 2\n else:\n d = 3\n return state[0]*64+state[1]*32+state[2]*16+state[3]*8+state[4]*4+d", "def move(self, state, move_cmd, i, j):\r\n new_state = self.clone_state(state)\r\n 
coordinate_change = self.action_dic[self.reflection_dic[move_cmd]]\r\n new_state[i][j], new_state[i + coordinate_change[0]][j + coordinate_change[1]] = \\\r\n new_state[i + coordinate_change[0]][j + coordinate_change[1]]\\\r\n , new_state[i][j]\r\n return new_state", "def state_to_position(self, state):\r\n dir = state % 4\r\n col = ((state - dir) / 4) % self.width\r\n row = (state - dir - col * 4) / (4 * self.width)\r\n return row, col, dir", "def __parse_move_line(self, line):\n parts = re.sub('\\(.*?\\)', '', line).split()\n x, y = None, None\n for part in parts[:0:-1]:\n axis = part.upper()[0]\n value = float(part[1:])\n if axis in ['Z', 'F']:\n parts.remove(part)\n elif axis == 'X':\n x = value\n parts.remove(part)\n elif axis == 'Y':\n y = value\n parts.remove(part)\n if x is None or y is None:\n return None\n template = parts[0] + ' X{:.6f} Y{:.6f} ' + ' '.join(parts[1:])\n return [template, x, y]", "def navigate1(commands: list) -> tuple:\n\n position: list = [0, 0] # x, y\n bearing: int = 90\n conversion: dict = {'N': 1, 'S': -1, 'E': 1, 'W': -1, 'L': -1, 'R': 1, 'F': 1}\n\n for command in commands: \n \n change: int = command['value'] * conversion[command['action']]\n\n if command['action'] in ['N', 'S']: \n position[1] += change\n elif command['action'] in ['E', 'W']: \n position[0] += change\n elif command['action'] in ['L', 'R']: \n bearing += change\n elif command['action'] == 'F': \n \n if bearing == 0: \n position[1] += change\n elif bearing == 90: \n position[0] += change\n elif bearing == 180: \n position[1] -= change\n elif bearing == 270:\n position[0] -= change \n else: \n raise ValueError(\"Invalid bearing\")\n\n else: \n raise ValueError(\"Invalid action\")\n\n if bearing > 270:\n bearing -= 360\n if bearing < 0: \n bearing += 360\n \n return tuple(position)", "def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self", "def make_move(self, move):\n if type(move) == str:\n move = int(move)\n\n new_state = SubtractSquareState(not self.p1_turn,\n self.current_total - move)\n return new_state", "def atom_to_internal_coordinates(self, verbose: bool = ...) -> None:\n ...", "def atom_to_internal_coordinates(self, verbose: bool = ...) 
-> None:\n ...", "def translate_to_algebraic(location):\n\n columns = 'abcdefghi'\n return columns[location[0]] + str(location[1] + 1)", "def move(self, m):\n if m not in \"RLUD\":\n raise ValueError(\n (\"Not a legal move: '{}', should be one of \" +\n \"the 'RLUD'.\").format(m))\n if m not in self.legal_moves:\n raise ValueError(\n (\"Not a legal move at this state: '{}', \" +\n \"should be one of the '{}'.\").format(m, self.legal_moves))\n\n posdiff = (0, 0)\n if m == 'L':\n posdiff = (0, 1)\n elif m == 'R':\n posdiff = (0, -1)\n elif m == 'U':\n posdiff = (1, 0)\n elif m == 'D':\n posdiff = (-1, 0)\n\n empty_position = self.get_position(0)\n newpuz = self.swap((empty_position[0] - posdiff[0],\n empty_position[1] - posdiff[1]))\n return newpuz", "def translate_to_point_O(self):\n self.translate(-self.pcs.origin)", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def move(self, position, direction):\n i, j = position\n direction %= 360\n if direction == 0:\n return (i - 1, j)\n if direction == 90:\n return (i, j + 1)\n if direction == 180:\n return (i + 1, j)\n if direction == 270:\n return (i, j - 1)\n raise ValueError(f\"Maze.move called with bad angle = {direction}\")", "def sim_move(self, state, move):\n out = ''\n for val in self.moves[move]:\n out += state[val]\n return out", "def move(self, *ms):\n\n # Map cube notation to corresponding clockwise movements.\n mmap = {\n 'L': (2, -1, -1), 'M': (2, 0, -1), 'R': (2, 1, 1),\n 'U': (1, -1, -1), 'E': (1, 0, 1), 'D': (1, 1, 1),\n 'F': (0, -1, -1), 'S': (0, 0, -1), 'B': (0, 1, 1),\n 'X': (2, 2, 1), 'Y': (1, 2, -1), 'Z': (0, 2, -1)\n }\n\n for m in ms:\n if m == '#':\n # End the scramble if '#' is found.\n self.scrambling = False\n else:\n axis, slice, dir = mmap[m[0].upper()]\n if \"'\" in m:\n # Invert the move.\n self.anims.add(Anim(self, axis, slice, -dir, self.speed))\n m = m.replace(\"'\", '')\n elif '2' in m:\n # Double the move.\n self.anims.add(Anim(self, axis, slice, dir, self.speed), Anim(self, axis, slice, dir, 
self.speed))\n else:\n self.anims.add(Anim(self, axis, slice, dir, self.speed))\n m = m + \"'\"\n\n # Push inverse move to history queue.\n self.moved.push(m)", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()" ]
[ "0.63731796", "0.5935379", "0.5798718", "0.5784685", "0.56920767", "0.5684874", "0.5637243", "0.55397224", "0.5485789", "0.5454764", "0.54507494", "0.54258645", "0.5371932", "0.5371518", "0.5364488", "0.53356934", "0.53237957", "0.532104", "0.5312109", "0.5268661", "0.5268661", "0.52580976", "0.5246433", "0.52400464", "0.5234542", "0.5229298", "0.5212394", "0.52080417", "0.52051896", "0.51960194" ]
0.69260526
0
Request the service (set thymio state values) exposed by the simulated thymio. A teleportation tool, by default in gazebo world frame. Be aware, this does not mean a reset (e.g. odometry values).
def thymio_state_service_request(self, position, orientation): rospy.wait_for_service('/gazebo/set_model_state') try: model_state = ModelState() model_state.model_name = self.thymio_name model_state.reference_frame = '' # the frame for the pose information model_state.pose.position.x = position[0] model_state.pose.position.y = position[1] model_state.pose.position.z = position[2] qto = quaternion_from_euler(orientation[0], orientation[0], orientation[0], axes='sxyz') model_state.pose.orientation.x = qto[0] model_state.pose.orientation.y = qto[1] model_state.pose.orientation.z = qto[2] model_state.pose.orientation.w = qto[3] # a Twist can also be set but not recomended to do it in a service gms = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) response = gms(model_state) return response except rospy.ServiceException, e: print "Service call failed: %s"%e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service(ants=0, tmo=200, waiton=-2) :\n return stow( ants, tmo, waiton, SERVICE );", "def startService(self):\n self.world.start()", "def init(self):\n try:\n yield self._connect_dbus()\n logger.info(\"Request the GSM resource\")\n yield WaitFSOResource('GSM', time_out=30)\n yield WaitDBus(self.ousage.RequestResource, 'GSM')\n yield self._turn_on()\n logger.info(\"register on the network\")\n register = yield self._register()\n #if register:\n #provider = yield tichy.Wait(self, 'provider-modified')\n \n self._keep_alive().start()\n \n ##network selection end\n \n except Exception, ex:\n logger.error(\"Error : %s\", ex)\n raise\n \n try:\n \n yield tichy.Service.get('ConfigService').wait_initialized()\n self.config_service = tichy.Service.get(\"ConfigService\")\n logger.info(\"got config service\")\n \n except Exception, ex:\n logger.error(\"Error in try retrieving config service : %s\", ex)\n \n try:\n \n ##call forwaring setting start\n self.values = self.config_service.get_items(\"call_forwarding\")\n if self.values != None: self.values = dict(self.values)\n logger.info(\"realized values is none\")\n\n except Exception, ex:\n logger.error(\"Error in try call forwarding setting : %s\", ex)\n \n \n try:\n\n self.SettingReason = tichy.settings.ListSetting('Call Forwarding','Reason',tichy.Text,value='unconditional', setter=self.ForwardingSetReason,options=[\"unconditional\",\"mobile busy\",\"no reply\",\"not reachable\",\"all\",\"allconditional\"],model=tichy.List([ ListSettingObject(\"unconditional\", self.action),ListSettingObject(\"mobile busy\",self.action),ListSettingObject(\"no reply\", self.action),ListSettingObject(\"not reachable\", self.action),ListSettingObject(\"all\", self.action),ListSettingObject(\"all conditional\", self.action)]), ListLabel =[('title','name')])\n \n self.SettingForwarding = tichy.settings.ToggleSetting('Call Forwarding', 'active', tichy.Text, value=self.GetForwardingStatus('unconditional'),setter=self.ToggleForwarding, options=['active','inactive'])\n \n \n except Exception, ex:\n logger.error(\"Error in try call forwarding setting list : %s\", ex)\n \n \n try:\n\n self.SettingChannels = tichy.settings.Setting('Call Forwarding', 'channels', tichy.Text, value=self.ForwardingGet('class'), setter=self.ForwardingSetClass, options=[\"voice\",\"data\",\"voice+data\",\"fax\",\"voice+data+fax\"])\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Target Number', tichy.Text, value=self.ForwardingGet('number'), setter=self.ForwardingSetNumber)\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Timeout', tichy.Text, value=self.ForwardingGet('timeout'), setter=self.ForwardingSetTimeout)\n \n ##call forwaring setting stop\n \n \n except Exception, ex:\n logger.error(\"Error in try Error in try call forwarding setting : %s\", ex)\n \n try:\n\n ##call identifaction setting start\n self.CallIdentification = tichy.settings.Setting('Network', 'Call Identification', tichy.Text, value=self.GetCallIdentification(), setter=self.SetCallIdentifaction, options=[\"on\",\"off\",\"network\"])\n ##call identifaction setting stop\n \n except Exception, ex:\n logger.error(\"Error in network identification setting: %s\", ex)\n \n try: \n ##network selection etc begin\n self.NetworkRegistration = tichy.settings.Setting('Network', 'Registration', tichy.Text, value=self.GetRegStatus(), setter=self.SetRegStatus, options=[\"registered\",\"not registered\"])\n \n \n except Exception, ex:\n logger.error(\"Error in network 
registration setting : %s\", ex)\n \n \n try:\n \n self.scanning = False\n self.NetworkList = tichy.List()\n self.ListLabel = [('title','name'),('subtitle','status')]\n \n self.scan_setting = tichy.settings.ListSetting('Network', 'List', tichy.Text, value=\"scan\", setter=self.run_scan, options=['scan'], model=self.NetworkList, ListLabel=self.ListLabel)\n \n except Exception, ex:\n logger.error(\"Error in network list setting : %s\", ex)\n #raise", "def ServiceRequest(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n pass", "def get_service(self):", "def init(self):\n logger.info(\"Turn on antenna power\")\n logger.info(\"Register on the network\")\n self.emit('provider-modified', \"Charlie Telecom\")\n self.network_strength = 100\n yield tichy.Service.get('ConfigService').wait_initialized()\n self.config_service = tichy.Service.get(\"ConfigService\")\n logger.info(\"got config service\")\n self.values = self.config_service.get_items(\"call_forwarding\")\n if self.values != None: self.values = dict(self.values)\n logger.info(\"realized values is none\")\n self.SettingReason = tichy.settings.ListSetting('Call Forwarding', 'Reason', tichy.Text, value='unconditional', setter=self.ForwardingSetReason, options=[\"unconditional\",\"mobile busy\",\"no reply\",\"not reachable\",\"all\",\"all conditional\"], model=tichy.List([ListSettingObject(\"unconditional\", self.action),ListSettingObject(\"mobile busy\", self.action),ListSettingObject(\"no reply\", self.action),ListSettingObject(\"not reachable\", self.action),ListSettingObject(\"all\", self.action),ListSettingObject(\"all conditional\", self.action)]), ListLabel = [('title','name')])\n self.SettingChannels = tichy.settings.Setting('Call Forwarding', 'channels', tichy.Text, value=self.ForwardingGet('class'), setter=self.ForwardingSetClass, options=[\"voice\",\"data\",\"voice+data\",\"fax\",\"voice+data+fax\"])\n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Target Number', tichy.Text, value=self.ForwardingGet('number'), setter=self.ForwardingSetNumber)\n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Timeout', tichy.Text, value=self.ForwardingGet('timeout'), setter=self.ForwardingSetTimeout)\n \n if len(self.logs) == 0: \n for i in range(3):\n call = Call('0049110', direction='out')\n self.logs.insert(0, call)\n yield None", "def send_state(self, req):\n state = GetStateResponse()\n state.update_underway = self.update_underway\n state.world = self.scenarios[self.scenario_idx][0]\n state.scenario = self.scenarios[self.scenario_idx][1]\n\n return state", "def run_agent(self):\n do_plot = False\n\n # -- Load and init the Helper mission --#\n print('Generate and load the ' + self.mission_type + ' mission with seed ' + str(\n self.mission_seed) + ' allowing ' + self.AGENT_MOVEMENT_TYPE + ' movements')\n mission_xml, reward_goal, reward_intermediate, n_intermediate_rewards, reward_timeout, reward_sendcommand, timeout = init_mission(\n self.agent_host, self.agent_port, self.AGENT_NAME, self.mission_type, self.mission_seed,\n self.AGENT_MOVEMENT_TYPE)\n self.solution_report.setMissionXML(mission_xml)\n\n # -- Define local capabilities of the agent (sensors)--#\n self.agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)\n self.agent_host.setVideoPolicy(MalmoPython.VideoPolicy.LATEST_FRAME_ONLY)\n self.agent_host.setRewardsPolicy(MalmoPython.RewardsPolicy.KEEP_ALL_REWARDS)\n\n time.sleep(1)\n\n # -- Get the state of the 
world along with internal agent state...--#\n state_t = self.agent_host.getWorldState()\n\n # -- Get a state-space model by observing the Orcale/GridObserver--#\n if state_t.is_mission_running:\n # -- Make sure we look in the right direction when observing the surrounding (otherwise the coordinate system will rotated by the Yaw !) --#\n # Look East (towards +x (east) and +z (south) on the right, i.e. a std x,y coordinate system) yaw=-90\n self.agent_host.sendCommand(\"setPitch 20\")\n time.sleep(1)\n self.agent_host.sendCommand(\"setYaw -90\")\n time.sleep(1)\n\n # -- Basic map --#\n state_t = self.agent_host.getWorldState()\n\n if state_t.number_of_observations_since_last_state > 0:\n msg = state_t.observations[-1].text # Get the details for the last observed state\n oracle_and_internal = json.loads(msg) # Parse the Oracle JSON\n grid = oracle_and_internal.get(u'grid', 0)\n xpos = oracle_and_internal.get(u'XPos', 0)\n zpos = oracle_and_internal.get(u'ZPos', 0)\n ypos = oracle_and_internal.get(u'YPos', 0)\n yaw = oracle_and_internal.get(u'Yaw', 0)\n pitch = oracle_and_internal.get(u'Pitch', 0)\n\n # -- Parste the JOSN string, Note there are better ways of doing this! --#\n full_state_map_raw = str(grid)\n full_state_map_raw = full_state_map_raw.replace(\"[\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"]\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"u'\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"'\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\" \", \"\")\n aa = full_state_map_raw.split(\",\")\n vocs = list(set(aa))\n for word in vocs:\n for i in range(0, len(aa)):\n if aa[i] == word:\n aa[i] = vocs.index(word)\n\n X = np.asarray(aa);\n nn = int(math.sqrt(X.size))\n X = np.reshape(X, [nn, nn]) # Note: this matrix/table is index as z,x\n\n # -- Visualize the discrete state-space --#\n if do_plot:\n print yaw\n plt.figure(1)\n imgplot = plt.imshow(X.astype('float'), interpolation='none')\n plt.pause(4)\n # plt.show()\n\n # -- Define the unique states available --#\n state_wall = vocs.index(\"stained_hardened_clay\")\n state_impossible = vocs.index(\"stone\")\n state_initial = vocs.index(\"emerald_block\")\n state_goal = vocs.index(\"redstone_block\")\n\n # -- Extract state-space --#\n offset_x = 100 - math.floor(xpos);\n offset_z = 100 - math.floor(zpos);\n\n state_space_locations = {}; # create a dict\n\n for i_z in range(0, len(X)):\n for j_x in range(0, len(X)):\n if X[i_z, j_x] != state_impossible and X[i_z, j_x] != state_wall:\n state_id = \"S_\" + str(int(j_x - offset_x)) + \"_\" + str(int(i_z - offset_z))\n state_space_locations[state_id] = (int(j_x - offset_x), int(i_z - offset_z))\n if X[i_z, j_x] == state_initial:\n state_initial_id = state_id\n loc_start = state_space_locations[state_id]\n elif X[i_z, j_x] == state_goal:\n state_goal_id = state_id\n loc_goal = state_space_locations[state_id]\n\n # -- Generate state / action list --#\n # First define the set of actions in the defined coordinate system \n actions = {\"west\": [-1, 0], \"east\": [+1, 0], \"north\": [0, -1], \"south\": [0, +1]}\n state_space_actions = {}\n for state_id in state_space_locations:\n possible_states = {}\n for action in actions:\n # -- Check if a specific action is possible --#\n delta = actions.get(action)\n state_loc = state_space_locations.get(state_id)\n state_loc_post_action = [state_loc[0] + delta[0], state_loc[1] + delta[1]]\n\n # -- Check if the new possible state is in the state_space, i.e., is accessible --#\n state_id_post_action = 
\"S_\" + str(state_loc_post_action[0]) + \"_\" + str(\n state_loc_post_action[1])\n if state_space_locations.get(state_id_post_action) != None:\n possible_states[state_id_post_action] = 1\n\n # -- Add the possible actions for this state to the global dict --#\n state_space_actions[state_id] = possible_states\n\n # -- Kill the agent/mission --#\n agent_host.sendCommand(\"tp \" + str(0) + \" \" + str(0) + \" \" + str(0))\n time.sleep(2)\n\n # -- Save the info an instance of the StateSpace class --\n self.state_space.state_actions = state_space_actions\n self.state_space.state_locations = state_space_locations\n self.state_space.start_id = state_initial_id\n self.state_space.start_loc = loc_start\n self.state_space.goal_id = state_goal_id\n self.state_space.goal_loc = loc_goal\n\n # -- Reward location and values --#\n # OPTIONAL: If you want to account for the intermediate rewards \n # in the Random/Simple agent (or in your analysis) you can \n # obtain ground-truth by teleporting with the tp command \n # to all states and detect whether you recieve recieve a \n # diamond or not using the inventory field in the oracle variable \n #\n # As default the state_space_rewards is just set to contain \n # the goal state which is found above.\n # \n state_space_rewards = {}\n state_space_rewards[state_goal_id] = reward_goal\n\n # HINT: You can insert your own code for getting \n # the location of the intermediate rewards\n # and populate the state_space_rewards dict \n # with more information (optional). \n # WARNING: This is a bit tricky, please consult tutors before starting\n\n # -- Set the values in the state_space container --#\n self.state_space.reward_states = state_space_rewards\n self.state_space.reward_states_n = n_intermediate_rewards + 1\n self.state_space.reward_timeout = reward_timeout\n self.state_space.timeout = timeout\n self.state_space.reward_sendcommand = reward_sendcommand\n else:\n self.state_space = None\n # -- End if observations --#\n\n return", "def setUp(self):\n self._service = Service()\n self._service.setUp()\n time.sleep(1)\n self._proxy = get_object(TOP_OBJECT)\n Manager.Methods.ConfigureSimulator(self._proxy, {'denominator': 8})", "def setUp(self):\n self._service = Service()\n self._service.setUp()\n time.sleep(1)\n self._proxy = get_object(TOP_OBJECT)\n Manager.Methods.ConfigureSimulator(self._proxy, {'denominator': 8})", "def main():\n\n GAME = \"Assignment1-Taxi-v2\"\n env = gym.make(GAME)\n n_state = env.observation_space.n\n n_action = env.action_space.n\n env = Monitor(env, \"taxi_simple\", force=True)\n\n s = env.reset()\n steps = 100\n for step in range(steps):\n env.render()\n action = int(input(\"Please type in the next action:\"))\n s, r, done, info = env.step(action)\n print(s)\n print(r)\n print(done)\n print(info)\n\n # close environment and monitor\n env.close()", "def initService(self):", "def service(self):\n pass", "def ControlSsm(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ControlSsm(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self):\n\n\t\t# Initialize the action server\n\t\tself._as = actionlib.ActionServer('/action_ros_iot',\n\t\t\t\t\t\t\t\t\t\t\tmsgRosIotAction,\n\t\t\t\t\t\t\t\t\t\t\tself.on_goal,\n\t\t\t\t\t\t\t\t\t\t\tauto_start = False)\n\t\t# * 
self.on_goal - Pointer of the function to be called \n\t\t# when a goal is received\n\n\t\t# * self.on_cancel - Pointer of the function to be called\n\t\t# when a cancel req is received\n\n\n\t\t# Read and Store the IOT Configuration from parameter server\n\t\tparam_config_iot = rospy.get_param('config_pyiot')\n\n\t\t# Loading the MQTT Parameters\n\t\tself._config_mqtt_server_url = param_config_iot['mqtt']['server_url']\n\t\tself._config_mqtt_server_port = param_config_iot['mqtt']['server_port']\n\t\tself._config_mqtt_sub_topic = param_config_iot['mqtt']['topic_sub']\n\t\tself._config_mqtt_pub_topic = param_config_iot['mqtt']['topic_pub']\n\t\tself._config_mqtt_qos = param_config_iot['mqtt']['qos']\n\t\tself._config_mqtt_sub_cb_ros_topic = param_config_iot['mqtt']['sub_cb_ros_topic']\n\n\t\t# Loading the Google Sheet Parameter\n\t\tself._config_sheet_id = param_config_iot['google_apps']['spread_sheet_id']\n\t\tself._config_sheet_url = param_config_iot['google_apps']['spread_sheet_url']\n\t\tself._config_email_id = param_config_iot['google_apps']['email_id']\n\t\trospy.logwarn(\"email_id {}\".format(self._config_email_id))\n\t\t\n\t\t# Overwriting for Eyantra's Gsheet\n\t\t# self._config_sheet_url = \"https://script.google.com/macros/s/AKfycbw5xylppoda-8HPjt2Tzq4ShU_Xef-Ik-hEtBPcPk0gdGw8095j4RZ7/exec\"\n\n\t\t# Loading the dictionary for each sheets\n\t\tself.dict_IncomingOrders = {\"id\": \"IncomingOrders\",\n\t\t\t\t\t\t\t \"Team Id\": \"VB#1083\",\n\t\t\t\t\t\t\t \"Unique Id\": \"iOeCqZLI\",\n\t\t\t\t\t\t\t \"Order ID\": \"NA\",\n\t\t\t\t\t\t\t \"Order Date and Time\": \"NA\",\n\t\t\t\t\t\t\t \"Item\": \"NA\",\n\t\t\t\t\t\t\t \"Priority\": \"NA\",\n\t\t\t\t\t\t\t \"Order Quantity\": \"NA\",\n\t\t\t\t\t\t\t \"City\": \"NA\",\n\t\t\t\t\t\t\t \"Longitude\": \"NA\",\n\t\t\t\t\t\t\t \"Latitude\": \"NA\",\n\t\t\t\t\t\t\t \"Cost\": \"NA\"}\n\n\t\tself.dict_Inventory = {\"id\": \"Inventory\",\n\t\t\t\t\t\t\t\t\"Team Id\": \"VB#1083\",\n\t\t\t\t\t\t\t\t\"Unique Id\": \"iOeCqZLI\",\n\t\t\t\t\t\t\t\t\"SKU\": \"NA\",\n\t\t\t\t\t\t\t\t\"Item\": \"NA\",\n\t\t\t\t\t\t\t\t\"Priority\": \"NA\",\n\t\t\t\t\t\t\t\t\"Storage Number\": \"NA\",\n\t\t\t\t\t\t\t\t\"Cost\": \"NA\",\n\t\t\t\t\t\t\t\t\"Quantity\": \"NA\",\n\t\t\t\t\t\t\t\t}\n\n\t\tself.dict_OrdersDispatched = {\"id\": \"OrdersDispatched\",\n\t\t\t\t\t\t\t\t\t\"Team Id\": \"VB#1083\",\n\t\t\t\t\t\t\t\t\t\"Unique Id\": \"iOeCqZLI\",\n\t\t\t\t\t\t\t\t\t\"Order ID\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"City\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Item\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Priority\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Dispatch Quantity\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Cost\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Dispatch Status\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Dispatch Date and Time\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"email_id\": self._config_email_id}\n\n\t\tself.dict_OrdersShipped = {\"id\":\"OrdersShipped\",\n\t\t\t\t\t\t\t\t\t\"Team Id\":\"VB#1083\",\n\t\t\t\t\t\t\t\t\t\"Unique Id\":\"iOeCqZLI\",\n\t\t\t\t\t\t\t\t\t\"Order ID\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"City\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Item\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Priority\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Shipped Quantity\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Cost\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Shipped Status\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Shipped Date and Time\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"Estimated Time of Delivery\":\"NA\",\n\t\t\t\t\t\t\t\t\t\"email_id\": self._config_email_id}\n\t\t\t\t\t\t\t\t\t\n\n\t\t# Initialize the ros topic '/ros_iot_bridge/mqtt/sub' so that\n\t\t# other ROS nodes can listen to MQTT 
messages\n\t\tself._handle_ros_pub = rospy.Publisher(self._config_mqtt_sub_cb_ros_topic,\n\t\t\t\t\t\t\t\t\t\t\t\tmsgMqttSub,\n\t\t\t\t\t\t\t\t\t\t\t\tqueue_size = 10)\n\n\t\t# Subscribe to MQTT topic 'eyrc/iOeCqZLl/iot_to_ros' so that\n\t\t# it can later on publish the message to '/ros_iot_bridge/mqtt/sub'\n\t\t# for other ROS Nodes to listen\n\t\tret = iot.mqtt_subscribe_thread_start(self.mqtt_sub_callback,\n\t\t\t\t\t\t\t\t\t\t\t\tself._config_mqtt_server_url,\n\t\t\t\t\t\t\t\t\t\t\t\tself._config_mqtt_server_port,\n\t\t\t\t\t\t\t\t\t\t\t\tself._config_mqtt_sub_topic,\n\t\t\t\t\t\t\t\t\t\t\t\tself._config_mqtt_qos)\n\n\t\t# * mqtt_sub_callback - Function which will be called when this node\n\t\t# receives the message from MQTT\n\t\t# * other arguments - probably required to make connection to\n\t\t# appropiate server (assumption)\n\n\t\tif (ret == 0):\n\t\t\trospy.loginfo(\"[BRIDGE] MQTT Subscribe Thread Started\")\n\n\t\telse:\n\t\t\trospy.logerr(\"[BRIDGE] Failed to start MQTT Subscribe Thread\")\n\n\t\t# Start the Action Server\n\t\tself._as.start()\n\n\t\trospy.loginfo(\"[BRIDGE] Started ROS IOT Bridge\")", "def homeArmService(req):\n global robot\n\n # home the arm\n robot.arm.go_home()\n\n # return status\n return homeArmResponse(True)", "def StartControlService(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.get_state())\n console.print(f\"[{ip}] Light {id} State:\\n{json.dumps(resp, indent=2)}\")", "def main(argv):\n\n IP_ADDRESS = \"10.0.1.16\"\n\n robot = MistyRobot(IP_ADDRESS)\n\n print \"HELP: %s\\r\\n\" % pprint(robot.GetHelp())\n print \"DEVICE INFORMATION: %s\\r\\n\" % pprint(robot.GetDeviceInformation())\n print \"BATTERY LEVEL: %s\\r\\n\" % pprint(robot.GetBatteryLevel())\n print \"AUDIO CLIPS: %s\\r\\n\" % pprint(robot.GetListOfAudioClips())\n print \"AUDIO FILES: %s\\r\\n\" % pprint(robot.GetListOfAudioFiles())\n print \"VIDEO CLIPS: %s\\r\\n\" % pprint(robot.GetListOfVideoClips())\n\n print \"SENSORS: %s\\r\\n\" % pprint(robot.GetStringSensorValues())\n\n robot.LocomotionTrack(leftTrackSpeed=3, rightTrackSpeed=3)\n robot.Stop(delay=4)\n\n # This API call doesn't seem to work properly or consistently,\n # only moves head down, regardless of values\n #robot.MoveHead(pitch=-5, roll=0, yaw=0, velocity=4)\n #robot.MoveHead(pitch=5, roll=0, yaw=0, velocity=4, delay=3)\n\n # This API call doesn't seem to work\n robot.DriveTime(linearVelocity=3, angularVelocity=5, timeMS=5000, degrees=0)\n\n # This API call doesn't seem to work\n robot.Drive(linearVelocity=3, angularVelocity=5)\n robot.Stop(delay=4)\n\n robot.StartFaceTraining(faceId=\"person1\")\n robot.CancelFaceTraining(delay=5)\n\n print \"LEARNED FACES: %s\\r\\n\" % pprint(robot.GetLearnedFaces())\n\n robot.ClearLearnedFaces()\n\n print \"LEARNED FACES AFTER CLEAR: %s\\r\\n\" % pprint(robot.GetLearnedFaces())\n\n robot.SetMood(\"sad\")\n robot.SetMood(\"angry\", delay=3)\n robot.SetMood(\"groggy\", delay=3)\n robot.SetMood(\"confused\", delay=3)\n robot.SetMood(\"content\", delay=3)\n robot.SetMood(\"concerned\", delay=3)\n robot.SetMood(\"unamused\", delay=3)\n robot.SetMood(\"happy\", delay=3)\n robot.SetMood(\"love\", 
delay=3)", "def simulation_step(self):\n rospy.wait_for_service(\"send_wifi_com\")\n try:\n send_wifi = rospy.ServiceProxy(\"send_wifi_com\", srvs.SendWifiCom)\n send_wifi(\"I am vehicle #%i\" % self.vehicle_id)\n except rospy.ServiceException, e:\n raise NameError(\"Service call failed: %s\" % e)\n super(WifiVehicle, self).simulation_step()", "def on_start(self):\n self._state = service.ServiceStateMachine(['READY'], default_state='READY')\n self._temperature = 50\n self._set_state_internal(force=True)", "async def async_service_handler(service):\n api_command = MAP_SERVICE_API[service.service][0]\n data = service.data.copy()\n addon = data.pop(ATTR_ADDON, None)\n snapshot = data.pop(ATTR_SNAPSHOT, None)\n payload = None\n\n # Pass data to Opp.io API\n if service.service == SERVICE_ADDON_STDIN:\n payload = data[ATTR_INPUT]\n elif MAP_SERVICE_API[service.service][3]:\n payload = data\n\n # Call API\n try:\n await oppio.send_command(\n api_command.format(addon=addon, snapshot=snapshot),\n payload=payload,\n timeout=MAP_SERVICE_API[service.service][2],\n )\n except OppioAPIError as err:\n _LOGGER.error(\"Error on Opp.io API: %s\", err)", "def run_agent(self):\n\n #-- Load and init mission --#\n print('Generate and load the ' + self.mission_type + ' mission with seed ' + str(self.mission_seed) + ' allowing ' + self.AGENT_MOVEMENT_TYPE + ' movements')\n mission_xml,reward_goal,reward_intermediate,n_intermediate_rewards,reward_timeout,reward_sendcommand, timeout = init_mission(self.agent_host, self.agent_port, self.AGENT_NAME, self.mission_type, self.mission_seed, self.AGENT_MOVEMENT_TYPE)\n self.solution_report.setMissionXML(mission_xml)\n\n #-- Define local capabilities of the agent (sensors)--#\n self.agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)\n self.agent_host.setVideoPolicy(MalmoPython.VideoPolicy.LATEST_FRAME_ONLY)\n self.agent_host.setRewardsPolicy(MalmoPython.RewardsPolicy.KEEP_ALL_REWARDS)\n\n # Initialise cumulative reward\n reward_cumulative = 0.0\n\n state_t = self.agent_host.getWorldState()\n\n while state_t.is_mission_running:\n # Wait 0.5 sec\n time.sleep(0.5)\n\n if state_t.is_mission_running:\n actionIdx = random.randint(0, 3)\n print(\"Requested Action:\",self.AGENT_ALLOWED_ACTIONS[actionIdx])\n\n # Now try to execute the action givne a noisy transition model\n actual_action = self.__ExecuteActionForRandomAgentWithNoisyTransitionModel__(actionIdx, 0.05);\n print(\"Actual Action:\",actual_action)\n\n # Collect the number of rewards and add to reward_cumulative\n # Note: Since we only observe the sensors and environment every a number of rewards may have accumulated in the buffer\n for reward_t in state_t.rewards:\n reward_cumulative += reward_t.getValue()\n self.solution_report.addReward(reward_t.getValue(), datetime.datetime.now())\n print(\"Reward_t:\",reward_t.getValue())\n print(\"Cumulative reward so far:\",reward_cumulative)\n\n # Check if anything went wrong along the way\n for error in state_t.errors:\n print(\"Error:\",error.text)\n\n # Handle the sensor input\n xpos = None\n ypos = None\n zpos = None\n yaw = None\n pitch = None\n if state_t.number_of_observations_since_last_state > 0: # Has any Oracle-like and/or internal sensor observations come in?\n msg = state_t.observations[-1].text # Get the detailed for the last observed state\n oracle = json.loads(msg) # Parse the Oracle JSON\n\n # Oracle\n grid = oracle.get(u'grid', 0) #\n\n # GPS-like sensor\n xpos = oracle.get(u'XPos', 0) # Position in 2D plane, 1st axis\n zpos 
= oracle.get(u'ZPos', 0) # Position in 2D plane, 2nd axis (yes Z!)\n ypos = oracle.get(u'YPos', 0) # Height as measured from surface! (yes Y!)\n\n # Standard \"internal\" sensory inputs\n yaw = oracle.get(u'Yaw', 0) # Yaw\n pitch = oracle.get(u'Pitch', 0) # Pitch\n\n # Vision\n if state_t.number_of_video_frames_since_last_state > 0: # Have any Vision percepts been registred ?\n frame = state_t.video_frames[0]\n\n #-- Print some of the state information --#\n print(\"Percept: video,observations,rewards received:\",state_t.number_of_video_frames_since_last_state,state_t.number_of_observations_since_last_state,state_t.number_of_rewards_since_last_state)\n print(\"\\tcoordinates (x,y,z,yaw,pitch):\" + str(xpos) + \" \" + str(ypos) + \" \" + str(zpos)+ \" \" + str(yaw) + \" \" + str(pitch))\n\n # Get the new world state\n state_t = self.agent_host.getWorldState()\n\n # --------------------------------------------------------------------------------------------\n # Summary\n print(\"Summary:\")\n print(\"Cumulative reward = \" + str(reward_cumulative) )\n\n return", "def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )", "def openCircuit(srv):", "def main():\n # get the service API URL\n params = demisto.params()\n environment_id = params.get('environment_id')\n region = params.get('region')\n tld = '.com'\n\n if region == 'EU':\n tld = '.eu'\n elif region == 'Asia':\n tld = '.asia'\n\n base_url = urljoin(f'https://api.pingone{tld}', f'/v1/environments/{environment_id}/')\n auth_url = urljoin(f'https://auth.pingone{tld}', f'/{environment_id}/as/token')\n\n client_id = demisto.params().get('credentials', {}).get('identifier')\n client_secret = demisto.params().get('credentials', {}).get('password')\n\n verify_certificate = not demisto.params().get('insecure', False)\n proxy = demisto.params().get('proxy', False)\n\n auth_params = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'base_url': base_url,\n 'auth_url': auth_url,\n }\n\n demisto.debug(f'Command being called is {demisto.command()}')\n\n commands = {\n 'test-module': test_module,\n 'pingone-unlock-user': unlock_user_command,\n 'pingone-deactivate-user': deactivate_user_command,\n 'pingone-activate-user': activate_user_command,\n 'pingone-set-password': set_password_command,\n 'pingone-add-to-group': add_user_to_group_command,\n 'pingone-remove-from-group': remove_from_group_command,\n 'pingone-get-groups': get_groups_for_user_command,\n 'pingone-get-user': get_user_command,\n 'pingone-create-user': create_user_command,\n 'pingone-update-user': update_user_command,\n 'pingone-delete-user': delete_user_command,\n }\n\n command = demisto.command()\n\n client = Client(\n auth_params=auth_params,\n base_url=base_url,\n verify=verify_certificate,\n proxy=proxy\n )\n\n try:\n if command in commands:\n human_readable, outputs, raw_response = commands[command](client, demisto.args())\n return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response)\n\n # Log exceptions\n except Exception as e:\n return_error(f'Failed to execute {command} command. 
Error: {str(e)}')", "def test_drive(self):\n global ENV, TRAFFIC_LIGHT\n ENV = simpy.Environment()\n TRAFFIC_LIGHT = TrafficLight()\n bus = Bus(nr=0)\n ENV.process(bus.drive())\n ENV.run()\n self.assertEqual(bus.movement.to_pos, 600)", "def main():\n # parse command line arguments\n\n nodename=\"SawyerJointServer\"\n #Initialize object\n sawyer_obj = Sawyer_impl()\n\n #Create transport, register it, and start the server\n print(\"Registering Transport\")\n \n #port = args.port\n #if (port == 0):\n port =8884 #t.GetListenPort()\n\n #Register the service type and the service\n print(\"Starting Service\")\n \n #RR.RobotRaconteurNode.s.RegisterServiceTypeFromFile(\"com.robotraconteur.robotics.easy\")\n \n with RR.ServerNodeSetup(nodename,port):\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.geometry\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.uuid\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.datetime\")\n\tRRN.RegisterServiceTypeFromFile(\"com.robotraconteur.identifier\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.sensordata\")\n\tRRN.RegisterServiceTypeFromFile(\"com.robotraconteur.resource\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.device\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.units\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.robotics.joints\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.robotics.trajectory\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.datatype\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.signal\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.param\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.robotics.tool\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.robotics.payload\")\n RRN.RegisterServiceTypeFromFile(\"com.robotraconteur.robotics.robot\")\n RRN.RegisterService(\"Sawyer\",\n \"com.robotraconteur.robotics.robot.Robot\",\n sawyer_obj)\n time.sleep(2)\n sawyer_obj.start()\n\n print(\"Service started, connect via\")\n print(\"tcp://localhost:\" + str(port) + \"/SawyerJointServer/Sawyer\")\n raw_input(\"press enter to quit...\\r\\n\")\n\n sawyer_obj.close()\n \n # This must be here to prevent segfault\n #RR.RobotRaconteurNode.s.Shutdown()", "def _init_service(self):\n self.robot_variables.check_variables()\n # setting launch id for report portal service\n self.robot_service.init_service(endpoint=self.robot_variables.endpoint,\n project=self.robot_variables.project,\n uuid=self.robot_variables.uuid)" ]
[ "0.62242675", "0.5637222", "0.55345064", "0.53431636", "0.53285766", "0.530147", "0.52767605", "0.5229516", "0.5223119", "0.5223119", "0.51513463", "0.51237404", "0.5123625", "0.5117781", "0.5117781", "0.5034022", "0.5029458", "0.5021611", "0.49992025", "0.49782804", "0.49595025", "0.49507794", "0.4942103", "0.4936607", "0.49320418", "0.4926656", "0.49187368", "0.49158165", "0.48662764", "0.48660862" ]
0.64819825
0
Run vcftools to sort/compress/index a vcf file
def vcf_compress(fn): ret = cmd_exe(f"vcf-sort {fn} | bgzip > {fn}.gz && tabix {fn}.gz")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compilevcf(args):\n from jcvi.variation.str import LobSTRvcf\n\n p = OptionParser(compilevcf.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n folder, = args\n vcf_files = iglob(folder, \"*.vcf,*.vcf.gz\")\n for vcf_file in vcf_files:\n try:\n p = LobSTRvcf(columnidsfile=None)\n p.parse(vcf_file, filtered=False)\n res = p.items()\n if res:\n k, v = res[0]\n res = v.replace(',', '/')\n else:\n res = \"-1/-1\"\n num = op.basename(vcf_file).split(\".\")[0]\n print num, res\n except (TypeError, AttributeError) as e:\n p = TREDPARSEvcf(vcf_file)\n continue", "def GatherVcfs(\n b: hb.Batch,\n input_vcfs: List,\n disk_size: int,\n output_vcf_path: str = None,\n) -> Job:\n j = b.new_job('VQSR: FinalGatherVcf')\n j.image(utils.GATK_IMAGE)\n j.memory(f'16G')\n j.storage(f'{disk_size}G')\n j.declare_resource_group(\n output_vcf={'vcf.gz': f'{NAME}_gathered.vcf.gz', 'vcf.gz.tbi': f'{NAME}_gathered.vcf.gz.tbi'}\n )\n\n input_cmdl = ' '.join([f'--input {v}' for v in input_vcfs])\n j.command(\n f\"\"\"set -euo pipefail\n # --ignore-safety-checks makes a big performance difference so we include it in \n # our invocation. This argument disables expensive checks that the file headers \n # contain the same set of genotyped samples and that files are in order \n # by position of first record.\n gatk --java-options -Xms6g \\\\\n GatherVcfsCloud \\\\\n --gather-type BLOCK \\\\\n {input_cmdl} \\\\\n --output {j.output_vcf['vcf.gz']}\n tabix {j.output_vcf['vcf.gz']}\"\"\"\n )\n if output_vcf_path:\n b.write_output(j.output_vcf, f'{output_vcf_path}{NAME}_gathered{LABEL}')\n return j", "def index_vcf(vcf_file, threads=4, overwrite=False):\n cmd = \"bcftools index --threads %s -f %s\" % (threads, vcf_file)\n if filecheck(vcf_file):\n if nofile(vcf_file+\".csi\"):\n run_cmd(cmd)\n elif (os.path.getmtime(vcf_file+\".csi\") < os.path.getmtime(vcf_file)) or overwrite:\n run_cmd(cmd)", "def run_concat_vcfs(job, context, vcf_ids, tbi_ids):\n\n work_dir = job.fileStore.getLocalTempDir()\n\n vcf_names = ['chrom_{}.vcf.gz'.format(i) for i in range(len(vcf_ids))]\n out_name = 'genome.vcf.gz'\n\n for vcf_id, tbi_id, vcf_name in zip(vcf_ids, tbi_ids, vcf_names):\n job.fileStore.readGlobalFile(vcf_id, os.path.join(work_dir, vcf_name))\n job.fileStore.readGlobalFile(tbi_id, os.path.join(work_dir, vcf_name + '.tbi'))\n\n cmd = ['bcftools', 'concat'] + [vcf_name for vcf_name in vcf_names] + ['-O', 'z']\n \n with open(os.path.join(work_dir, out_name), 'wb') as out_file:\n context.runner.call(job, cmd, work_dir=work_dir, outfile = out_file)\n\n cmd = ['tabix', '-f', '-p', 'vcf', out_name]\n context.runner.call(job, cmd, work_dir=work_dir)\n\n out_vcf_id = context.write_intermediate_file(job, os.path.join(work_dir, out_name))\n out_tbi_id = context.write_intermediate_file(job, os.path.join(work_dir, out_name + '.tbi'))\n\n return out_vcf_id, out_tbi_id", "def compressVcf(vcfname,forceflag=False,remove=False):\n cvcfname = vcfname+\".gz\"\n pysam.tabix_compress(vcfname,cvcfname,force=forceflag)\n pysam.tabix_index(cvcfname,preset=\"vcf\",force=True)\n if remove:\n os.remove(vcfname)\n return cvcfname", "def write_to_vcf(self):\n\n # 1. 
Generate header info\n date_for_vcf = datetime.now().strftime('%Y%m%d')\n header_info = [\n '##fileformat=VCFv4.2',\n '##fileDate=%s' % date_for_vcf,\n '##source=%s' % self.get_analyser_name(),\n '##reference=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz',\n '##contig=<ID=chr1,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr1.fa.gz>',\n '##contig=<ID=chr2,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr2.fa.gz>',\n '##contig=<ID=chr3,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr3.fa.gz>',\n '##contig=<ID=chr4,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr4.fa.gz>',\n '##contig=<ID=chr5,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr5.fa.gz>',\n '##contig=<ID=chr6,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr6.fa.gz>',\n '##contig=<ID=chr7,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr7.fa.gz>',\n '##contig=<ID=chr8,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr8.fa.gz>',\n '##contig=<ID=chr9,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr9.fa.gz>',\n '##contig=<ID=chr10,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr10.fa.gz>',\n '##contig=<ID=chr11,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr11.fa.gz>',\n '##contig=<ID=chr12,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr12.fa.gz>',\n '##contig=<ID=chr13,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr13.fa.gz>',\n '##contig=<ID=chr14,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr14.fa.gz>',\n '##contig=<ID=chr15,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr15.fa.gz>',\n '##contig=<ID=chr16,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr16.fa.gz>',\n '##contig=<ID=chr17,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr17.fa.gz>',\n '##contig=<ID=chr18,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr18.fa.gz>',\n '##contig=<ID=chr19,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr19.fa.gz>',\n '##contig=<ID=chr20,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr20.fa.gz>',\n '##contig=<ID=chr21,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr21.fa.gz>',\n '##contig=<ID=chr22,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz>',\n '##contig=<ID=chrM,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrM.fa.gz>',\n '##contig=<ID=chrX,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrX.fa.gz>',\n '##contig=<ID=chrY,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrY.fa.gz>',\n ]\n header_parameters = [\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n '##FORMAT=<ID=MTQ,Number=1,Type=String,Description=\"MassArray Typer quality value for SNP call. '\n 'A=Conservative, B=Moderate, C=Aggressive, D=Low Probability, E=User Call, i=Low Intensity. 
A and B are considered high '\n 'quality scores.\">',\n '##INFO=<ID=PCR,Number=2,Type=String,Description=\"PCR sequences used in assay.\">',\n '##INFO=<ID=AF,Number=A,Type=Float,Description=\"Minor allele frequency from population data.\">',\n '##INFO=<ID=Gene,Number=A,Type=String,Description=\"HGNC Gene Name for gene containing SNP.\">',\n '##INFO=<ID=Build,Number=A,Type=String,Description=\"Genome build used to determine SNP position for assay.\">',\n '##FILTER=<ID=LowCallRate,Description=\"SNP not called in at least 30% of samples in assay.\">',\n ]\n\n # 2. Extract info from XML file\n results = self.get_results()\n snps = self.get_snps()\n pcr_sequences = self.get_pcr_sequences()\n call_rates = self.get_snp_call_rate()\n\n # 3. For each sample, create VCF, add headers, determine genotype of each SNP and write to file.\n for sample, variants in results.items():\n\n with open(os.path.join(self.output, '%s.vcf' % sample), 'w+') as outfile:\n\n header_fields = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', str(sample)]\n\n outfile.write('%s\\n' % '\\n'.join(header_info))\n outfile.write('%s\\n' % '\\n'.join(header_parameters))\n outfile.write('#%s\\n' % '\\t'.join(header_fields))\n\n # for each variant, make a line to add to the file which will\n # then be sorted\n lines_to_write = []\n for snp, info in variants.items():\n\n ref_allele = snps[snp]['ref']\n alt_alleles = snps[snp]['alt']\n alt_list = alt_alleles.split(',')\n\n # Genotype formatting matches VCF v4.0 spec where ./. is no call.\n gt_list = []\n called_genotype = info['genotype']\n if not called_genotype:\n gt_list = ['.', '.']\n elif len(called_genotype) == 1:\n called_genotype += called_genotype\n for allele in list(called_genotype):\n if allele == ref_allele:\n gt_list.append(0)\n else:\n if allele in alt_list:\n idx = alt_list.index(allele)\n gt_list.append(idx + 1)\n else:\n raise ValueError(\n 'Called genotype %s not provided as possible alt in bed file. Sample %s and SNP '\n '%s %s.' 
% (called_genotype, sample, snp, alt_alleles)\n )\n gt = '/'.join([str(x) for x in gt_list])\n\n # Threshold currently set to 0.3 (70% results have a call).\n snp_call_rate = call_rates[snp]\n if snp_call_rate >= 0.3:\n vcf_filter = 'LowCallRate'\n else:\n vcf_filter = 'PASS'\n\n snp_pcr_seqs = pcr_sequences[snp]\n\n lines_to_write.append(\n '{chr}\\t{pos}\\t{id}\\t{ref}\\t{alt}\\t.\\t{filter}\\tAF={af};PCR={pcr};Gene={gene};Build={build}\\t'\n 'GT:MTQ\\t{gt}:{qual}\\n'.format(\n chr=snps[snp]['chrom'],\n pos=snps[snp]['pos'],\n id=snp,\n ref=ref_allele,\n alt=alt_alleles,\n filter=vcf_filter,\n af=snps[snp]['maf'],\n pcr=','.join(snp_pcr_seqs),\n gene=snps[snp]['gene'],\n build=snps[snp]['genome_build'],\n gt=gt,\n qual=','.join(info['quality'])\n )\n )\n\n sorted_lines_to_write = sorted(\n lines_to_write,\n key=lambda x: (\n # first key for sorting is the int value of chr\n int(x.split('\\t')[0][3:]),\n # second key for sorting is the position of the variant\n int(x.split('\\t')[1])\n )\n )\n\n for line in sorted_lines_to_write:\n outfile.write(line)", "def main(arguments):\n folder_list = glob.glob(f\"{arguments.f}/*/\")\n for d in folder_list:\n if \"GALEN\" in d: continue\n # get vcf file\n try:\n vcf = glob.glob(f\"{d}/*.vcf\")[0]\n except:\n raise FileNotFoundError(f\"{d} - vcf file not found\")\n \n print(f\"Processing {vcf}...\")\n f = load(vcf)\n final = find_gene(f, arguments.c)\n output_file = vcf.replace(\".vcf\", \"_analyzed.csv\")\n final.to_csv(output_file, index=False)\n print(\"Done!\")", "def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for 
indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def main():\n parser = ArgumentParser()\n\n parser.add_argument(\"-C\", \"--clinvar\", dest=\"clinvar\",\n help=\"ClinVar VCF file\", metavar=\"CLINVAR\")\n parser.add_argument(\"-i\", \"--input\", dest=\"inputfile\",\n help=\"Input VCF file\", metavar=\"INPUT\")\n parser.add_argument(\"-F\", \"--output-format\", dest=\"format\",\n help=\"Output format (currently 'csv' or 'json')\",\n metavar=\"FORMAT\")\n parser.add_argument(\"-V\", \"--schema-version\", dest=\"schema_version\",\n help=\"Version to include report (JSON only)\",\n metavar=\"OUTVERSION\")\n parser.add_argument(\"-n\", \"--notes\", dest=\"notes\",\n help=\"Notes, as a JSON string, to include in report (JSON only)\",\n metavar=\"NOTES\")\n parser.add_argument(\"-g\", \"--genome-build\", dest=\"build\",\n help=\"Genome build to include in report (JSON only)\",\n metavar=\"GENOMEBUILD\")\n options = parser.parse_args()\n\n if sys.stdin.isatty():\n if options.inputfile:\n if options.inputfile.endswith('.vcf'):\n input_genome_file = open(options.inputfile)\n elif options.inputfile.endswith('.vcf.gz'):\n input_genome_file = gzip.open(options.inputfile)\n elif options.inputfile.endswith('.vcf.bz2'):\n input_genome_file = bz2.BZ2File(options.inputfile)\n else:\n raise IOError(\"Genome filename expected to end with ''.vcf',\" +\n \" '.vcf.gz', or '.vcf.bz2'.\")\n else:\n sys.stderr.write(\"Provide input VCF file\\n\")\n parser.print_help()\n sys.exit(1)\n else:\n input_genome_file = sys.stdin\n\n if options.clinvar:\n if options.clinvar.endswith('.vcf'):\n input_clinvar_file = open(options.clinvar)\n elif options.clinvar.endswith('.vcf.gz'):\n input_clinvar_file = gzip.open(options.clinvar)\n elif options.clinvar.endswith('.vcf.bz2'):\n input_clinvar_file = bz2.BZ2File(options.clinvar)\n else:\n raise IOError(\"ClinVar filename expected to end with ''.vcf',\" +\n \" '.vcf.gz', or '.vcf.bz2'.\")\n else:\n sys.stderr.write(\"Provide ClinVar VCF file\\n\")\n parser.print_help()\n sys.exit(1)\n\n output_format = \"csv\"\n if options.format:\n if options.format == \"csv\":\n output_format = \"csv\"\n elif options.format == \"json\":\n output_format = \"json\"\n\n if output_format == \"csv\":\n csv_out = csv.writer(sys.stdout)\n header = (\"Chromosome\", \"Position\", \"Name\", \"Significance\", \"Frequency\",\n \"Zygosity\", \"ACC URL\")\n csv_out.writerow(header)\n\n metadata = {}\n metadata[\"notes\"] = options.clinvar\n\n build = \"unknown\"\n if options.build:\n build = options.build\n metadata[\"genome_build\"] = build\n\n notes_json = {}\n if options.notes:\n notes_json[\"parameter\"] = options.notes\n try:\n notes_json = json.loads(options.notes)\n except:\n sys.stderr.write(\"Could not parse JSON notes field\\n\")\n\n json_report = {}\n json_report[\"schema_version\"] = options.schema_version\n json_report[\"notes\"] = notes_json\n json_report[\"metadata\"] = metadata\n json_report[\"variants\"] = []\n\n matching = 
match_to_clinvar(input_genome_file, input_clinvar_file)\n for var in matching:\n\n chrom = var[0]\n pos = var[1]\n ref_allele = var[2]\n alt_allele = var[3]\n name_acc = var[4]\n allele_freq = var[5]\n zygosity = var[6]\n\n for spec in name_acc:\n ele = {}\n ele[\"chrom\"] = REV_CHROM_INDEX[chrom]\n ele[\"pos\"] = pos\n ele[\"ref_allele\"] = ref_allele\n ele[\"alt_allele\"] = alt_allele\n ele[\"allele_freq\"] = allele_freq\n ele[\"zygosity\"] = zygosity\n\n url = \"http://www.ncbi.nlm.nih.gov/clinvar/\" + str(spec[0])\n name = spec[1]\n clnsig = spec[2]\n\n ele[\"acc_url\"] = url\n ele[\"name\"] = name\n ele[\"clinical_significance\"] = clnsig\n\n json_report[\"variants\"].append(ele)\n\n if output_format == \"csv\":\n data = (chrom, pos, name, clnsig, allele_freq, zygosity, url)\n csv_out.writerow(data)\n\n if output_format == \"json\":\n print json.dumps(json_report)", "def main(): \n \n # parse command line arguments\n parser = argparse.ArgumentParser(description='Runs variant calling on pileup file and stores in vfc file')\n parser.add_argument('--use-read-quality', default=False, action='store_true',\n help='tells the algorithm to estimate p from read qualities')\n parser.add_argument('--call-less-positions', default=False, action='store_true',\n help='tells the program to call less positions (not whole pileup file)')\n parser.add_argument('--input-file', default='merged-normal.pileup', type=str,\n help='path to input file in pileup format')\n parser.add_argument('--output-file', default='Make name from input name', type=str,\n help='name for the output vcf file. If not given, will be created from input file name')\n parser.add_argument('--p', default='0.99', type=float,\n help='probability estimate of one nucleotide read being correct, used by vc algorithm')\n parser.add_argument('--positions-to-call', default='10000', type=int,\n help='how many positions to call if call-less-positions set to true')\n args = parser.parse_args()\n if args.output_file == 'Make name from input name':\n args.output_file = args.input_file + '.vcf'\n \n variant_caller = VariantCaller()\n sample = 'SAMPLE1'\n \n # creates vcf file\n create_vcf_start = time.time()\n vcf = create_vcf_file(args.output_file, sample)\n create_vcf_end = time.time()\n print('Vcf header created. Elapsed time: {}'.format(create_vcf_end - create_vcf_start))\n\n main_loop_start = time.time()\n position_count = 0\n variant_caller_time = 0\n positions_with_variants = 0\n write_vcf_time = 0\n\n for pileup_line in pileup_reader(args.input_file):\n # calls variant for each pileup line\n variant_caller_start = time.time()\n variant_caller.call_variant(pileup_line, args.p, args.use_read_quality)\n if pileup_line['alts'] != '.':\n positions_with_variants += 1\n variant_caller_time += time.time() - variant_caller_start\n\n # writes line in VCF file\n write_vcf_start = time.time()\n write_vcf_line(pileup_line, vcf, sample)\n write_vcf_time = time.time() - write_vcf_start\n\n position_count += 1\n if args.call_less_positions and (position_count >= args.positions_to_call):\n break\n \n main_loop_end = time.time()\n total_running_time = main_loop_end - main_loop_start\n\n print('Processed {} positions. 
Found variants at {} positions.'.format(position_count, positions_with_variants))\n\n print('Total running time is {}'.format(total_running_time))\n print('Pileup reader: {}'.format(total_running_time - variant_caller_time - write_vcf_time))\n print('Variant calling: {}'.format(variant_caller_time))\n print('Vcf writing: {}'.format(write_vcf_time))", "def vcf(self):\n name = self.user + '.vcf'\n url = 'http://lkd.to/api/' + name\n r = requests.get(url)\n with open(name, 'wb') as code:\n code.write(r.content)", "def processfile(args, fh):\n if args.quick:\n scanner = quickScanZip(args, fh)\n else:\n scanner = findPKHeaders(args, fh)\n\n def checkarg(arg, ent):\n if not arg:\n return False\n return '*' in arg or ent.name in arg\n def checkname(a, b):\n if a and '*' in a: return True\n if b and '*' in b: return True\n l = 0\n if a: l += len(a)\n if b: l += len(b)\n return l > 1\n\n if args.verbose and not (args.cat or args.raw or args.save):\n print(\" 0304 need flgs mth stamp --crc-- compsize fullsize nlen xlen namofs xofs datofs endofs\")\n print(\" 0102 crea need flgs mth stamp --crc-- compsize fullsize nlen xlen clen dsk0 attr osattr datptr namofs xofs cmtofs endofs\")\n for ent in scanner:\n if args.cat or args.raw or args.save:\n if args.quick and isinstance(ent, CentralDirEntry) or \\\n not args.quick and isinstance(ent, LocalFileHeader):\n ent.loaditems(fh)\n do_cat = checkarg(args.cat, ent)\n do_raw = checkarg(args.raw, ent)\n do_save= checkarg(args.save, ent)\n\n do_name= checkname(args.cat, args.raw)\n\n if do_name:\n print(\"\\n===> \" + ent.name + \" <===\\n\")\n\n sys.stdout.flush()\n blks = zipraw(fh, ent)\n\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n if do_cat or do_save:\n blks = skipbytes(blks, 12, args)\n\n if do_cat:\n sys.stdout.buffer.writelines(zipcat(blks, ent))\n if do_raw:\n sys.stdout.buffer.writelines(blks)\n if do_save:\n savefile(args.outputdir, ent.name, zipcat(blks, ent))\n else:\n ent.loaditems(fh)\n if args.verbose or not args.quick:\n print(\"%08x: %s\" % (ent.pkOffset, ent))\n else:\n print(ent.summary())\n if hasattr(ent, \"comment\") and ent.comment and not args.dumpraw:\n print(ent.comment)\n if args.dumpraw and hasattr(ent, \"extraLength\") and ent.extraLength:\n print(\"%08x: XTRA: %s\" % (ent.extraOffset, binascii.b2a_hex(getbytes(fh, ent.extraOffset, ent.extraLength))))\n if args.dumpraw and hasattr(ent, \"comment\") and ent.comment:\n print(\"%08x: CMT: %s\" % (ent.commentOffset, binascii.b2a_hex(getbytes(fh, ent.commentOffset, ent.commentLength))))\n if args.dumpraw and isinstance(ent, LocalFileHeader):\n blks = zipraw(fh, ent)\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n\n blockdump(ent.dataOffset, blks)", "def process_csvs(conn: Connection, basedir: Path) -> None:\n process_files(conn, basedir/\"files.csv\")\n process_notes(conn, basedir/\"notes.csv\")\n process_links(conn, basedir/\"links.csv\")\n process_clusters(conn, basedir/\"clusters.csv\")\n process_bibliography(conn, basedir/\"bibliography.csv\")\n process_citations(conn, basedir/\"citations.csv\")", "def main():\n try:\n fname = sys.argv[1]\n f = open(fname, 'r')\n except IndexError:\n f = sys.stdin\n\n reader = Reader()\n for line in f:\n reader.getline(line)\n for key in sorted(reader.d.keys(), key=str.lower):\n sys.stdout.writelines(reader.diffsort(key))", "def process_VCF(input_vcf, targets_file, out_vcf = None) :\n\n\tfVCF_OUT = None\n\tif out_vcf is not None :\n\t\tfVCF_OUT = open(out_vcf, 'w')\n\tfDUP_OUT = 
open(targets_file, 'w')\n\n\tvariants_dict = {}\n\tvariants_list = []\n\tnum_redundant, num_kept = 0, 0\n\tfINVCF = open(input_vcf, 'r')\n\tfor line in fINVCF :\n\t\tif line.startswith('#') :\n\t\t\tif line.startswith(\"#CHROM\") :\n\t\t\t\tindividuals = re.split('\\t', line.strip())[9:]\n\t\t\t\tstdout.write(\"%d individuals included in the VCF file: %s\\n\" %(len(individuals), input_vcf))\n\t\t\tif fVCF_OUT :\n\t\t\t\tfVCF_OUT.write(line)\n\t\telse :\n\t\t\ttmp_line = re.split('\\t', line.strip())\n\t\t\tref_base = tmp_line[3]\n\t\t\talt_base = tmp_line[4]\n\t\t\tchrom_id = tmp_line[0]\n\t\t\tchrom_pos = tmp_line[1]\n\t\t\tqual = tmp_line[5]\n\t\t\tfilter = tmp_line[6]\t\t\t\t\t# PASS or FILTERED by VQSR #\n\t\t\t# fix sites having different types of calls: redundant calls #\n\t\t\tif not variants_dict.has_key(chrom_id+':'+chrom_pos) :\n\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\tvariants_list.append(chrom_id+':'+chrom_pos)\n\t\t\telse :\n\t\t\t\tnum_redundant += 1\n\t\t\t\tsame_site_diff_call = re.split('\\t', variants_dict[chrom_id+':'+chrom_pos])\n\t\t\t\ttmp_qual = same_site_diff_call[5]\n\t\t\t\ttmp_filter = same_site_diff_call[6]\n\t\t\t\ttmp_alt_base = same_site_diff_call[4]\n\t\t\t\tfDUP_OUT.write(\"%s\\n%s\\n\" %(variants_dict[chrom_id+':'+chrom_pos], line.strip()))\n\t\t\t\tif (tmp_filter != \"PASS\" and filter != \"PASS\") or (filter == \"PASS\" and tmp_filter == \"PASS\") :\t\t# if two different call both passed the VQSR or both not, we remove it from the final call set #\t\n\t\t\t\t\tvariants_dict.pop(chrom_id+':'+chrom_pos)\n\t\t\t\t\tvariants_list.remove(chrom_id+':'+chrom_pos)\n\t\t\t\t\tif filter == \"PASS\" :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both pass\\n\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both filtered\\n\")\n\t\t\t\telif filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" second kept\\n\")\n\t\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\t\tnum_kept += 1\n\t\t\t\telif tmp_filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" first kept\\n\")\n\t\t\t\t\tnum_kept += 1\n\tstdout.write(\"%d\\t%d\\n\" %(num_redundant, num_kept))\n\n\tif fVCF_OUT :\n\t\tfor i in range(len(variants_list)) :\n\t\t\tfVCF_OUT.write(\"%s\\n\" %(variants_dict[variants_list[i]]))\n\t\tfVCF_OUT.close()\n\tfINVCF.close()", "def step020():\n logger.logMessage('Begin: Sorting records')\n sortCommand = 'sort {0} -t \\';\\' --key 2 -o {1}'.format(candidatesFile,sortedCandidatesFile) \n rc = os.system(sortCommand)\n if rc != 0:\n raise Exception('Error returned by sort program: {0:d}'.format(rc))\n logger.logMessage('End : Sorting records')", "def vcf_template(alignments, fasta, info_dict, format_dict, model_dir, version):\n meta_1 = [\n \"##fileformat=VCFv4.2\",\n \"##filedate=\" + get_today(),\n \"##source=RNAIndelv\" + version,\n \"##reference=\" + fasta,\n '##FILTER=<ID=NtF,Description=\"Not found in the BAM file as specified in the input VCF\">',\n '##FILTER=<ID=Lt2,Description=\"Less than 2 ALT allele count\">',\n '##FILTER=<ID=RqN,Description=\"Rescued by nearest indel\">',\n ]\n\n info_order = [\n \"PRED\",\n \"PROB\",\n \"ANNO\",\n \"COSMIC_CNT\",\n \"MAXMAF\",\n \"COMMON\",\n \"CLIN\",\n \"REP\",\n \"LC\",\n \"LLC\",\n \"GC\",\n \"LGC\",\n \"SG\",\n \"LSG\",\n \"DSM\",\n \"ICP\",\n \"ISZ\",\n \"INS\",\n \"ATI\",\n \"ATD\",\n \"GCI\",\n \"GCD\",\n \"REFC\",\n \"ALTC\",\n \"BID\",\n \"UQM\",\n 
\"NEB\",\n \"EQX\",\n \"MTA\",\n \"FRM\",\n \"SPL\",\n \"TRN\",\n \"CDD\",\n \"LOC\",\n \"NMD\",\n \"IPG\",\n \"LEN\",\n \"SNP\",\n \"RCF\",\n \"RQB\",\n ]\n\n meta_2 = [\n \"##INFO=<ID=\" + i + \",\"\n \"Number=\" + info_dict[i][\"Number\"] + \",\"\n \"Type=\" + info_dict[i][\"Type\"] + \",\"\n 'Description=\"' + info_dict[i][\"Description\"] + '\">'\n for i in info_order\n ]\n\n format_order = [\"AD\"]\n\n meta_3 = [\n \"##FORMAT=<ID=\" + i + \",\"\n \"Number=\" + format_dict[i][\"Number\"] + \",\"\n \"Type=\" + format_dict[i][\"Type\"] + \",\"\n 'Description=\"' + format_dict[i][\"Description\"] + '\">'\n for i in format_order\n ]\n\n meta_4 = [k + \"=\" + v for k, v in format_used_features(model_dir).items()]\n\n meta = meta_1 + meta_2 + meta_3 + meta_4\n\n header = [\n \"#CHROM\",\n \"POS\",\n \"ID\",\n \"REF\",\n \"ALT\",\n \"QUAL\",\n \"FILTER\",\n \"INFO\",\n \"FORMAT\",\n get_samplename(alignments),\n ]\n\n template = \"\\n\".join(meta + [\"\\t\".join(header)])\n\n return template", "def Add_in_vcf_SO(infofile,\n vcf_path,\n output_vcf, ):\n ori_format2info = ['AF', 'AD']\n\n if type(vcf_path) == str:\n vcf_readed = vcf.Reader(open(vcf_path, 'r'))\n else:\n try:\n vcf_readed = vcf.Reader(fsock=vcf_path)\n except:\n raise IOError('Wrong vcf, it is a %s' % str(type(vcf_path)))\n\n info_df = pd.read_csv(infofile, sep='\\t')\n info_df.index = info_df.iloc[:, [1, 2, 3]].astype(str).sum(1)\n samples = count_sample(vcf_readed.samples)\n\n if len(samples) != 1:\n return\n\n new_infos = vcf_readed.infos\n machine = list(new_infos.values())[0]\n\n field1 = \"SAD\"\n field2 = \"SAF\"\n field3 = \"PoS\"\n field1_info = [field1, 'R', 'Integer',\n \"(REF base count, alt base count). Self cal coverage from bam file. It is different from AD. It is rawer than AD.\"]\n field2_info = [field2, 'R', 'Float',\n \"Alt base count divide the total reads number in this pos. Self cal frequency from bam file. It is different from AF. It is rawer than AF.\"]\n field3_info = [field3, '.', 'Integer',\n \"A field which describe this file is single only analysis or pair analysis. 
1 for single analysis, 2 for pair analysis.\"]\n new_infos[field1] = machine._make(field1_info + [None, None])\n new_infos[field2] = machine._make(field2_info + [None, None])\n new_infos[field3] = machine._make(field3_info + [None, None])\n for ori_format in ori_format2info:\n if ori_format not in new_infos.keys():\n new_infos[ori_format] = list(new_infos.values())[0]._make(\n list(vcf_readed.formats[ori_format]._asdict().values()) + [None, None])\n\n vcf_readed.infos = new_infos\n\n vcf_writer = vcf.Writer(open(output_vcf, 'w'),\n vcf_readed)\n\n for record in tqdm(vcf_readed):\n if record.is_snp:\n # SNP instead of indel\n query_index = record.CHROM + str(record.POS - 1) + str(record.REF)\n if query_index not in info_df.index:\n # it means there are np reads here\n ref_cov, alt_cov = 0, 0\n else:\n row = info_df.loc[query_index, :]\n if len(row.shape) == 2:\n # if multiple index occur\n row = row.iloc[0, :]\n\n ref_base = row[\"Reference\"] # should same as record.REF\n ref_cov = row[ref_base.upper()]\n alt_cov = row[str(record.ALT[0]).upper()]\n\n SAD = [int(ref_cov),\n int(alt_cov)]\n try:\n SAF = round(float(alt_cov) / sum(SAD), 4)\n except ZeroDivisionError:\n SAF = 0\n\n record.INFO[field1] = tuple(SAD)\n record.INFO[field2] = SAF\n record.INFO[field3] = 1\n else:\n # for indel we just ignore it.\n # write the original info\n pass\n\n for sample in record.samples:\n data = dict(sample.data._asdict())\n for ori_format in ori_format2info:\n if data.get(ori_format,''):\n record.INFO[ori_format] = data[ori_format]\n\n vcf_writer.write_record(record)\n vcf_writer.close()", "def load_vcf_data(vcf_file):\n \n if(vcf_file[-3:]==\".gz\"):\n vcf_data=gzip.open(vcf_file, \"r\")\n else:\n vcf_data=open(vcf_file, \"r\")\n \n snp_names=[]\n snp_pos=[]\n genotype_data=[]\n\n missing=0\n \n for line in vcf_data:\n\n if line[0:2] == '##':\n continue\n elif line[0:1] == '#':\n data=line[1:-1]\n data=data.split(\"\\t\")\n if data[0:9]==[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\"]:\n sample_names=data[9:]\n else:\n print data[0:9]\n raise Exception(\"Bad vcf header line\")\n else:\n data=line[:-1]\n data=data.split(\"\\t\")\n\n if len(data[4].split(\",\"))>1: \n print \"Warning: ignoring multi alleleic site at \" + data[0]+\":\"+data[1] \n continue # multi-allelic sites. 
\n\n if data[2] != \".\":\n snp_names.append(data[2])\n else:\n snp_names.append(data[0]+\":\"+data[1])\n\n snp_pos.append(int(data[1]))\n\n if not all([(x[0]==\".\" and x[2]==\".\") or (x[0] in [\"0\", \"1\"] and x[2] in [\"0\", \"1\"]) for x in data[9:]]):\n raise Exception(\"Could not read line: \" + line) \n \n genotype_data.append([ 3 if x[0]==\".\" and x[2]==\".\" else int(x[0])+int(x[2]) for x in data[9:] ])\n\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def ConciseVcf(fn):\n n = 0\n f = open(fn)\n for i in f:\n if i.startswith('##'):\n n += 1\n else:\n break\n df = pd.read_csv(fn, header=n, delim_whitespace=True)\n df = df.drop(['INFO', 'FORMAT', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER'], axis=1)\n for idx in df.columns[2:]:\n df[idx] = df[idx].map(lambda x: x.split(':')[0])\n df = df.replace(['0/0', '0/1', '1/0', '1/1', './.'], [0, 1, 1, 2, 9])\n return df", "def main():\n processSetOfCerFiles(sys.argv[1:])", "def main(reference, vcf, max_nodes):\n\n # setting command line for vg construct\n cli = [\"vg\",\n \"construct\"]\n \n # reference and vcf files (1 or more)\n for reference_file in reference:\n cli += [\"-r\", reference_file]\n # vcf file is optional\n for vcf_file in vcf:\n if check(vcf_file):\n cli += [\"-v\", vcf_file]\n\n # nodes \n cli += [\"-m\", max_nodes]\n \n print(\"Running fastqc subprocess with command: {}\".format(cli))\n\n p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE, shell=False)\n stdout, stderr = p.communicate()\n\n # Attempt to decode STDERR output from bytes. If unsuccessful, coerce to\n # string\n try:\n stderr = stderr.decode(\"utf8\")\n except (UnicodeDecodeError, AttributeError):\n stderr = str(stderr)\n\n print(\"Finished vg construct subprocess with STDOUT:\\\\n\"\n \"======================================\\\\n{}\".format(stdout))\n print(\"Fished vg construct subprocesswith STDERR:\\\\n\"\n \"======================================\\\\n{}\".format(stderr))\n print(\"Finished vg construct with return code: {}\".format(p.returncode))\n\n # save vg file\n with open(\"reference.vg\", \"wb\") as vg_fh:\n vg_fh.write(stdout)", "def normalize_bcftools(orig_vcf, conf):\n norm_orig_vcf = orig_vcf.replace(\".vcf.gz\", \".norm.bcftools\" + util.randstr() + \".vcf\")\n norm_orig_cmd = conf.get('main', 'bcftools') + \" norm \" + \" -c w -f \" + conf.get('main', 'ref_genome') + \" \" + orig_vcf + \" -o \" + norm_orig_vcf\n subprocess.check_call(norm_orig_cmd.split())\n return util.bgz_tabix(norm_orig_vcf, conf)", "def main(args=None):\n # ---------- build and read options ----------\n USAGE = \"\"\"Usage %prog -i <FCDDATA> [options] \nConverts the given fcd file (generated by sumo --fcd-output) into the selected\noutput format. 
Optionally the output can be sampled, filtered and distorted.\n\"\"\"\n\n from optparse import OptionParser\n optParser = OptionParser(usage=USAGE)\n optParser.add_option(\"-i\", \"--fcd-input\", dest=\"fcd\", metavar=\"FILE\",\n help=\"Defines the FCD-output file to use as input\")\n optParser.add_option(\"-n\", \"--net-input\", dest=\"net\", metavar=\"FILE\",\n help=\"Defines the network file to use as input\")\n optParser.add_option(\"-p\", \"--penetration\", type=\"float\", dest=\"penetration\",\n default=1., help=\"Defines the percentage (0-1) of vehicles to export\")\n optParser.add_option(\"-b\", \"--begin\", dest=\"begin\",\n type=\"float\", help=\"Defines the first step to export\")\n optParser.add_option(\"-e\", \"--end\", dest=\"end\",\n type=\"float\", help=\"Defines the first step not longer to export\")\n optParser.add_option(\"-d\", \"--delta-t\", dest=\"delta\",\n type=\"float\", help=\"Defines the export step length\")\n optParser.add_option(\"--gps-blur\", dest=\"blur\", default=0,\n type=\"float\", help=\"Defines the GPS blur\")\n optParser.add_option(\n \"--boundary\", help=\"Defines the bounding box as 'xmin,ymin,xmax,ymax'\")\n optParser.add_option(\"-s\", \"--seed\", dest=\"seed\", default=42,\n type=\"float\", help=\"Defines the randomizer seed\")\n optParser.add_option(\n \"--base-date\", dest=\"base\", default=-1, type=\"int\", help=\"Defines the base date\")\n optParser.add_option(\"--orig-ids\", dest=\"orig_ids\", default=False, action=\"store_true\",\n help=\"Write original vehicle IDs instead of a running index\")\n optParser.add_option(\"--ignore-gaps\", dest=\"ignore_gaps\", default=False, action=\"store_true\",\n help=\"Ignore steps where a vehicle is not in the network\")\n optParser.add_option(\"--persons\", default=False, action=\"store_true\",\n help=\"Include person data\")\n # PHEM\n optParser.add_option(\"--dri-output\", dest=\"dri\", metavar=\"FILE\",\n help=\"Defines the name of the PHEM .dri-file to generate\")\n optParser.add_option(\"--str-output\", dest=\"str\", metavar=\"FILE\",\n help=\"Defines the name of the PHEM .str-file to generate\")\n optParser.add_option(\"--fzp-output\", dest=\"fzp\", metavar=\"FILE\",\n help=\"Defines the name of the PHEM .fzp-file to generate\")\n optParser.add_option(\"--flt-output\", dest=\"flt\", metavar=\"FILE\",\n help=\"Defines the name of the PHEM .flt-file to generate\")\n # OMNET\n optParser.add_option(\"--omnet-output\", dest=\"omnet\", metavar=\"FILE\",\n help=\"Defines the name of the OMNET file to generate\")\n # Shawn\n optParser.add_option(\"--shawn-output\", dest=\"shawn\", metavar=\"FILE\",\n help=\"Defines the name of the Shawn file to generate\")\n # ns2\n optParser.add_option(\"--ns2activity-output\", dest=\"ns2activity\", metavar=\"FILE\",\n help=\"Defines the name of the ns2 file to generate\")\n optParser.add_option(\"--ns2config-output\", dest=\"ns2config\", metavar=\"FILE\",\n help=\"Defines the name of the ns2 file to generate\")\n optParser.add_option(\"--ns2mobility-output\", dest=\"ns2mobility\", metavar=\"FILE\",\n help=\"Defines the name of the ns2 file to generate\")\n # GPSDAT\n optParser.add_option(\"--gpsdat-output\", dest=\"gpsdat\", metavar=\"FILE\",\n help=\"Defines the name of the gpsdat file to generate\")\n\n # GPX\n optParser.add_option(\"--gpx-output\", dest=\"gpx\", metavar=\"FILE\",\n help=\"Defines the name of the gpx file to generate\")\n # POI\n optParser.add_option(\"--poi-output\", dest=\"poi\", metavar=\"FILE\",\n help=\"Defines the name of the poi file to 
generate\")\n # parse\n if len(args) == 1:\n sys.exit(USAGE.replace('%prog', os.path.basename(__file__)))\n options, remaining_args = optParser.parse_args(args=args)\n\n if options.seed:\n random.seed(options.seed)\n # ---------- process ----------\n net = None\n # ----- check needed values\n if options.delta and options.delta <= 0:\n print(\"delta-t must be a positive value.\")\n return 1\n # phem\n if (options.dri or options.fzp or options.flt) and not options.fcd:\n print(\"A fcd-output from SUMO must be given using the --fcd-input.\")\n return 1\n if (options.str or options.fzp or options.flt) and not options.net:\n print(\"A SUMO network must be given using the --net-input option.\")\n return 1\n # omnet\n if options.omnet and not options.fcd:\n print(\"A fcd-output from SUMO must be given using the --fcd-input.\")\n return 1\n # ----- check needed values\n\n ## ----- OMNET\n if options.omnet:\n runMethod(options.fcd, options.omnet, omnet.fcd2omnet, options)\n ## ----- OMNET\n\n ## ----- Shawn\n if options.shawn:\n runMethod(options.fcd, options.shawn, shawn.fcd2shawn, options)\n ## ----- Shawn\n\n ## ----- GPSDAT\n if options.gpsdat:\n runMethod(options.fcd, options.gpsdat, gpsdat.fcd2gpsdat, options)\n ## ----- GPSDAT\n\n ## ----- GPX\n if options.gpx:\n runMethod(options.fcd, options.gpx, gpx.fcd2gpx, options)\n ## ----- GPX\n\n ## ----- GPX\n if options.poi:\n runMethod(options.fcd, options.poi, poi.fcd2poi, options)\n ## ----- GPX\n\n ## ----- ns2\n if options.ns2mobility or options.ns2config or options.ns2activity:\n vIDm, vehInfo, begin, end, area = runMethod(\n options.fcd, options.ns2mobility, ns2.fcd2ns2mobility, options)\n if options.ns2activity:\n o = _getOutputStream(options.ns2activity)\n ns2.writeNS2activity(o, vehInfo)\n _closeOutputStream(o)\n if options.ns2config:\n o = _getOutputStream(options.ns2config)\n ns2.writeNS2config(\n o, vehInfo, options.ns2activity, options.ns2mobility, begin, end, area)\n _closeOutputStream(o)\n ## ----- ns2\n\n ## ----- PHEM\n # .dri\n if options.dri:\n runMethod(options.fcd, options.dri, phem.fcd2dri, options)\n # .str (we need the net for other outputs, too)\n if options.str or options.fzp or options.flt:\n if not options.net:\n print(\"A SUMO network must be given using the --net-input option.\")\n return 1\n if not net:\n net = sumolib.net.readNet(options.net)\n o = _getOutputStream(options.str)\n sIDm = phem.net2str(net, o)\n _closeOutputStream(o)\n # .fzp\n if options.flt or options.fzp:\n vIDm, vtIDm = runMethod(\n options.fcd, options.fzp, phem.fcd2fzp, options, {\"phemStreetMap\": sIDm})\n # .flt\n if options.flt:\n o = _getOutputStream(options.flt)\n phem.vehicleTypes2flt(o, vtIDm)\n _closeOutputStream(o)\n ## ----- PHEM\n return 0", "def export(ctx, outfile):\n adapter = ctx.obj['adapter']\n \n logger.info(\"Export the variants from {0}\".format(adapter))\n nr_cases = 0\n \n existing_chromosomes = set(adapter.get_chromosomes())\n \n ordered_chromosomes = []\n for chrom in CHROMOSOME_ORDER:\n if chrom in existing_chromosomes:\n ordered_chromosomes.append(chrom)\n existing_chromosomes.remove(chrom)\n for chrom in existing_chromosomes:\n ordered_chromosomes.append(chrom)\n \n nr_cases = adapter.cases().count()\n logger.info(\"Found {0} cases in database\".format(nr_cases))\n\n head = HeaderParser()\n head.add_fileformat(\"VCFv4.3\")\n head.add_meta_line(\"NrCases\", nr_cases)\n head.add_info(\"Obs\", '1', 'Integer', \"The number of observations for the variant\")\n head.add_info(\"Hom\", '1', 'Integer', \"The number of observed 
homozygotes\")\n head.add_info(\"Hem\", '1', 'Integer', \"The number of observed hemizygotes\")\n head.add_version_tracking(\"loqusdb\", __version__, datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n for chrom in ordered_chromosomes:\n length = adapter.get_max_position(chrom)\n head.add_contig(contig_id=chrom, length=str(length))\n\n print_headers(head, outfile=outfile)\n \n for chrom in ordered_chromosomes:\n for variant in adapter.get_variants(chromosome=chrom):\n chrom = variant['chrom']\n pos = variant['start']\n ref = variant['ref']\n alt = variant['alt']\n observations = variant['observations']\n homozygotes = variant['homozygote']\n hemizygotes = variant['hemizygote']\n info = \"Obs={0}\".format(observations)\n if homozygotes:\n info += \";Hom={0}\".format(homozygotes)\n if hemizygotes:\n info += \";Hem={0}\".format(hemizygotes)\n variant_line = \"{0}\\t{1}\\t.\\t{2}\\t{3}\\t.\\t.\\t{4}\\n\".format(\n chrom, pos, ref, alt, info)\n print_variant(variant_line=variant_line, outfile=outfile)", "def main():\n parser = argparse.ArgumentParser(\n description=\"Lite version of the CNVnator written in Python.\\nA tool for CNV discovery from depth of read mapping.\")\n parser.add_argument('-version', '--version', action='store_true', help='show version number and exit')\n parser.add_argument('-root', '--root', type=str, nargs=\"+\",\n help=\"CNVnator hd5 file: data storage for all calculations\", default=None)\n\n parser.add_argument('-download', '--download_resources', action='store_true', help='download resource files')\n\n parser.add_argument('-chrom', '--chrom', type=str, nargs=\"+\", help=\"list of chromosomes to apply calculation\",\n default=[])\n parser.add_argument('-v', '--verbose', type=str,\n choices=[\"none\", \"debug\", \"info\", \"warning\", \"error\", \"d\", \"e\", \"i\", \"w\"],\n help=\"verbose level: debug, info (default), warning, error\", default=\"info\")\n parser.add_argument('-log', '--log_file', type=str, help='log file')\n parser.add_argument('-j', '--max_cores', type=int,\n help=\"maximal number of cores to use in calculation\", default=8)\n parser.add_argument('-rd', '--rd', nargs=\"+\", type=str, help=\"read bam/sam/cram and store read depth information\")\n parser.add_argument('-T', '--reference_filename', type=str, help=\"reference fasta for CRAM\")\n\n parser.add_argument('-gc', '--gc', type=str, help=\"read fasta file and store GC/AT content\")\n parser.add_argument('-cgc', '--copy_gc', type=str, help=\"copy GC/AT content from another cnvnator file\")\n parser.add_argument('-his', '--his', type=binsize_type, nargs=\"+\",\n help=\"create histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-snp2his', '--his_from_snp', type=binsize_type, nargs=\"+\",\n help=\"create histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-stat', '--stat', type=binsize_type, nargs=\"+\",\n help=\"calculate statistics for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-partition', '--partition', type=binsize_type, nargs=\"+\",\n help=\"calculate segmentation for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-call', '--call', type=str, nargs=\"+\",\n help=\"CNV caller: [baf] bin_size [bin_size2 ...] 
(multiple bin sizes separate by space)\")\n parser.add_argument('-vcf', '-snp', '--vcf', nargs=\"+\", type=str, help=\"read SNP data from vcf files\")\n parser.add_argument('-somatic_snv', '--somatic_snv', nargs=\"+\", type=str, help=\"read SNP data from vcf files\")\n\n parser.add_argument('-minc', '--min_count', type=int,\n help=\"minimal count of haterozygous SNPs\", default=None)\n parser.add_argument('-vcf2rd', '--rd_from_vcf', type=str, help=\"read SNP data from vcf files\")\n parser.add_argument('-noAD', '--no_snp_counts', action='store_true',\n help=\"read positions of variants, not counts (AD tag)\")\n parser.add_argument('-nofilter', '--no_filter', action='store_true',\n help=\"read all variants (not only PASS)\")\n parser.add_argument('-ad', '--ad_tag', type=str, help=\"counts tag (default: AD)\", default=\"AD\")\n parser.add_argument('-gt', '--gt_tag', type=str, help=\"genotype tag (default: GT)\", default=\"GT\")\n parser.add_argument('-dp', '--dp_tag', type=str, help=\"read depth tag (default: DP)\", default=\"DP\")\n parser.add_argument('-callset', '--callset', type=str, help=\"name for somatic VCF signal\", default=None)\n parser.add_argument('-maxcn', '--max_copy_number', type=int, help=\"maximal copy number\", default=10)\n parser.add_argument('-mindbaf', '--baf_threshold', type=float, help=\"threshold for change in BAF level\",\n default=0.0)\n parser.add_argument('-bafres', '--baf_resolution', type=int, help=\"Resolution for unphased BAF likelihood\",\n default=200)\n parser.add_argument('-nolh', '--no_save_likelihood', action='store_true',\n help=\"do not save likelihood histograms (reduce size of pytor file)\")\n parser.add_argument('-oth', '--overlap_threshold', type=float, help=\"likelihood overlap threshold\",\n default=None)\n parser.add_argument('-mincf', '--min_cell_fraction', type=float, help=\"minimal cell fraction\", default=0.0)\n\n parser.add_argument('-pileup', '--pileup_bam', nargs=\"+\", type=str, help=\"calculate SNP counts from bam files\")\n parser.add_argument('-snp2rd', '--rd_from_snp', action='store_true', help=\"calculate RD from SNP counts\")\n parser.add_argument('-sbin', '--s_bin_size', type=binsize_type, help=\"Super bin size (use with -snp2rd)\",\n default=10000)\n\n parser.add_argument('-mask', '--mask', type=str, help=\"read fasta mask file and flag SNPs in P region\")\n parser.add_argument('-mask_snps', '--mask_snps', action='store_true', help=\"flag SNPs in P region\")\n parser.add_argument('-trio_phase', '--trio_phase', action='store_true', help=\"Phase trio\")\n parser.add_argument('-parents', '--phase_parents', action='store_true', help=\"Phase parents\")\n parser.add_argument('-mask_snvs', '--mask_snvs', type=str, help=\"flag SNVs in P region\")\n parser.add_argument('-idvar', '--idvar', type=str, help=\"read vcf file and flag SNPs that exist in database file\")\n parser.add_argument('-random_phase', '--random_phase', action='store_true', help=\"randomly phase SNPs\")\n parser.add_argument('-baf', '--baf', type=binsize_type, nargs=\"+\",\n help=\"create BAF histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-nomask', '--no_mask', action='store_true', help=\"do not use P mask in BAF histograms\")\n parser.add_argument('-useid', '--use_id', action='store_true', help=\"use id flag filtering in SNP histograms\")\n parser.add_argument('-usehom', '--use_hom', action='store_true', help=\"use hom\")\n parser.add_argument('-usephase', '--use_phase', action='store_true',\n help=\"use information 
about phase while processing SNP data\")\n parser.add_argument('-reducenoise', '--reduce_noise', action='store_true',\n help=\"reduce noise in processing SNP data\")\n parser.add_argument('-blw', '--baf_likelihood_width', type=float,\n help=\"likelihood width used in processing SNP data (default=0.8)\", default=0.8)\n parser.add_argument('-altc', '--alt_corr', action='store_true',\n help=\"Remove alt/ref bias\")\n\n parser.add_argument('-plot', '--plot', type=str, nargs=\"+\", help=\"plotting\")\n parser.add_argument('-view', '--view', type=binsize_type,\n help=\"Enters interactive ploting mode\")\n parser.add_argument('-agg', '--force_agg', action='store_true', help=\"Force Agg matplotlib backend\")\n\n parser.add_argument('-panels', '--panels', type=str, nargs=\"+\", default=[\"rd\"], choices=[\"rd\", \"baf\", \"likelihood\"],\n help=\"plot panels (with -plot regions)\")\n\n parser.add_argument('-style', '--plot_style', type=str,\n help=\"available plot styles: \" + \", \".join(plt.style.available), choices=plt.style.available)\n parser.add_argument('-o', '--plot_output_file', type=str, help=\"output filename prefix and extension\", default=\"\")\n parser.add_argument('-anim', '--animation', type=str, help=\"animation folder/prefix\", default=\"\")\n\n parser.add_argument('-make_gc_file', '--make_gc_genome_file', action='store_true',\n help=\"used with -gc will create genome gc file\")\n parser.add_argument('-make_mask_file', '--make_mask_genome_file', action='store_true',\n help=\"used with -mask will create genome mask file\")\n parser.add_argument('-rd_use_mask', '--use_mask_with_rd', action='store_true', help=\"used P mask in RD histograms\")\n parser.add_argument('-nogc', '--no_gc_corr', action='store_true', help=\"do not use GC correction in RD histograms\")\n parser.add_argument('-rg', '--reference_genome', type=str, help=\"Manually set reference genome\", default=None)\n parser.add_argument('-sample', '--vcf_sample', type=str, help=\"Sample name in vcf file\", default=\"\")\n parser.add_argument('-conf', '--reference_genomes_conf', type=str, help=\"Configuration with reference genomes\",\n default=None)\n\n parser.add_argument('-ls', '--ls', action='store_true', help='list pytor file(s) content')\n parser.add_argument('-gc_info', '--gc_info', action='store_true', help='list pytor file(s) gc content stat')\n parser.add_argument('-rg_info', '--rg_info', action='store_true', help='list loaded reference gnomes')\n parser.add_argument('-info', '--info', type=binsize_type, nargs=\"*\", help='print statistics for pythor file(s)')\n parser.add_argument('-qc', '--qc', type=binsize_type, nargs=\"*\", help='print quality control statistics')\n parser.add_argument('-rdqc', '--rd_qc', type=binsize_type, nargs=\"*\",\n help='print quality control statistics without SNP data')\n parser.add_argument('-comp', '--compare', type=str, nargs=\"*\", help='compere two regions: -comp reg1 reg2 [n_bins]')\n parser.add_argument('-genotype', '--genotype', type=str, nargs=\"*\")\n parser.add_argument('-a', '--all', action='store_true', help='Genotype with all columns')\n parser.add_argument('-meta', '--metadata', action='store_true', help='list Metadata')\n parser.add_argument('-fasta2rg', '--reference_genome_template', type=str,\n help=\"create template for reference genome using chromosome lengths from fasta file\")\n parser.add_argument('-export', '--export', type=str, nargs=\"*\", help='Export to jbrowse and cnvnator')\n args = parser.parse_args(sys.argv[1:])\n\n log_format = '%(asctime)s - %(name)s - 
%(levelname)s - %(message)s'\n if args.verbose in {\"debug\", \"d\"}:\n level = logging.DEBUG\n elif args.verbose in {\"info\", \"i\"}:\n level = logging.INFO\n elif args.verbose in {\"warning\", \"w\"}:\n level = logging.WARNING\n elif args.verbose in {\"error\", \"e\"}:\n level = logging.ERROR\n else:\n level = logging.CRITICAL\n\n if args.log_file:\n logging.basicConfig(filename=args.log_file, level=logging.DEBUG, format=log_format)\n logger = logging.getLogger('cnvpytor')\n ch = logging.StreamHandler()\n formatter = logging.Formatter(log_format)\n ch.setFormatter(formatter)\n ch.setLevel(level)\n logger.addHandler(ch)\n else:\n logging.basicConfig(level=level, format=log_format)\n logger = logging.getLogger('cnvpytor')\n logger.debug(\"Start logging...\")\n\n if args.reference_genome_template is not None:\n Fasta(args.reference_genome_template).print_reference_genome_template()\n\n if args.download_resources:\n Genome.download_resources()\n return 0\n\n if not Genome.check_resources():\n logger.error(\"Some reference genome resource files are missing. \"\n \"Run 'cnvpytor -download' as same user who has installed cnvpytor.\")\n return 0\n\n if args.version:\n print('CNVpytor {}'.format(__version__))\n return 0\n\n if args.reference_genomes_conf:\n Genome.load_reference_genomes(args.reference_genomes_conf)\n elif os.path.exists(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py')):\n Genome.load_reference_genomes(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py'))\n\n if args.rg_info:\n Genome.print_reference_genomes()\n\n if args.root is not None:\n\n if args.ls:\n show = Show(args.root)\n show.ls()\n\n if args.gc_info:\n show = Show(args.root)\n show.gc_info()\n\n if args.export:\n if len(args.export) > 0:\n dir_name_list = args.export[1:]\n dir_name = ''\n if len(dir_name_list) > 0:\n dir_name = dir_name_list[0]\n export_program = args.export[0].lower()\n if export_program in ['jbrowse', 'cnvnator']:\n if export_program == 'jbrowse':\n export_j = ExportJBrowse(args.root, dir_name)\n export_j.create_reference_json()\n export_j.rd_signal()\n export_j.snp_signal()\n export_j.create_tracklist_json()\n elif export_program == 'cnvnator':\n logger.info(\"Under Development\")\n else:\n logger.error(\"Incorrect export program name\")\n\n if args.metadata:\n show = Show(args.root)\n show.meta()\n\n if args.info is not None:\n show = Show(args.root)\n show.info(args.info)\n\n\n if args.genotype is not None:\n params = {\"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.genotype_prompt(list(map(binsize_type, args.genotype)), all=args.all)\n\n if args.qc is not None:\n params = {\"bin_size\": binsize_type(args.qc[-1]),\n \"chrom\": args.chrom,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.qc()\n\n if args.rd_qc is not None:\n params = {\"bin_size\": binsize_type(args.rd_qc[-1]),\n \"chrom\": args.chrom,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.qc(snp_qc=False)\n\n\n if args.compare is not None:\n 
params = {\"bin_size\": binsize_type(args.compare[-1]),\n \"rd_use_gc_corr\": not args.no_gc_corr,\n \"rd_use_mask\": args.use_mask_with_rd\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n if len(args.compare) == 3:\n view.compare(args.compare[0], args.compare[1])\n elif len(args.compare) == 4:\n view.compare(args.compare[0], args.compare[1], int(args.compare[2]))\n\n if args.rd:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.rd(args.rd, chroms=args.chrom, reference_filename=args.reference_filename)\n\n if args.reference_genome:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.set_reference_genome(args.reference_genome)\n\n if args.plot:\n params = {\"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n if args.plot_style:\n params[\"style\"] = args.plot_style\n view = Viewer(args.root, params)\n view.plot_command(args.plot)\n\n if args.view:\n params = {\"bin_size\": args.view,\n \"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n if args.plot_style:\n params[\"style\"] = args.plot_style\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.prompt()\n\n if args.gc:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.gc(args.gc, chroms=args.chrom, make_gc_genome_file=args.make_gc_genome_file)\n\n if args.copy_gc:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.copy_gc(args.copy_gc, chroms=args.chrom)\n\n if args.vcf:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.vcf(args.vcf, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,\n ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter)\n\n if args.idvar:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.variant_id(args.idvar, chroms=args.chrom)\n\n if args.somatic_snv:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n callset = \"default\" if args.callset is None else args.callset\n app.vcf(args.somatic_snv, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,\n ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter, callset=callset)\n\n if args.rd_from_vcf:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.rd_from_vcf(args.rd_from_vcf, chroms=args.chrom, sample=args.vcf_sample, ad_tag=args.ad_tag,\n dp_tag=args.dp_tag)\n\n if args.pileup_bam:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.pileup(args.pileup_bam, chroms=args.chrom, reference_filename=args.reference_filename)\n\n if args.rd_from_snp:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.rd_from_snp(chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,\n s_bin_size=args.s_bin_size)\n\n if args.mask:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.mask(args.mask, chroms=args.chrom, make_mask_genome_file=args.make_mask_genome_file)\n\n if args.mask_snps:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.mask_snps()\n\n if args.mask_snvs:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.mask_snps(callset=args.mask_snvs)\n\n if args.random_phase:\n app = 
Root(args.root[0], max_cores=args.max_cores)\n app.random_phase()\n\n if args.trio_phase:\n app = Trio(args.root)\n app.trio_phase(parents=args.phase_parents)\n\n if args.stat:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.rd_stat(chroms=args.chrom)\n\n if args.his:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_histograms(args.his, chroms=args.chrom)\n\n if args.his_from_snp:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_histograms_from_snp_counts(args.his_from_snp, chroms=args.chrom, use_mask=not args.no_mask,\n use_id=args.use_id, callset=args.callset,\n min_count=args.min_count)\n if args.baf:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_baf(args.baf, chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,\n use_phase=args.use_phase, res=args.baf_resolution, reduce_noise=args.reduce_noise, blw=args.baf_likelihood_width,\n use_hom=args.use_hom, alt_ref_correct=args.alt_corr, save_likelihood=not args.no_save_likelihood)\n if args.partition:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.partition(args.partition, chroms=args.chrom, use_gc_corr=not args.no_gc_corr,\n use_mask=args.use_mask_with_rd)\n\n if args.call:\n app = Root(args.root[0], max_cores=args.max_cores)\n if args.call[0] == \"baf\":\n if args.call[1] in [\"mosaic\", \"germline\"]:\n event_type = args.call[1]\n bins = list(map(binsize_type, args.call[2:]))\n else:\n event_type = \"both\"\n bins = list(map(binsize_type, args.call[1:]))\n if args.use_phase:\n app.call_baf_phased(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call_baf(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n #app.call_baf_old([binsize_type(x) for x in args.call[1:]], chroms=args.chrom, use_id=args.use_id,\n # use_mask=not args.no_mask, mcount=args.min_count, anim=args.animation)\n elif args.call[0] == \"mosaic\":\n app.call_mosaic(list(map(binsize_type, args.call[1:])), chroms=args.chrom,\n use_gc_corr=not args.no_gc_corr,\n use_mask=args.use_mask_with_rd, anim=args.animation)\n elif args.call[0] == \"subclones\":\n bins = list(map(binsize_type, args.call[1:]))\n app.call_subclones(bins, chroms=args.chrom, cnv_calls=\"calls combined\", print_calls=True,\n use_gc_corr=not args.no_gc_corr, rd_use_mask=args.use_mask_with_rd,\n snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold)\n elif args.call[0] == \"combined\":\n if args.call[1] in [\"mosaic\", \"germline\"]:\n event_type = args.call[1]\n bins = list(map(binsize_type, args.call[2:]))\n else:\n event_type = \"both\"\n bins = list(map(binsize_type, args.call[1:]))\n if args.use_phase:\n app.call_2d_phased(bins, chroms=args.chrom, event_type=event_type, 
print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call_2d(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call(list(map(binsize_type, args.call)), chroms=args.chrom, print_calls=True,\n use_gc_corr=not args.no_gc_corr, use_mask=args.use_mask_with_rd)", "def merge_vcf_chunks(out_dir, path_name, path_size, chunks, overwrite):\n vcf_path = os.path.join(out_dir, path_name + \".vcf\")\n if overwrite or not os.path.isfile(vcf_path):\n first = True\n for chunk_i, chunk in enumerate(chunks):\n clip_path = chunk_base_name(path_name, out_dir, chunk_i, \"_clip.vcf\")\n if os.path.isfile(clip_path):\n if first is True:\n # copy everything including the header\n run(\"cat {} > {}\".format(clip_path, vcf_path))\n first = False\n else:\n # add on everythin but header\n run(\"grep -v \\\"^#\\\" {} >> {}\".format(clip_path, vcf_path), check=False)\n \n # add a compressed indexed version\n if overwrite or not os.path.isfile(vcf_path + \".gz\"):\n run(\"bgzip -c {} > {}\".format(vcf_path, vcf_path + \".gz\"))\n run(\"tabix -f -p vcf {}\".format(vcf_path + \".gz\"))", "def command_vcf2vci(raw_args, prog=None):\n\n if prog:\n parser = argparse.ArgumentParser(prog=prog, add_help=False)\n else:\n parser = argparse.ArgumentParser(add_help=False)\n\n def print_message(message):\n if message:\n sys.stderr.write(message)\n else:\n sys.stderr.write(command_vcf2vci.__doc__)\n sys.stderr.write('\\n')\n sys.exit(1)\n\n parser.error = print_message\n\n # required\n parser.add_argument(\"-i\", \"--vcf\", dest=\"vcf_files\", metavar=\"vcf_file\", action='append')\n parser.add_argument(\"-f\", \"--fasta\", dest=\"fasta_file\", metavar=\"fasta_file\")\n parser.add_argument(\"-s\", \"--strain\", dest=\"strain\", metavar=\"strain\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", metavar=\"VCI_File\")\n\n # optional\n parser.add_argument(\"-p\", \"--num-processes\", type=int, dest=\"numprocesses\", metavar=\"number_of_processes\")\n parser.add_argument(\"--diploid\", dest=\"diploid\", action='store_true')\n parser.add_argument(\"--keep\", dest=\"keep\", action='store_true')\n parser.add_argument(\"--pass\", dest=\"passed\", action='store_true')\n parser.add_argument(\"--quality\", dest=\"quality\", action='store_true')\n parser.add_argument(\"--no-bgzip\", dest=\"nobgzip\", action='store_false', default=True)\n\n # debugging and help\n parser.add_argument(\"-h\", \"--help\", dest=\"help\", action='store_true')\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action=\"count\", default=0)\n\n args = parser.parse_args(raw_args)\n\n g2g.configure_logging(args.debug)\n\n if len(raw_args) == 0 or args.help:\n g2g.exit(\"\", parser)\n\n if not args.vcf_files:\n g2g.exit(\"No VCF file was specified.\", parser)\n\n if not args.output:\n g2g.exit(\"No output VCF file was specified.\", parser)\n\n if not args.strain:\n 
g2g.exit(\"No strain was specified.\", parser)\n\n try:\n vcf2vci.process(args.vcf_files, args.fasta_file, args.output, args.strain, args.keep, args.passed, args.quality, args.diploid, args.numprocesses, args.nobgzip)\n except KeyboardInterrupt as ki:\n LOG.debug(ki)\n except exceptions.G2GValueError as e:\n g2g.exit(e, parser)\n except exceptions.G2GVCFError as e:\n g2g.exit(e, parser)", "def write_vcf(file, ref, genome):\n vcf = open(file, \"w\")\n vcf.write(\"##fileformat=VCFv4.2\\n\")\n vcf.write(\"##fileDate={}\\n\".format(datetime.datetime.today().strftime('%Y%m%d')))\n vcf.write(\"##source={}\\n\".format(os.path.basename(__file__)))\n vcf.write(\"##reference={}\\n\".format(ref))\n vcf.write(\"##contig=<ID={}\\n\".format(genome.name))\n vcf.write(\"##translocation_origin=END position relates to insertion position\\n\")\n vcf.write(\"##translocation_insert=END position relates to original start position\\n\")\n vcf.write('##INFO=<ID=END,Number=1,Type=Integer,Description=\"End position of variant\">\\n')\n vcf.write('##INFO=<ID=SVTYPE,Number=1,Type=String,Description=\"Type of structural variant detected\">\\n')\n vcf.write('##INFO=<ID=LEN,Number=1,Type=Integer,Description=\"Length of variant region\">\\n')\n vcf.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\n\")\n for var in reversed(genome.get_variants()):\n info = \"END={};SVTYPE={};LEN={}\".format(var.end,var.type,var.size)\n vcf.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(\n genome.name,var.start,'.',var.ref,var.alt,'.','.',info,'.'))\n\n vcf.close()", "def main():\n args = parse_arguments()\n\n de_data = pd.read_csv(args.raw_file, sep=\"\\t\")\n de_data.rename(columns={\"Unnamed: 0\": \"gene_id\"}, inplace=True)\n de_data.fillna(value=1, inplace=True)\n columns = {}\n col_order = []\n\n # Make sure all listed numeric columns are valid numeric variables based\n # on a union of numeric column names from cuffdiff, edgeR, deseq2 and test\n # files.\n numeric_columns = [\n \"baseMean\",\n \"log2FoldChange\",\n \"lfcSE\",\n \"stat\",\n \"pvalue\",\n \"padj\",\n \"value_1\",\n \"value_2\",\n \"log2(fold_change)\",\n \"test_stat\",\n \"p_value\",\n \"q_value\",\n \"logfc\",\n \"fdr\",\n \"stat\",\n \"logFC\",\n \"logCPM\",\n \"LR\",\n \"Pvalue\",\n \"FDR\",\n ]\n de_columns = de_data.columns\n\n for column in numeric_columns:\n if column not in de_columns:\n continue\n\n if not is_numeric_dtype(de_data[column]):\n msg = (\n f\"Column {column} is not numeric. Please make sure \"\n f\"that the input file has valid numeric values (i.e. 
\"\n f\"periods for decimal places).\"\n )\n send_message(error(msg))\n raise ValueError(msg)\n\n if args.gene_id:\n if args.gene_id == \"index\":\n columns[\"gene_id\"] = list(de_data.index.astype(str))\n col_order.append(\"gene_id\")\n else:\n columns[\"gene_id\"] = list(de_data[args.gene_id].astype(str))\n col_order.append(\"gene_id\")\n\n if args.logfc:\n col = np.array(de_data[args.logfc])\n col[np.isinf(col)] = 0\n columns[\"logfc\"] = list(col)\n col_order.append(\"logfc\")\n\n if args.fdr:\n columns[\"fdr\"] = list(de_data[args.fdr])\n col_order.append(\"fdr\")\n\n if args.pvalue:\n columns[\"pvalue\"] = list(de_data[args.pvalue])\n col_order.append(\"pvalue\")\n\n if args.fwer:\n columns[\"fwer\"] = list(de_data[args.fwer])\n col_order.append(\"fwer\")\n\n if args.logodds:\n columns[\"logodds\"] = list(de_data[args.logodds])\n col_order.append(\"logodds\")\n\n if args.stat:\n columns[\"stat\"] = list(de_data[args.stat])\n col_order.append(\"stat\")\n\n with open(args.output_json, \"w\") as f:\n json.dump(columns, f, separators=(\",\", \":\"), allow_nan=False)\n\n outdf = pd.DataFrame(columns)\n outdf = outdf[col_order]\n outdf.to_csv(args.output_file, sep=\"\\t\", index=False, compression=\"gzip\")" ]
[ "0.65551376", "0.6353839", "0.6204274", "0.6045592", "0.6039398", "0.6026456", "0.60237366", "0.6014583", "0.5893786", "0.5844245", "0.58375883", "0.58292997", "0.5802896", "0.5797852", "0.5705959", "0.55909956", "0.5515224", "0.5505494", "0.5427722", "0.5370823", "0.5360676", "0.53374904", "0.5337267", "0.5324869", "0.5313133", "0.5285278", "0.5279696", "0.5272208", "0.5220193", "0.5216121" ]
0.70393276
0
Get the configured lxc root for containers
def get_root_path(path): if not path: path = __opts__.get("lxc.root_path", DEFAULT_PATH) return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_conda_root():\n root_path = \"\"\n try:\n conda_json = subprocess.check_output(\"conda info --json\",\n shell=True, stderr=subprocess.PIPE)\n if sys.version_info.major > 2:\n conda_json = conda_json.decode(\"utf-8\")\n\n dec = json.JSONDecoder()\n root_path = dec.decode(conda_json)['default_prefix']\n if sys.version_info.major < 3:\n root_path = str(root_path)\n\n except Exception as ex:\n print(\"[Info] Conda check: %s\" % ex)\n\n return root_path", "def find_lxd_config():\n paths = []\n paths.append(os.path.expanduser(\"~/.config/lxc\"))\n paths.append(os.path.expanduser(\"~/snap/lxd/current/.config/lxc\"))\n\n for path in paths:\n if os.path.exists(path):\n crt = os.path.expanduser(\"{}/client.crt\".format(path))\n key = os.path.expanduser(\"{}/client.key\".format(path))\n if os.path.exists(crt) and os.path.exists(key):\n return (crt, key)\n return (None, None)", "def find_root():\n\n curdir = os.path.curdir\n fs_root = \"/\"\n # Do as build/envsetup.sh does\n # if this files exists, we're at root\n root_clue = \"build/core/envsetup.mk\"\n found = False\n while not found and not os.path.samefile(fs_root, curdir):\n if os.path.exists(os.path.join(curdir, root_clue)):\n found = True\n break\n curdir = os.path.join(os.path.pardir, curdir)\n return curdir if found else None", "def GetImportRoot():\n import_root_env = os.environ.get('LOGICAPATH')\n if not import_root_env:\n return None\n roots = import_root_env.split(':')\n if len(roots) > 1:\n return roots\n else:\n return import_root_env", "def get_root():\n\n return 'data/simulators/mg1'", "def work_root(session):\n return session[\"AVALON_WORKDIR\"]", "def container_workingdir(self):\n return self.environment['HOME']", "def _get_volroot(self, nms):\n if not self.nms_cache_volroot:\n return nms.server.get_prop('volroot')\n if nms not in self._nms2volroot:\n self._nms2volroot[nms] = nms.server.get_prop('volroot')\n return self._nms2volroot[nms]", "def shadow_container(kls):\n if os.name == 'posix' and os.path.isdir('/dev/shm'):\n return '/dev/shm/'\n else:\n return gettempdir()", "def get_conda_root():\n try:\n # Fast-path\n # We're in the root environment\n conda_root = _import_conda_root()\n except ImportError:\n # We're not in the root environment.\n envs_dir = dirname(CONDA_PREFIX)\n if basename(envs_dir) == 'envs':\n # We're in a named environment: `conda create -n <name>`\n conda_root = dirname(envs_dir)\n else:\n # We're in an isolated environment: `conda create -p <path>`\n # The only way we can find out is by calling conda.\n conda_root = _conda_root_from_conda_info()\n\n return conda_root", "def search_lxc_bridge():\n return search_lxc_bridges()[0]", "def get_root(self, *args, **kwargs):\n return self._resources_manager.get_root(*args, **kwargs)", "def _find_config_root(self) -> str:\n location = [\"apache2.conf\", \"httpd.conf\", \"conf/httpd.conf\"]\n for name in location:\n if os.path.isfile(os.path.join(self.root, name)):\n return os.path.join(self.root, name)\n raise errors.NoInstallationError(\"Could not find configuration root\")", "def get_root_dir():\n return os.path.dirname(os.path.dirname(__file__))", "def getProjectRootPath():\n rootpath = os.getcwd().split('\\eles')[0]\n return rootpath", "def get_zone_root(self, refresh=False):\n return self.get_zonepath(refresh) + \"/root\"", "def root(self) -> str:\n return pulumi.get(self, \"root\")", "def root_dir():\r\n return Path(__file__).parent.parent", "def get_root_dir(args):\n\n if args.root_dir:\n if not os.path.isdir(os.path.realpath(args.root_dir)):\n raise 
OpsException(\n \"Specified root dir '%s' does not exists\" %\n os.path.realpath(\n args.root_dir))\n\n return os.path.realpath(args.root_dir)\n\n return os.path.realpath(os.getcwd())", "def getRootPath()->str:\n if '--develop' in sys.argv:\n return eel._get_real_path('public') + '/'\n\n return eel._get_real_path('build') + '/'", "def var_REPO_ROOT(self):\n return rh.git.find_repo_root()", "def getNfsRoot(self):\n\t\treturn self.nfsroot", "def getRootFolder():\n return CURRENT_CONNECTION.rootFolder", "def get_buildroot():\r\n try:\r\n return BuildRoot().path\r\n except BuildRoot.NotFoundError as e:\r\n print(e.message, file=sys.stderr)\r\n sys.exit(1)", "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "def root_dir():\n return dirname(dirname(__file__))", "def root(self) -> str:\n if not hasattr(self, '_root'):\n root_keys = self.config.keys()\n self._root = next(iter(root_keys))\n return self._root", "def get_git_root():\n path = os.getcwd()\n git_repo = git.Repo(path, search_parent_directories=True)\n git_root = git_repo.git.rev_parse(\"--show-toplevel\")\n return git_root", "def get_runtime_default_log_path(soln_stk, container_config):\n\n cont_info = _get_preconfig_info(soln_stk, container_config)\n return cont_info[RUNTIME_DEFAULT_LOG_KEY]", "def root(self):\n return self.paths.root" ]
[ "0.637064", "0.6228243", "0.611554", "0.610834", "0.6107005", "0.6073694", "0.6021122", "0.59645015", "0.59363914", "0.5918172", "0.59082437", "0.5842891", "0.58324474", "0.5808479", "0.58053213", "0.5800716", "0.5797924", "0.57776356", "0.5768648", "0.5768153", "0.5752801", "0.57418525", "0.57381624", "0.5732036", "0.5726788", "0.5722276", "0.57203466", "0.5715194", "0.5708438", "0.5703216" ]
0.68899256
0
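A minimal standalone sketch of the fallback logic shown in get_root_path above: an explicit path wins, then the configured "lxc.root_path" option, then the default. The DEFAULT_PATH value and the plain dict standing in for salt's __opts__ are assumptions for illustration.

# Sketch of the path-resolution fallback; _opts stands in for salt's __opts__ (assumed).
DEFAULT_PATH = "/var/lib/lxc"
_opts = {"lxc.root_path": "/srv/lxc"}

def get_root_path(path=None):
    # Prefer an explicit path, then the configured option, then the default.
    if not path:
        path = _opts.get("lxc.root_path", DEFAULT_PATH)
    return path

print(get_root_path())            # -> /srv/lxc (configured option)
print(get_root_path("/tmp/lxc"))  # -> /tmp/lxc (explicit path wins)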
Clear any lxc variables set in __context__
def _clear_context(): for var in [x for x in __context__ if x.startswith("lxc.")]: log.trace("Clearing __context__['%s']", var) __context__.pop(var, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_cxt_vars(cxt):\n if hasattr(cxt, '_cl'):\n del cxt._cl\n if hasattr(cxt, '_pairs'):\n del cxt._pairs", "def context_reset(self):\n self._context_state = None\n logging.info('Resetting the context')", "def controlVariablesClear() :\n s.clearScriptAll()", "def reset_context():\n global root_dir\n global wells_list\n global tops_list\n\n del root_dir, wells_list, tops_list\n root_dir = WellsDir(None, 'root')\n wells_list = list()\n tops_list = list()", "def clear(self):\n self.vars = []", "def reset(self, *_):\n with self._context.lock:\n super().reset()\n self.__context_init()", "def clear_locals(self):\n self._locals = dict()", "def reset(self):\n\n self.type = None\n self.additional_context = \"\"\n super().reset()", "def env_cleanup(self):\n pass", "def reset():\n _runtime.reset()", "def ctxtReset(self):\n libxml2mod.xmlCtxtReset(self._o)", "def clear(self):\n self.contexts.clear()\n self._background_context.diagnostics.clear()", "def __clear_context(self):\n self.calling_view = None\n self.calling_view_index = []\n self.calling_view_is_empty = False\n\n self.current_view = None\n self.current_history_entry = None", "def clear():\r\n CURRENT_REQUEST_CONFIGURATION.data = {}", "def reset(self, env):\n self._env = env\n return", "def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)", "def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None", "def hard_reset() -> NoReturn:", "def reset_env(self):\n return self.env.reset()", "def reset(self):\n # Pop all active context.\n while driver.pop_active_context() is not None:\n pass\n\n # If it is the main thread\n if threading.current_thread() == self._mainthread:\n self._destroy_all_contexts()", "def clean_env():\n for key in ['FOO', 'THOR', 'IRON', 'NAME', 'PERSONAL_DIR']:\n os.environ.pop(key, None)", "def __resetLocal__(self):\n pass", "def clear_mpi_env_vars():\n removed_environment = {}\n for k, v in list(os.environ.items()):\n for prefix in ['OMPI_', 'PMI_']:\n if k.startswith(prefix):\n removed_environment[k] = v\n del os.environ[k]\n try:\n yield\n finally:\n os.environ.update(removed_environment)", "def clear(self) -> None:\n self._REGISTERED_ENVS.clear()\n self._manifests = []\n self._sync = True", "def unload_context(self):\n if self._simulation is not None:\n del self._simulation.context\n self._simulation = None", "def clear_mpi_env_vars():\n removed_environment = {}\n for k, v in list(os.environ.items()):\n for prefix in [\"OMPI_\", \"PMI_\"]:\n if k.startswith(prefix):\n removed_environment[k] = v\n del os.environ[k]\n try:\n yield\n finally:\n os.environ.update(removed_environment)", "def clean(_context):", "def reset(cls):\r\n cls._GLOBAL = cls()", "def reset(self):\n self.in_compact_method = False\n self.in_setup = False\n self.autoname_cursor = dict()", "def reset(self):\n self._varstate = None\n self.frozen = False" ]
[ "0.7920712", "0.7487981", "0.66750956", "0.66598994", "0.662593", "0.65864086", "0.6567458", "0.65108454", "0.6506112", "0.63643444", "0.63367957", "0.63300246", "0.62848675", "0.62818927", "0.6278924", "0.6275691", "0.62550807", "0.6248847", "0.62454367", "0.62208486", "0.6214666", "0.6201736", "0.61899364", "0.6179122", "0.61746544", "0.61741024", "0.6173836", "0.6160465", "0.6154957", "0.6152526" ]
0.86192983
0
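The prefix-based cleanup in _clear_context above can be reproduced with an ordinary dict; the sample keys and the dict standing in for salt's __context__ are assumptions for illustration.

# Pop every key with the "lxc." prefix, as _clear_context does against __context__.
context = {"lxc.bridges": ["lxcbr0"], "lxc.root_path": "/var/lib/lxc", "other.key": 1}

# The comprehension materializes the matching keys first, so popping inside the loop is safe.
for var in [x for x in context if x.startswith("lxc.")]:
    context.pop(var, None)

print(context)  # {'other.key': 1}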
Search which bridges are potentially available as LXC bridges
def search_lxc_bridges(): bridges = __context__.get("lxc.bridges", None) # either match not yet called or no bridges were found # to handle the case where lxc was not installed on the first # call if not bridges: bridges = set() running_bridges = set() bridges.add(DEFAULT_BR) try: output = __salt__["cmd.run_all"]("brctl show") for line in output["stdout"].splitlines()[1:]: if not line.startswith(" "): running_bridges.add(line.split()[0].strip()) except (SaltInvocationError, CommandExecutionError): pass for ifc, ip in __grains__.get("ip_interfaces", {}).items(): if ifc in running_bridges: bridges.add(ifc) elif os.path.exists(f"/sys/devices/virtual/net/{ifc}/bridge"): bridges.add(ifc) bridges = list(bridges) # if we found interfaces that have lxc in their names # we filter them as being the potential lxc bridges # we also try to default on br0 on other cases def sort_bridges(a): pref = "z" if "lxc" in a: pref = "a" elif "br0" == a: pref = "c" return f"{pref}_{a}" bridges.sort(key=sort_bridges) __context__["lxc.bridges"] = bridges return bridges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_lxc_bridge():\n return search_lxc_bridges()[0]", "def list_bridges(self):\n return [x for x,y in self.devices.items() if y.device_type == \"Bridge\"]", "def bridge(gwc = 0, brc = bridge_int):\n# bridge interface list\n br_interface = []\n# bridge ip addresses list\n gw_ipaddr = []\n# bridge network list\n gw_network = []\n# gatweway start number list\n gw_number = 0\n\n# fill all lists for bridge\n for i in netifaces.ifaddresses(bridge_int)[netifaces.AF_INET]:\n br_interface.append([gw_number, ' ', i['addr'], i['netmask']])\n gw_ipaddr.append(i['addr'])\n gw_network.append(i['netmask'])\n gw_number = gw_number + 1\n br_interface[0][1] = bridge_int\n\n if gwc == 'check':\n return (br_interface, gw_ipaddr, gw_network)\n\n# print jadm gateways table\n br_menu = [\"Number\", \"Bridge name\", \"Gateway IP Address\", \"Gatewy Network Mask\"]\n print tabulate(br_interface, br_menu)\n\n# return bridge interface name, ip addresses and network mask\n return (br_interface, gw_ipaddr, gw_network)", "def find_bridge(i, j, chain) :\n B = chain.bridges_dict\n br = None\n for b in B.keys() :\n if (B[b].lumen1 == i and B[b].lumen2 == j) or (B[b].lumen1 == j and B[b].lumen2 == i) :\n br = b\n if br == None :\n print('No bridge found to connect these lumens ('+str(i)+', '+str(j)+') !')\n return br", "def bridge_search(start, connection, components):\n connecting = [c for c in components if connection in c]\n if not connecting:\n return [start]\n bridges = []\n for comp in connecting:\n remaining = components[:]\n remaining.remove(comp)\n new_connection = comp[0] if comp[0] != connection else comp[1]\n bridges += bridge_search(\n start + [comp], new_connection, remaining)\n return bridges", "def do_bridges(self, line):\n print('List of Bridges \\n')\n print('ID\\tAddress')\n\n for index, device in enumerate(self.huuey.bridges):\n print u\"{index}\\t{device}\".format(index=index+1, device=device['internalipaddress'])", "def get_lights(bridge):\n\n target_names = [\n \"Console Lamp\",\n \"Bedroom Table Lamp\",\n \"Kitchen light\",\n ]\n\n targets = [light for light in bridge.lights if light.name in target_names]\n\n if len(targets) != len(target_names):\n print(\"%s: not found ... 
%s\" % (target_names, targets))\n exit(1)\n\n return targets", "def find_connections():\n # print \"External\"\n # print findservices('00:0D:93:19:C8:68')\n # print findservices('bc:f5:ac:84:81:0c')\n # print finddevices()\n # print findservices(gethostaddr())\n # print gethostclass()\n print \"Your address: \", lb.gethostaddr()\n print lb.finddevicename(lb.gethostaddr())\n s = lb.socket()\n #s.bind((\"\", 0)) # RFCOMM port\n #s.bind((\"\", 1)) # RFCOMM port\n s.bind((\"\", 2)) # RFCOMM port\n print \"About to listen\"\n s.listen(1)\n print \"About to advertise\"\n lb.advertise(\"LightBlueService\", s, lb.RFCOMM)\n print \"Advertised at {} and listening on channel {}...\".format(s.getsockname()[0], s.getsockname()[1])\n print \"Waiting to accept\"\n # s.setblocking(1)\n try:\n conn, addr = s.accept()\n except KeyboardInterrupt:\n print \"Closing connection due to keyboard intterupt\"\n s.close()\n raise KeyboardInterrupt\n # Set timeout for 1 second\n # s.settimeout(1.0)\n print \"Connected by\", addr\n return conn, addr, s", "def bridge_network_check(ip, bridge_ip, bridge_netmask):\n# convert vars to unicode \n ip = unicode(ip)\n bridge_ip = unicode(bridge_ip)\n bridge_netmask = unicode(bridge_netmask)\n# by default ip is not in bridge network \n brctl = 0\n\n# bridge insterface ip network\n brdige_network = IPv4Interface('%s/%s' % (bridge_ip, bridge_netmask)).network\n\n# check if ip is from bridge network and return bridge control var (brctl) = true\n if IPv4Address(ip) in list(IPv4Network(brdige_network)):\n brctl = 1\n\n# return brctl and bridge ip network \n return brctl, brdige_network", "def needs_bridging(self, has_verbal_autopsy):\n sources_to_bridge_map = [\n \"India_SCD_states_rural\", \"India_CRS\",\n \"India_MCCD_states_ICD9\", \"India_MCCD_states_ICD10\",\n \"India_Maharashtra_SCD\", \"India_MCCD_Orissa_ICD10\",\n \"India_MCCD_Delhi_ICD10\", \"ICD9_BTL\", \"Russia_FMD_1989_1998\",\n \"China_1991_2002\", \"ICD9_USSR_Tabulation\", \"ICD10_tabulated\",\n \"Thailand_Public_Health_Statistics\", \"India_SRS_states_report\",\n \"ICD8A\", \"UKR_databank_ICD10_tab\", \"Russia_FMD_ICD9\",\n 'Iran_Mohsen_special_ICD10'\n ]\n\n if has_verbal_autopsy | (self.source in sources_to_bridge_map):\n return True\n else:\n return False", "def detect_bridge():\n # Initialize color ranges for detection\n color_range = [Color(\"Brug\", [0, 0, 0], [0, 255, 107]),\n Color(\"Gat\", [0, 0, 0], [0, 0, 255]),\n Color(\"Rand\", [0, 0, 185], [0, 0, 255]),\n Color(\"White-ish\", [0, 0, 68], [180, 98, 255])]\n\n cam = Recognize(color_range)\n cam.run()", "def test_get_pci_switch_list(self):\n pass", "def bt_scan():\n print(\"Searching for nearby devices...\")\n explore_devices = []\n if explorepy._bt_interface == 'sdk':\n device_manager = explorepy.exploresdk.ExploreSDK_Create()\n nearby_devices = device_manager.PerformDeviceSearch()\n for bt_device in nearby_devices:\n if \"Explore\" in bt_device.name:\n print(\"Device found: %s - %s\" % (bt_device.name, bt_device.address))\n explore_devices.append((bt_device.name, bt_device.address))\n else:\n import bluetooth\n nearby_devices = bluetooth.discover_devices(lookup_names=True)\n for address, name in nearby_devices:\n if \"Explore\" in name:\n print(\"Device found: %s - %s\" % (name, address))\n explore_devices.append((address, name))\n\n if not nearby_devices:\n print(\"No Devices found\")\n\n return explore_devices", "def find_neighbors(index, bridges_dict) :\n neighbors = []\n for k in bridges_dict.keys() :\n if bridges_dict[k].lumen1 == index :\n neighbors 
+= [bridges_dict[k].lumen2]\n elif bridges_dict[k].lumen2 == index :\n neighbors += [bridges_dict[k].lumen1]\n return neighbors", "def linux():\n command = \"cat /etc/NetworkManager/system-connections/*\"\n networks = subprocess.check_output(command, shell=True).decode(\"utf-8\")\n return networks", "def find_nic():\n result = subprocess.run([\"iw\", \"dev\"], capture_output=True).stdout.decode()\n network_interface_controllers = wlan_code.findall(result)\n return network_interface_controllers", "def check_any_light_on(bridge):\n for i,group in bridge.get_group().items():\n if group['state']['any_on']:\n return True\n return False", "def get_bridges(edges_list):\n\n # print(\"all edges:\", edges_list)\n\n # make a temporary graph\n temp_G = nx.Graph()\n\n # add all current edges to the graph\n for edge in edges_list:\n edge_node_1, edge_node_2 = edge\n temp_G.add_edge(edge_node_1, edge_node_2)\n\n # get all_bridges in temp graph\n bridges_all = list(nx.bridges(temp_G))\n\n # get set of edges with two traversals left (only want one of each, so use set)\n mult_trav_remaining = set([])\n\n for edge in edges_list:\n\n num_trav_remaining = edges_list.count(edge)\n\n if num_trav_remaining > 1:\n\n mult_trav_remaining.add(edge)\n\n mult_trav_remaining = list(mult_trav_remaining)\n\n # remove mult traversal edges from bridges list\n\n # print(\"bridges_ all:\", bridges_all)\n # print(\"\\nmult_trav_remaining:\", mult_trav_remaining)\n\n # make a new bridges list that contains only edges that don't have mult traversals left\n\n bridges_reduced = []\n\n for edge in bridges_all:\n # print(\"\\n\\nedge:\", edge)\n # print()\n if edge in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} is in {mult_trav_remaining}\")\n elif edge[::-1] in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} REVERSED is in {mult_trav_remaining}\")\n else:\n # print(f\"bridge {edge} is NOT in {mult_trav_remaining}\")\n\n bridges_reduced.append(edge)\n\n # return a list of true bridges\n return bridges_reduced", "def process_hosts(root_bridge_ips: Iterable[str], community: str,\n do_recurse=False, all_ports=False, resolve_hostnames=True) -> \\\n Tuple[Dict[str, Bridge], Dict[str, str], Dict[str, List[str]], Dict[str, str], str]:\n ips_to_visit = set(list(root_bridge_ips))\n visited_chassis_ids = set()\n visited_ips = set()\n\n bridges: Dict[str, Bridge] = {}\n all_bridge_macs = set()\n arp = {}\n\n walk = partial(snmp_walk, community=community)\n\n while ips_to_visit:\n host = ips_to_visit.pop()\n if host in visited_ips:\n continue\n visited_ips.add(host)\n\n print(\"VISITING\", host, file=sys.stderr)\n\n # Skip if chassis ID not found or has already been seen\n try:\n lldpLocChassisId = walk(host, '1.0.8802.1.1.2.1.3.2', 'hex').values()\n except ConnectionError as e:\n print(str(e) + f\" -- skipping {host}!\", file=sys.stderr)\n continue\n\n if not lldpLocChassisId:\n print(f\"Got no ChassisId from {host} -- missing LLDP support?\", file=sys.stderr)\n continue\n lldpLocChassisId = tuple(lldpLocChassisId)[0]\n if lldpLocChassisId in visited_chassis_ids:\n continue\n visited_chassis_ids.add(lldpLocChassisId)\n\n all_bridge_macs.add(lldpLocChassisId) # chassis id looks like a MAC and some switches use it for all their ports\n\n print(\" - Getting local info...\", file=sys.stderr)\n\n # Check that it's a bridge\n lldpLocSysCapSupported = int(tuple(walk(host, '1.0.8802.1.1.2.1.3.5', 'hex').values())[-1], 16)\n is_bridge = (lldpLocSysCapSupported & 32) != 0\n if not is_bridge:\n 
print(f\"Host {host} does not announce Bridge type LLDP capability. Skipping.\", file=sys.stderr)\n continue\n\n dot1dTpFdbPort_to_portnum = {int(k): v for (k, v) in walk(host, '1.3.6.1.2.1.17.1.4.1.2', 'int').items()}\n\n # Find local management IP addresses (if supported)\n local_ips = set()\n lldpLocManAddrIfId = walk(host, '1.0.8802.1.1.2.1.3.8.1.5', 'preview') # local man addresses\n for oid, port_id in lldpLocManAddrIfId.items():\n local_ips.add(read_ipv4_from_oid_tail(oid))\n\n lldpLocSysName = walk(host, '1.0.8802.1.1.2.1.3.3')\n lldpLocSysDesc = walk(host, '1.0.8802.1.1.2.1.3.4')\n\n this_bridge = Bridge(\n chassis_id=lldpLocChassisId,\n ip_addresses=list({host} | local_ips),\n name=next(iter(lldpLocSysName.values())),\n desc=next(iter(lldpLocSysDesc.values())) or '',\n neighbors=[],\n ports=defaultdict(lambda: Port(name='', speed=0, remote_macs=[], remote_ips=[], local_mac=None, interlink=False)))\n\n # Find IP addresses to neighbor bridges\n print(\" - Getting neighbors...\", file=sys.stderr)\n lldpRemManAddrTable = walk(host, '1.0.8802.1.1.2.1.4.2.1.4', 'preview')\n for oid, port_id in lldpRemManAddrTable.items():\n time_mark, local_port_num, rem_index, addr_subtype, *rest = split_numbers(oid)\n if addr_subtype == 1: # ipv4\n if do_recurse:\n ips_to_visit.add(read_ipv4_from_oid_tail(oid))\n\n # Port names\n print(\" - Getting ports...\", file=sys.stderr)\n for port, name in walk(host, '1.3.6.1.2.1.31.1.1.1.1', 'any').items(): # ifName\n this_bridge.ports[int(port)].name = name\n # Port speeds\n for port, speed in walk(host, '1.3.6.1.2.1.31.1.1.1.15', 'int').items(): # ifHighSpeed\n this_bridge.ports[int(port)].speed = speed\n # Local port macs\n for port, mac in walk(host, '1.3.6.1.2.1.2.2.1.6', 'hex').items(): # ifPhysAddress\n this_bridge.ports[int(port)].local_mac = mac\n all_bridge_macs.add(mac)\n\n # Read ARP table\n print(\" - Reading device ARP table...\", file=sys.stderr)\n atPhysAddress = walk(host, '1.3.6.1.2.1.3.1.1.2', 'hex')\n for oid, mac in atPhysAddress.items():\n ip = read_ipv4_from_oid_tail(oid, with_len=False)\n arp[ip] = mac\n\n # Map remote (learned) MACs to ports\n print(\" - Getting MACs for ports...\", file=sys.stderr)\n macs_per_port = defaultdict(set)\n ports_per_mac = defaultdict(set)\n dot1qTpFdbPort = walk(host, '1.3.6.1.2.1.17.7.1.2.2.1.2', 'int')\n for k, port_idx in dot1qTpFdbPort.items():\n port = port_idx\n if port_idx in dot1dTpFdbPort_to_portnum:\n port = dot1dTpFdbPort_to_portnum[port_idx]\n parts = split_numbers(k)\n vlan = int(parts[0])\n if port:\n mac = ''.join([('%02x' % x) for x in parts[1:]])\n if mac != '0000000000':\n assert(port in this_bridge.ports)\n if mac not in this_bridge.ports[port].remote_macs:\n this_bridge.ports[port].remote_macs.append(mac)\n macs_per_port[port].add(mac)\n ports_per_mac[mac].add(port)\n\n #lldpRemSysCapSupported = walk(host, '1.0.8802.1.1.2.1.4.1.1.11', 'hex')\n ##lldpLocSysCapSupported = int(tuple(walk(host, '1.0.8802.1.1.2.1.3.5', 'hex').values())[-1], 16)\n ##is_bridge = (lldpLocSysCapSupported & 32) != 0\n #print(lldpRemSysCapSupported, file=sys.stderr)\n\n print(\" - Getting remotes...\", file=sys.stderr)\n lldpRemChassisId = walk(host, '1.0.8802.1.1.2.1.4.1.1.5', 'hex')\n for k, chassis_id in lldpRemChassisId.items():\n time_mark, port, idx = split_numbers(k)\n if chassis_id not in this_bridge.neighbors:\n this_bridge.neighbors.append(chassis_id)\n\n this_bridge.ports = dict(this_bridge.ports)\n bridges[this_bridge.chassis_id] = this_bridge\n\n\n # Just to be sure: lookup MACs for visited bridge 
IPs\n for ip, mac in arp.items():\n if ip in visited_ips:\n all_bridge_macs.add(mac)\n\n # Reverse ARP table (MAC -> set of IPs)\n rarp = {}\n for k, v in arp.items():\n rarp.setdefault(v, set()).add(k)\n\n # Find hostnames for ip addresses using multiple threads (the query is VERY slow)\n ip_to_hostname = {}\n with PoolExecutor(max_workers=50) as executor:\n ips = []\n for b in bridges.values():\n ips.extend(b.ip_addresses)\n for p in b.ports.values():\n for mac in [*p.remote_macs, p.local_mac, b.chassis_id]:\n ips.extend(rarp.get(mac) or [])\n ips = set(ips)\n\n def fetch_name(ip):\n try:\n return socket.gethostbyaddr(ip)\n except (socket.gaierror, socket.herror):\n return [None, [], [ip]]\n\n if resolve_hostnames:\n print(f\"Resolving hostnames for {len(ips)} IP addresses...\", file=sys.stderr)\n for res in executor.map(fetch_name, ips):\n for ip in res[2]:\n if res[0]:\n ip_to_hostname[ip] = res[0]\n\n # Cleanup and extend some values\n print(\"Cleaning up and extending...\", file=sys.stderr)\n for b in bridges.values():\n print(f\" - Bridge {b.name}...\", file=sys.stderr)\n\n # Replace macs with NeighborInfos in neighbor lists\n print(\" - extending NeighborInfos...\", file=sys.stderr)\n neigh_infos = []\n for chassis_id in b.neighbors:\n ni = NeighborInfo(is_bridge=False, name='', ips=[], macs=[chassis_id], chassis_id=chassis_id)\n b2 = bridges.get(chassis_id)\n if b2:\n ni = NeighborInfo(is_bridge=True, name=b2.name, ips=list(b2.ip_addresses), chassis_id=chassis_id,\n macs=list({chassis_id, *[p.local_mac for p in b2.ports.values()]}))\n\n ni.in_ports = list({k for k,p in b.ports.items() if (set(ni.macs).intersection(set(p.remote_macs)))})\n for ips in ((rarp.get(m) or []) for m in ni.macs):\n ni.ips.extend(ips)\n ni.ips = list(set(ni.ips))\n ni.name = ni.name or ip_to_hostname.get([*ni.ips, ''][0]) or ''\n neigh_infos.append(ni)\n\n b.neighbors = neigh_infos\n\n # Delete unused ports from results\n if not all_ports:\n print(\" - filtering unused ports...\", file=sys.stderr)\n b.ports = {k: v for k, v in b.ports.items() if (v.remote_macs or v.remote_ips)}\n\n # Update port contents\n print(\" - updating port contents...\", file=sys.stderr)\n for p in b.ports.values():\n # Mark all ports with bridge management addresses as \"interlink\"\n for bm in all_bridge_macs:\n p.interlink |= (bm in p.remote_macs)\n # Add a list of IP addresses seen behind a port\n for mac in p.remote_macs:\n p.remote_ips.extend(rarp.get(mac) or [])\n p.remote_macs = sorted(p.remote_macs)\n p.remote_ips = sorted(list(set(p.remote_ips))) # prune duplicates\n\n # Sort for nicer output TODO: \"natural sorting\" for IPs\n print(\"Sort ARP tables...\", file=sys.stderr)\n arp = dict(sorted(arp.items()))\n rarp = dict(sorted(rarp.items()))\n\n res_dict = {\n 'timestamp': time.time(),\n 'bridges': [b.as_dict() for b in bridges.values()],\n 'arp': arp,\n 'rarp': {k: list(v) for k, v in rarp.items()},\n 'ip_to_hostname': ip_to_hostname\n }\n\n return bridges, arp, rarp, ip_to_hostname, json.dumps(res_dict, indent=4)", "def bridges(species1_names, species2_names):\n k12 = filter(lambda s: re.search('K-12',s)!=None, species1_names)[0]\n return [(k12, species2_names[0]), (k12, species2_names[1]), (k12, species2_names[2])]", "def should_build_ib():\n ib_util_found = False\n ib_lib_found = False\n ib_header_found = False\n\n try:\n # If the command doesn't exist, we can directly return instead of\n # making a subprocess call\n full_cmd_path = get_command_path(IB_DEVINFO_CMD)\n if not full_cmd_path:\n ib_util_found = False\n 
subprocess.check_output([full_cmd_path, \"--list\"])\n # Here we just would like to simply run the command to test if IB\n # related tools / lib are installed without parsing the output. We\n # will enable IB build as long as the command runs successfully.\n #\n # The output should look like either:\n #\n # > ibv_devinfo --list\n # 0 HCAs founds:\n #\n # or\n #\n # > ibv_devinfo --list\n # 4 HCAs found:\n # mlx5_3\n # mlx5_2\n # mlx5_1\n # mlx5_0\n ib_util_found = True\n except Exception:\n # We just take all the exceptions here without affecting the build\n ib_util_found = False\n\n lib_paths = list(filter(bool, [\n \"/usr/lib/\",\n \"/usr/lib/x86_64-linux-gnu/\",\n \"/usr/lib/powerpc64le-linux-gnu/\",\n \"/usr/lib/aarch64-linux-gnu/\",\n ] + gather_paths([\n \"LIBRARY_PATH\",\n ]) + gather_paths([\n \"LD_LIBRARY_PATH\",\n ])))\n\n include_paths = [\n \"/usr/include/\",\n ]\n\n if IS_CONDA:\n lib_paths.append(os.path.join(CONDA_DIR, \"lib\"))\n include_paths.append(os.path.join(CONDA_DIR, \"include\"))\n\n for path in lib_paths:\n if path is None or not os.path.exists(path):\n continue\n ib_libraries = sorted(glob.glob(os.path.join(path, \"libibverbs*\")))\n if ib_libraries:\n ib_lib_found = True\n break\n\n for path in include_paths:\n if path is None or not os.path.exists(path):\n continue\n if os.path.exists(os.path.join(path, \"infiniband/verbs.h\")):\n ib_header_found = True\n break\n\n return ib_util_found and ib_lib_found and ib_lib_found", "def get_bgp_neighbors(context, target):\n\n response = context.get_operation(\"get_bgp_neighbors\")\n neighbors = [ row[\"neighbor\"] for row in response ]\n return neighbors", "def test_get_pci_link_list(self):\n pass", "def get_GWs_by_LB(elbname):\n M = []\n elb = boto3.client('elb')\n ec2 = boto3.client(\"ec2\")\n ElbGW = elb.describe_instance_health(LoadBalancerName=elbname)\n GWs = ec2.describe_instances(Filters=[{\"Name\": \"instance-id\", \"Values\": [x['InstanceId'] for x in ElbGW['InstanceStates']]}])\n for re in GWs['Reservations']:\n for ins in re['Instances']:\n M.append([ins['InstanceId'],\n [y['State'] for y in ElbGW['InstanceStates'] if y['InstanceId'] == ins['InstanceId']].pop(),\n ins['Placement']['AvailabilityZone'],\n ins['VpcId'],\n sorted([\n [eni['Attachment']['DeviceIndex'],\n eni['NetworkInterfaceId'],\n eni['SourceDestCheck']] for eni in ins['NetworkInterfaces']\n ]\n ),\n ins['SubnetId']])\n return M", "def gateway_subnets(self) -> \"list[SubnetAffiliation]\":\n return [subnet for subnet in self.subnets\n if subnet.gateway and subnet.ip_address]", "def getNetIfaceList(path):\n except_list = [\"bonding_masters\"]\n\n if os.path.exists(path):\n iface_list = [i for i in os.listdir(path) if i not in except_list]\n return iface_list\n\n else:\n return False", "def ngbr_hears_me(self):\n found_me = False\n for ngbr_data in self._ngbrs.values():\n if ngbr_data[\"BCN_CNT\"] > 0:\n # FIXME: bcn frame no longer carries ngbr list\n # frame = ngbr_data[\"BCN_FRAME\"]\n # bcn = frame.cmd\n # assert type(bcn) is HeymacCmdBcn\n # ngbrs_ngbrs = bcn.get_field(HeymacCmd.FLD_NGBRS)\n # if self._lnk_addr in ngbrs_ngbrs:\n # found_me = True\n pass\n return found_me", "def get_all_cyborgs(lights_off=True):\n \n if not USBLIB_AVAILABLE:\n return []\n \n retlist=[]\n #patch the dll loader for openelec systems (or we'll get no backend available error)\n if is_openelec():\n def openelec_loader(find_library=None):\n return ctypes.CDLL('libusb-1.0.so')\n \n libusb1._load_library=openelec_loader\n \n 
devs=usb.core.find(find_all=True,idVendor=VENDOR,idProduct=PRODUCT)\n for dev in devs:\n c=Cyborg(dev)\n try:\n c.initialize(lights_off)\n retlist.append(c)\n except:\n import traceback\n ex=traceback.format_exc()\n sys.stderr.write(\"Cyborg initialization failed : %s\"%ex)\n\n return retlist", "def create_mock_api_discovery(aioclient_mock, bridges):\n aioclient_mock.get(\n URL_NUPNP,\n json=[{\"internalipaddress\": host, \"id\": id} for (host, id) in bridges],\n )\n for host, bridge_id in bridges:\n aioclient_mock.get(\n f\"http://{host}/api/config\",\n json={\"bridgeid\": bridge_id},\n )\n # mock v2 support if v2 found in id\n aioclient_mock.get(\n f\"https://{host}/clip/v2/resources\",\n status=403 if \"v2\" in bridge_id else 404,\n )", "def get_ngbrs_lnk_addrs(self):\n return self._ngbrs.keys()" ]
[ "0.69621164", "0.6684033", "0.6341434", "0.6117558", "0.59657085", "0.5825182", "0.5812872", "0.58122027", "0.567891", "0.55831635", "0.5522842", "0.54669815", "0.5444635", "0.54241157", "0.5409699", "0.53783625", "0.52660066", "0.5216317", "0.5207417", "0.5168369", "0.5164274", "0.5152005", "0.514487", "0.5133551", "0.5114847", "0.51073736", "0.5090478", "0.5081337", "0.5079233", "0.5072932" ]
0.82118034
0
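To make the bridge-preference ordering in search_lxc_bridges above concrete, the same sort key is applied here to a hypothetical interface list; the interface names are invented for illustration.

# Names containing "lxc" sort first, a bare "br0" next, everything else last.
def sort_bridges(a):
    pref = "z"
    if "lxc" in a:
        pref = "a"
    elif "br0" == a:
        pref = "c"
    return f"{pref}_{a}"

bridges = ["eth0", "br0", "virbr0", "lxcbr0"]
bridges.sort(key=sort_bridges)
print(bridges)  # ['lxcbr0', 'br0', 'eth0', 'virbr0']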
Search the first bridge which is potentially available as an LXC bridge
def search_lxc_bridge(): return search_lxc_bridges()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_lxc_bridges():\n bridges = __context__.get(\"lxc.bridges\", None)\n # either match not yet called or no bridges were found\n # to handle the case where lxc was not installed on the first\n # call\n if not bridges:\n bridges = set()\n running_bridges = set()\n bridges.add(DEFAULT_BR)\n try:\n output = __salt__[\"cmd.run_all\"](\"brctl show\")\n for line in output[\"stdout\"].splitlines()[1:]:\n if not line.startswith(\" \"):\n running_bridges.add(line.split()[0].strip())\n except (SaltInvocationError, CommandExecutionError):\n pass\n for ifc, ip in __grains__.get(\"ip_interfaces\", {}).items():\n if ifc in running_bridges:\n bridges.add(ifc)\n elif os.path.exists(f\"/sys/devices/virtual/net/{ifc}/bridge\"):\n bridges.add(ifc)\n bridges = list(bridges)\n # if we found interfaces that have lxc in their names\n # we filter them as being the potential lxc bridges\n # we also try to default on br0 on other cases\n\n def sort_bridges(a):\n pref = \"z\"\n if \"lxc\" in a:\n pref = \"a\"\n elif \"br0\" == a:\n pref = \"c\"\n return f\"{pref}_{a}\"\n\n bridges.sort(key=sort_bridges)\n __context__[\"lxc.bridges\"] = bridges\n return bridges", "def find_bridge(i, j, chain) :\n B = chain.bridges_dict\n br = None\n for b in B.keys() :\n if (B[b].lumen1 == i and B[b].lumen2 == j) or (B[b].lumen1 == j and B[b].lumen2 == i) :\n br = b\n if br == None :\n print('No bridge found to connect these lumens ('+str(i)+', '+str(j)+') !')\n return br", "def bridge(gwc = 0, brc = bridge_int):\n# bridge interface list\n br_interface = []\n# bridge ip addresses list\n gw_ipaddr = []\n# bridge network list\n gw_network = []\n# gatweway start number list\n gw_number = 0\n\n# fill all lists for bridge\n for i in netifaces.ifaddresses(bridge_int)[netifaces.AF_INET]:\n br_interface.append([gw_number, ' ', i['addr'], i['netmask']])\n gw_ipaddr.append(i['addr'])\n gw_network.append(i['netmask'])\n gw_number = gw_number + 1\n br_interface[0][1] = bridge_int\n\n if gwc == 'check':\n return (br_interface, gw_ipaddr, gw_network)\n\n# print jadm gateways table\n br_menu = [\"Number\", \"Bridge name\", \"Gateway IP Address\", \"Gatewy Network Mask\"]\n print tabulate(br_interface, br_menu)\n\n# return bridge interface name, ip addresses and network mask\n return (br_interface, gw_ipaddr, gw_network)", "def bridge_search(start, connection, components):\n connecting = [c for c in components if connection in c]\n if not connecting:\n return [start]\n bridges = []\n for comp in connecting:\n remaining = components[:]\n remaining.remove(comp)\n new_connection = comp[0] if comp[0] != connection else comp[1]\n bridges += bridge_search(\n start + [comp], new_connection, remaining)\n return bridges", "def get_bridge(self, name):\n try:\n assert name in self.list_bridges()\n return self.devices[name]\n except KeyError:\n raise UnknownDevice(name)", "def bridge_network_check(ip, bridge_ip, bridge_netmask):\n# convert vars to unicode \n ip = unicode(ip)\n bridge_ip = unicode(bridge_ip)\n bridge_netmask = unicode(bridge_netmask)\n# by default ip is not in bridge network \n brctl = 0\n\n# bridge insterface ip network\n brdige_network = IPv4Interface('%s/%s' % (bridge_ip, bridge_netmask)).network\n\n# check if ip is from bridge network and return bridge control var (brctl) = true\n if IPv4Address(ip) in list(IPv4Network(brdige_network)):\n brctl = 1\n\n# return brctl and bridge ip network \n return brctl, brdige_network", "def find_connections():\n # print \"External\"\n # print findservices('00:0D:93:19:C8:68')\n # print 
findservices('bc:f5:ac:84:81:0c')\n # print finddevices()\n # print findservices(gethostaddr())\n # print gethostclass()\n print \"Your address: \", lb.gethostaddr()\n print lb.finddevicename(lb.gethostaddr())\n s = lb.socket()\n #s.bind((\"\", 0)) # RFCOMM port\n #s.bind((\"\", 1)) # RFCOMM port\n s.bind((\"\", 2)) # RFCOMM port\n print \"About to listen\"\n s.listen(1)\n print \"About to advertise\"\n lb.advertise(\"LightBlueService\", s, lb.RFCOMM)\n print \"Advertised at {} and listening on channel {}...\".format(s.getsockname()[0], s.getsockname()[1])\n print \"Waiting to accept\"\n # s.setblocking(1)\n try:\n conn, addr = s.accept()\n except KeyboardInterrupt:\n print \"Closing connection due to keyboard intterupt\"\n s.close()\n raise KeyboardInterrupt\n # Set timeout for 1 second\n # s.settimeout(1.0)\n print \"Connected by\", addr\n return conn, addr, s", "def list_bridges(self):\n return [x for x,y in self.devices.items() if y.device_type == \"Bridge\"]", "def _ovs_br_ex_port_is_dpdk_interface(self):\n cmd = (\n 'dpdk-devbind.py --status-dev net '\n '| grep ^$(ovs-vsctl --bare --columns options '\n 'find interface external_ids:charm-ovn-chassis=br-ex '\n '|cut -f2 -d=)'\n '|grep \"drv=vfio-pci unused=$\"')\n for unit in zaza.model.get_units(self.application_name):\n zaza.utilities.juju.remote_run(\n unit.name, cmd, model_name=self.model_name, fatal=True)", "def find_target_device(ble_device, name):\r\n scan_report = ble_device.scanner.start_scan().wait()\r\n\r\n for report in scan_report.advertising_peers_found:\r\n if report.advertise_data.local_name == name:\r\n return report.peer_address", "def find_stick():\n out = subprocess.check_output(\n \"gdbus introspect --system --dest org.freedesktop.UDisks \"\n \"--object-path /org/freedesktop/UDisks/devices --recurse \"\n \"--only-properties\".split())\n devs = zip(*((re.match(r\".* = '?(.*?)'?;\", x).group(1)\n for x in out.splitlines()\n if \"DriveConnectionInterface =\" in x\n or \"DeviceIsPartition =\" in x\n or \"DeviceFile = \" in x),)*3)\n try:\n return next(dev[2] for dev in devs if dev[0] == 'usb'\n and dev[1] == 'true')\n except StopIteration:\n return None", "def process_hosts(root_bridge_ips: Iterable[str], community: str,\n do_recurse=False, all_ports=False, resolve_hostnames=True) -> \\\n Tuple[Dict[str, Bridge], Dict[str, str], Dict[str, List[str]], Dict[str, str], str]:\n ips_to_visit = set(list(root_bridge_ips))\n visited_chassis_ids = set()\n visited_ips = set()\n\n bridges: Dict[str, Bridge] = {}\n all_bridge_macs = set()\n arp = {}\n\n walk = partial(snmp_walk, community=community)\n\n while ips_to_visit:\n host = ips_to_visit.pop()\n if host in visited_ips:\n continue\n visited_ips.add(host)\n\n print(\"VISITING\", host, file=sys.stderr)\n\n # Skip if chassis ID not found or has already been seen\n try:\n lldpLocChassisId = walk(host, '1.0.8802.1.1.2.1.3.2', 'hex').values()\n except ConnectionError as e:\n print(str(e) + f\" -- skipping {host}!\", file=sys.stderr)\n continue\n\n if not lldpLocChassisId:\n print(f\"Got no ChassisId from {host} -- missing LLDP support?\", file=sys.stderr)\n continue\n lldpLocChassisId = tuple(lldpLocChassisId)[0]\n if lldpLocChassisId in visited_chassis_ids:\n continue\n visited_chassis_ids.add(lldpLocChassisId)\n\n all_bridge_macs.add(lldpLocChassisId) # chassis id looks like a MAC and some switches use it for all their ports\n\n print(\" - Getting local info...\", file=sys.stderr)\n\n # Check that it's a bridge\n lldpLocSysCapSupported = int(tuple(walk(host, 
'1.0.8802.1.1.2.1.3.5', 'hex').values())[-1], 16)\n is_bridge = (lldpLocSysCapSupported & 32) != 0\n if not is_bridge:\n print(f\"Host {host} does not announce Bridge type LLDP capability. Skipping.\", file=sys.stderr)\n continue\n\n dot1dTpFdbPort_to_portnum = {int(k): v for (k, v) in walk(host, '1.3.6.1.2.1.17.1.4.1.2', 'int').items()}\n\n # Find local management IP addresses (if supported)\n local_ips = set()\n lldpLocManAddrIfId = walk(host, '1.0.8802.1.1.2.1.3.8.1.5', 'preview') # local man addresses\n for oid, port_id in lldpLocManAddrIfId.items():\n local_ips.add(read_ipv4_from_oid_tail(oid))\n\n lldpLocSysName = walk(host, '1.0.8802.1.1.2.1.3.3')\n lldpLocSysDesc = walk(host, '1.0.8802.1.1.2.1.3.4')\n\n this_bridge = Bridge(\n chassis_id=lldpLocChassisId,\n ip_addresses=list({host} | local_ips),\n name=next(iter(lldpLocSysName.values())),\n desc=next(iter(lldpLocSysDesc.values())) or '',\n neighbors=[],\n ports=defaultdict(lambda: Port(name='', speed=0, remote_macs=[], remote_ips=[], local_mac=None, interlink=False)))\n\n # Find IP addresses to neighbor bridges\n print(\" - Getting neighbors...\", file=sys.stderr)\n lldpRemManAddrTable = walk(host, '1.0.8802.1.1.2.1.4.2.1.4', 'preview')\n for oid, port_id in lldpRemManAddrTable.items():\n time_mark, local_port_num, rem_index, addr_subtype, *rest = split_numbers(oid)\n if addr_subtype == 1: # ipv4\n if do_recurse:\n ips_to_visit.add(read_ipv4_from_oid_tail(oid))\n\n # Port names\n print(\" - Getting ports...\", file=sys.stderr)\n for port, name in walk(host, '1.3.6.1.2.1.31.1.1.1.1', 'any').items(): # ifName\n this_bridge.ports[int(port)].name = name\n # Port speeds\n for port, speed in walk(host, '1.3.6.1.2.1.31.1.1.1.15', 'int').items(): # ifHighSpeed\n this_bridge.ports[int(port)].speed = speed\n # Local port macs\n for port, mac in walk(host, '1.3.6.1.2.1.2.2.1.6', 'hex').items(): # ifPhysAddress\n this_bridge.ports[int(port)].local_mac = mac\n all_bridge_macs.add(mac)\n\n # Read ARP table\n print(\" - Reading device ARP table...\", file=sys.stderr)\n atPhysAddress = walk(host, '1.3.6.1.2.1.3.1.1.2', 'hex')\n for oid, mac in atPhysAddress.items():\n ip = read_ipv4_from_oid_tail(oid, with_len=False)\n arp[ip] = mac\n\n # Map remote (learned) MACs to ports\n print(\" - Getting MACs for ports...\", file=sys.stderr)\n macs_per_port = defaultdict(set)\n ports_per_mac = defaultdict(set)\n dot1qTpFdbPort = walk(host, '1.3.6.1.2.1.17.7.1.2.2.1.2', 'int')\n for k, port_idx in dot1qTpFdbPort.items():\n port = port_idx\n if port_idx in dot1dTpFdbPort_to_portnum:\n port = dot1dTpFdbPort_to_portnum[port_idx]\n parts = split_numbers(k)\n vlan = int(parts[0])\n if port:\n mac = ''.join([('%02x' % x) for x in parts[1:]])\n if mac != '0000000000':\n assert(port in this_bridge.ports)\n if mac not in this_bridge.ports[port].remote_macs:\n this_bridge.ports[port].remote_macs.append(mac)\n macs_per_port[port].add(mac)\n ports_per_mac[mac].add(port)\n\n #lldpRemSysCapSupported = walk(host, '1.0.8802.1.1.2.1.4.1.1.11', 'hex')\n ##lldpLocSysCapSupported = int(tuple(walk(host, '1.0.8802.1.1.2.1.3.5', 'hex').values())[-1], 16)\n ##is_bridge = (lldpLocSysCapSupported & 32) != 0\n #print(lldpRemSysCapSupported, file=sys.stderr)\n\n print(\" - Getting remotes...\", file=sys.stderr)\n lldpRemChassisId = walk(host, '1.0.8802.1.1.2.1.4.1.1.5', 'hex')\n for k, chassis_id in lldpRemChassisId.items():\n time_mark, port, idx = split_numbers(k)\n if chassis_id not in this_bridge.neighbors:\n this_bridge.neighbors.append(chassis_id)\n\n this_bridge.ports = 
dict(this_bridge.ports)\n bridges[this_bridge.chassis_id] = this_bridge\n\n\n # Just to be sure: lookup MACs for visited bridge IPs\n for ip, mac in arp.items():\n if ip in visited_ips:\n all_bridge_macs.add(mac)\n\n # Reverse ARP table (MAC -> set of IPs)\n rarp = {}\n for k, v in arp.items():\n rarp.setdefault(v, set()).add(k)\n\n # Find hostnames for ip addresses using multiple threads (the query is VERY slow)\n ip_to_hostname = {}\n with PoolExecutor(max_workers=50) as executor:\n ips = []\n for b in bridges.values():\n ips.extend(b.ip_addresses)\n for p in b.ports.values():\n for mac in [*p.remote_macs, p.local_mac, b.chassis_id]:\n ips.extend(rarp.get(mac) or [])\n ips = set(ips)\n\n def fetch_name(ip):\n try:\n return socket.gethostbyaddr(ip)\n except (socket.gaierror, socket.herror):\n return [None, [], [ip]]\n\n if resolve_hostnames:\n print(f\"Resolving hostnames for {len(ips)} IP addresses...\", file=sys.stderr)\n for res in executor.map(fetch_name, ips):\n for ip in res[2]:\n if res[0]:\n ip_to_hostname[ip] = res[0]\n\n # Cleanup and extend some values\n print(\"Cleaning up and extending...\", file=sys.stderr)\n for b in bridges.values():\n print(f\" - Bridge {b.name}...\", file=sys.stderr)\n\n # Replace macs with NeighborInfos in neighbor lists\n print(\" - extending NeighborInfos...\", file=sys.stderr)\n neigh_infos = []\n for chassis_id in b.neighbors:\n ni = NeighborInfo(is_bridge=False, name='', ips=[], macs=[chassis_id], chassis_id=chassis_id)\n b2 = bridges.get(chassis_id)\n if b2:\n ni = NeighborInfo(is_bridge=True, name=b2.name, ips=list(b2.ip_addresses), chassis_id=chassis_id,\n macs=list({chassis_id, *[p.local_mac for p in b2.ports.values()]}))\n\n ni.in_ports = list({k for k,p in b.ports.items() if (set(ni.macs).intersection(set(p.remote_macs)))})\n for ips in ((rarp.get(m) or []) for m in ni.macs):\n ni.ips.extend(ips)\n ni.ips = list(set(ni.ips))\n ni.name = ni.name or ip_to_hostname.get([*ni.ips, ''][0]) or ''\n neigh_infos.append(ni)\n\n b.neighbors = neigh_infos\n\n # Delete unused ports from results\n if not all_ports:\n print(\" - filtering unused ports...\", file=sys.stderr)\n b.ports = {k: v for k, v in b.ports.items() if (v.remote_macs or v.remote_ips)}\n\n # Update port contents\n print(\" - updating port contents...\", file=sys.stderr)\n for p in b.ports.values():\n # Mark all ports with bridge management addresses as \"interlink\"\n for bm in all_bridge_macs:\n p.interlink |= (bm in p.remote_macs)\n # Add a list of IP addresses seen behind a port\n for mac in p.remote_macs:\n p.remote_ips.extend(rarp.get(mac) or [])\n p.remote_macs = sorted(p.remote_macs)\n p.remote_ips = sorted(list(set(p.remote_ips))) # prune duplicates\n\n # Sort for nicer output TODO: \"natural sorting\" for IPs\n print(\"Sort ARP tables...\", file=sys.stderr)\n arp = dict(sorted(arp.items()))\n rarp = dict(sorted(rarp.items()))\n\n res_dict = {\n 'timestamp': time.time(),\n 'bridges': [b.as_dict() for b in bridges.values()],\n 'arp': arp,\n 'rarp': {k: list(v) for k, v in rarp.items()},\n 'ip_to_hostname': ip_to_hostname\n }\n\n return bridges, arp, rarp, ip_to_hostname, json.dumps(res_dict, indent=4)", "def bt_scan():\n print(\"Searching for nearby devices...\")\n explore_devices = []\n if explorepy._bt_interface == 'sdk':\n device_manager = explorepy.exploresdk.ExploreSDK_Create()\n nearby_devices = device_manager.PerformDeviceSearch()\n for bt_device in nearby_devices:\n if \"Explore\" in bt_device.name:\n print(\"Device found: %s - %s\" % (bt_device.name, bt_device.address))\n 
explore_devices.append((bt_device.name, bt_device.address))\n else:\n import bluetooth\n nearby_devices = bluetooth.discover_devices(lookup_names=True)\n for address, name in nearby_devices:\n if \"Explore\" in name:\n print(\"Device found: %s - %s\" % (name, address))\n explore_devices.append((address, name))\n\n if not nearby_devices:\n print(\"No Devices found\")\n\n return explore_devices", "def _scan_once(self):\n logging.info('Starting hue bridge scan')\n response = requests.get('https://www.meethue.com/api/nupnp')\n assert response.status_code == 200, response.status_code\n bridges = response.json()\n for bridge in bridges:\n bridge_id = bridge['id']\n bridge_ip = bridge['internalipaddress']\n bridge_name = None\n\n # Event explicity doesn't contain ip (it might change)\n # or id (its in the device path)\n event = None\n try:\n bridge = phue.Bridge(ip=bridge_ip)\n bridge_name = bridge.name\n\n if bridge_id not in self._bridges:\n self._bridges[bridge_id] = bridge\n event = {'name': bridge_name, 'linked': True}\n except phue.PhueRegistrationException:\n if bridge_id in self._bridges:\n del self._bridges[bridge_id]\n event = {'linked': False}\n\n if event is not None:\n logging.debug('Hue bridge \\'%s\\' (%s) found at %s - linked=%s',\n bridge_name, bridge_id, bridge_ip, event['linked'])\n\n self._callback('hue_bridge', 'hue-%s' % bridge_id, event)\n\n # Now find all the lights\n for bridge_id, bridge in self._bridges.iteritems():\n lights_by_id = bridge.get_light_objects(mode='id')\n for light_id in lights_by_id.iterkeys():\n light_details = bridge.get_light(light_id)\n logging.debug('Hue light %d (\\'%s\\') found on bridge \\'%s\\', on=%s',\n light_id, light_details['name'], bridge_id,\n light_details['state']['on'])\n\n light_id = 'hue-%s-%d' % (bridge_id, light_id)\n if self._lights.get(light_id, None) != light_details:\n self._callback('hue_light', light_id, light_details)\n self._lights[light_id] = light_details", "def bridge_no_such_device(self, event):\n ret = self.get_results_stats(event.results)\n if ret:\n return {'bridge-no-such-device': ret}, 'ovs-vswitchd'", "def find_nic():\n result = subprocess.run([\"iw\", \"dev\"], capture_output=True).stdout.decode()\n network_interface_controllers = wlan_code.findall(result)\n return network_interface_controllers", "def find_elb ( elb_conn, elb_name ) :\n try :\n elb_r = elb_conn.get_all_load_balancers( load_balancer_names = [ elb_name ] )\n if len( elb_r ) > 0 :\n return elb_r[ 0 ]\n except :\n return None", "def _find_device(self):\n found_device = False\n nearby_devices = None\n try:\n nearby_devices = self._adapter.scan()\n except Exception:\n pass\n\n if nearby_devices is not None:\n for device in nearby_devices:\n name = device['name']\n if name is not None and name.startswith(self._search_name):\n self._address = device['address']\n print(f'Found device named: {name} at {self._address}')\n found_device = True\n break\n\n return found_device", "def bridgeIF(self):\r\n return self._bridgeIF", "def _find_adapter(self):\n required_interfaces = [GATT_MANAGER_IFACE, LE_ADVERTISING_MANAGER_IFACE]\n object_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, '/'), DBUS_OM_IFACE)\n objects = object_manager.GetManagedObjects()\n\n for object_path, properties in objects.items():\n missing_interfaces = [i for i in required_interfaces if i not in properties.keys()]\n if missing_interfaces:\n continue\n return object_path.rsplit('/', 1)[1]\n\n return None", "def find_by_status(self, host, state):", "def get_lights(bridge):\n\n 
target_names = [\n \"Console Lamp\",\n \"Bedroom Table Lamp\",\n \"Kitchen light\",\n ]\n\n targets = [light for light in bridge.lights if light.name in target_names]\n\n if len(targets) != len(target_names):\n print(\"%s: not found ... %s\" % (target_names, targets))\n exit(1)\n\n return targets", "def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)", "def detect_bridge():\n # Initialize color ranges for detection\n color_range = [Color(\"Brug\", [0, 0, 0], [0, 255, 107]),\n Color(\"Gat\", [0, 0, 0], [0, 0, 255]),\n Color(\"Rand\", [0, 0, 185], [0, 0, 255]),\n Color(\"White-ish\", [0, 0, 68], [180, 98, 255])]\n\n cam = Recognize(color_range)\n cam.run()", "def _ovs_br_ex_port_is_system_interface(self):\n cmd = ('ip link show dev $(ovs-vsctl --bare --columns name '\n 'find port external_ids:charm-ovn-chassis=br-ex)')\n for unit in zaza.model.get_units(self.application_name):\n zaza.utilities.juju.remote_run(\n unit.name, cmd, model_name=self.model_name, fatal=True)", "def get_net_obj(host, object_type, name, refresh=False):\n objs = get_net_objs(host=host, object_type=object_type, refresh=refresh)\n obj_name = name.lower()\n if objs is not None:\n for obj in objs:\n if object_type == \"portgroup\" or object_type == \"proxyswitch\":\n if obj.spec.name.lower() == obj_name:\n return obj\n elif object_type == \"pnic\" or object_type == \"vnic\":\n if obj.device.lower() == obj_name:\n return obj\n elif obj.name.lower() == obj_name:\n return obj\n return None", "def identify_remote_router(remote_address):\n global DATA\n port = remote_address[1]\n for every_router in DATA[\"neighbor\"]:\n if every_router[2] is port:\n return every_router[0]", "def _ovs_br_ex_interface_not_in_error(self):\n cmd = (\n 'ovs-vsctl --bare --columns error '\n 'find interface external_ids:charm-ovn-chassis=br-ex')\n for unit in zaza.model.get_units(self.application_name):\n result = zaza.utilities.juju.remote_run(\n unit.name,\n cmd,\n model_name=self.model_name,\n fatal=True).rstrip()\n assert result == '', result", "def test_home_bridge(mock_pre_serv):\n bridge = HomeBridge('TestBridge', 'test.bridge', b'123-45-678')\n\n assert bridge.display_name == 'TestBridge'\n assert bridge.pincode == b'123-45-678'\n assert len(bridge.services) == 2\n\n assert bridge.services[0].display_name == SERV_ACCESSORY_INFO\n assert bridge.services[1].display_name == SERV_BRIDGING_STATE\n\n char_model = bridge.services[0].get_characteristic(CHAR_MODEL)\n assert char_model.get_value() == 'test.bridge'", "def get_first_network_interface_matching(self, predicate):\n for network in self.raw_vm.network:\n if predicate(network):\n return network\n return None" ]
[ "0.7521935", "0.67016417", "0.61291337", "0.5933337", "0.58334655", "0.57994896", "0.5795875", "0.55923074", "0.55749744", "0.55578643", "0.55345637", "0.5504249", "0.5502816", "0.5501173", "0.5464856", "0.545991", "0.54234004", "0.5419086", "0.5386185", "0.5383288", "0.5365224", "0.53510714", "0.5313928", "0.5310648", "0.52871466", "0.52799636", "0.52301013", "0.5216136", "0.5209843", "0.52028644" ]
0.8340334
0
Interface between the salt.cloud.lxc driver and lxc.init. ``vm_`` is a mapping of vm opts in the salt.cloud format, as documented for the lxc driver.
def cloud_init_interface(name, vm_=None, **kwargs):
    if vm_ is None:
        vm_ = {}
    vm_ = copy.deepcopy(vm_)
    vm_ = salt.utils.dictupdate.update(vm_, kwargs)
    profile_data = copy.deepcopy(vm_.get("lxc_profile", vm_.get("profile", {})))
    if not isinstance(profile_data, (dict, (str,))):
        profile_data = {}
    profile = get_container_profile(profile_data)

    def _cloud_get(k, default=None):
        return vm_.get(k, profile.get(k, default))

    if name is None:
        name = vm_["name"]
    # if we are on ubuntu, default to ubuntu
    default_template = ""
    if __grains__.get("os", "") in ["Ubuntu"]:
        default_template = "ubuntu"
    image = _cloud_get("image")
    if not image:
        _cloud_get("template", default_template)
    backing = _cloud_get("backing", "dir")
    if image:
        profile["template"] = image
    vgname = _cloud_get("vgname", None)
    if vgname:
        profile["vgname"] = vgname
    if backing:
        profile["backing"] = backing
    snapshot = _cloud_get("snapshot", False)
    autostart = bool(_cloud_get("autostart", True))
    dnsservers = _cloud_get("dnsservers", [])
    dns_via_dhcp = _cloud_get("dns_via_dhcp", True)
    password = _cloud_get("password", "s3cr3t")
    password_encrypted = _cloud_get("password_encrypted", False)
    fstype = _cloud_get("fstype", None)
    lvname = _cloud_get("lvname", None)
    thinpool = _cloud_get("thinpool", None)
    pub_key = _cloud_get("pub_key", None)
    priv_key = _cloud_get("priv_key", None)
    size = _cloud_get("size", "20G")
    script = _cloud_get("script", None)
    script_args = _cloud_get("script_args", None)
    users = _cloud_get("users", None)
    if users is None:
        users = []
    ssh_username = _cloud_get("ssh_username", None)
    if ssh_username and (ssh_username not in users):
        users.append(ssh_username)
    network_profile = _cloud_get("network_profile", None)
    nic_opts = kwargs.get("nic_opts", None)
    netmask = _cloud_get("netmask", "24")
    path = _cloud_get("path", None)
    bridge = _cloud_get("bridge", None)
    gateway = _cloud_get("gateway", None)
    unconditional_install = _cloud_get("unconditional_install", False)
    force_install = _cloud_get("force_install", True)
    config = _get_salt_config(_cloud_get("config", {}), **vm_)
    default_nic = _cloud_get("default_nic", DEFAULT_NIC)
    # do the interface with lxc.init mainly via nic_opts
    # to avoid extra and confusing extra use cases.
    if not isinstance(nic_opts, dict):
        nic_opts = salt.utils.odict.OrderedDict()
    # have a reference to the default nic
    eth0 = nic_opts.setdefault(default_nic, salt.utils.odict.OrderedDict())
    # lxc config is based of ifc order, be sure to use odicts.
    if not isinstance(nic_opts, salt.utils.odict.OrderedDict):
        bnic_opts = salt.utils.odict.OrderedDict()
        bnic_opts.update(nic_opts)
        nic_opts = bnic_opts
    gw = None
    # legacy salt.cloud scheme for network interfaces settings support
    bridge = _cloud_get("bridge", None)
    ip = _cloud_get("ip", None)
    mac = _cloud_get("mac", None)
    if ip:
        fullip = ip
        if netmask:
            fullip += f"/{netmask}"
        eth0["ipv4"] = fullip
        if mac is not None:
            eth0["mac"] = mac
    for ix, iopts in enumerate(_cloud_get("additional_ips", [])):
        ifh = f"eth{ix + 1}"
        ethx = nic_opts.setdefault(ifh, {})
        if gw is None:
            gw = iopts.get("gateway", ethx.get("gateway", None))
            if gw:
                # only one and only one default gateway is allowed !
                eth0.pop("gateway", None)
                gateway = None
                # even if the gateway if on default "eth0" nic
                # and we popped it will work
                # as we reinject or set it here.
                ethx["gateway"] = gw
        elink = iopts.get("link", ethx.get("link", None))
        if elink:
            ethx["link"] = elink
        # allow dhcp
        aip = iopts.get("ipv4", iopts.get("ip", None))
        if aip:
            ethx["ipv4"] = aip
        nm = iopts.get("netmask", "")
        if nm:
            ethx["ipv4"] += f"/{nm}"
        for i in ("mac", "hwaddr"):
            if i in iopts:
                ethx["mac"] = iopts[i]
                break
        if "mac" not in ethx:
            ethx["mac"] = salt.utils.network.gen_mac()
    # last round checking for unique gateway and such
    gw = None
    for ethx in [a for a in nic_opts]:
        ndata = nic_opts[ethx]
        if gw:
            ndata.pop("gateway", None)
        if "gateway" in ndata:
            gw = ndata["gateway"]
            gateway = None
    # only use a default bridge / gateway if we configured them
    # via the legacy salt cloud configuration style.
    # On other cases, we should rely on settings provided by the new
    # salt lxc network profile style configuration which can
    # be also be overridden or a per interface basis via the nic_opts dict.
    if bridge:
        eth0["link"] = bridge
    if gateway:
        eth0["gateway"] = gateway
    #
    lxc_init_interface = {}
    lxc_init_interface["name"] = name
    lxc_init_interface["config"] = config
    lxc_init_interface["memory"] = _cloud_get("memory", 0)  # nolimit
    lxc_init_interface["pub_key"] = pub_key
    lxc_init_interface["priv_key"] = priv_key
    lxc_init_interface["nic_opts"] = nic_opts
    for clone_from in ["clone_from", "clone", "from_container"]:
        # clone_from should default to None if not available
        lxc_init_interface["clone_from"] = _cloud_get(clone_from, None)
        if lxc_init_interface["clone_from"] is not None:
            break
    lxc_init_interface["profile"] = profile
    lxc_init_interface["snapshot"] = snapshot
    lxc_init_interface["dnsservers"] = dnsservers
    lxc_init_interface["fstype"] = fstype
    lxc_init_interface["path"] = path
    lxc_init_interface["vgname"] = vgname
    lxc_init_interface["size"] = size
    lxc_init_interface["lvname"] = lvname
    lxc_init_interface["thinpool"] = thinpool
    lxc_init_interface["force_install"] = force_install
    lxc_init_interface["unconditional_install"] = unconditional_install
    lxc_init_interface["bootstrap_url"] = script
    lxc_init_interface["bootstrap_args"] = script_args
    lxc_init_interface["bootstrap_shell"] = _cloud_get("bootstrap_shell", "sh")
    lxc_init_interface["bootstrap_delay"] = _cloud_get("bootstrap_delay", None)
    lxc_init_interface["autostart"] = autostart
    lxc_init_interface["users"] = users
    lxc_init_interface["password"] = password
    lxc_init_interface["password_encrypted"] = password_encrypted
    # be sure not to let objects goes inside the return
    # as this return will be msgpacked for use in the runner !
    lxc_init_interface["network_profile"] = network_profile
    for i in ["cpu", "cpuset", "cpushare"]:
        if _cloud_get(i, None):
            try:
                lxc_init_interface[i] = vm_[i]
            except KeyError:
                lxc_init_interface[i] = profile[i]
    return lxc_init_interface
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cloud_init(name, vm_=None, **kwargs):\n init_interface = cloud_init_interface(name, vm_, **kwargs)\n name = init_interface.pop(\"name\", name)\n return init(name, **init_interface)", "def __init__(self, conn, vm_name):\n self.vm = self.get_obj(conn, vim.VirtualMachine, vm_name)\n assert(self.vm)\n self.spec = vim.vm.ConfigSpec()", "def do_lsvm(self, args):\n l_opt = False\n hosts = None\n img_opt = owner_opt = type_opt = vlan_opt = None\n if args:\n args = args.split()\n missing_arg = 'missing value after'\n while args:\n arg = args.pop(0)\n if arg == '-l':\n l_opt = True\n elif arg == '-g':\n type_opt = 'generic'\n elif arg == '-s':\n type_opt = 'stc'\n elif arg in ('-i', '--image'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n img_opt = args.pop(0)\n elif arg in ('-u', '--user'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n owner_opt = args.pop(0)\n elif arg in ('-v', '--vlan'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n vlan_opt = int(args.pop(0))\n elif arg == '--mine':\n if not self._user:\n print('login required for', arg, file=sys.stderr)\n return\n owner_opt = self._user\n elif arg[0] == '-':\n print('unrecognized argument:', arg, file=sys.stderr)\n return\n else:\n args.insert(0, arg)\n hosts = args\n break\n\n if hosts:\n print('---> hosts:', hosts)\n active_vms = self._qm.get_active_vms(hosts)\n else:\n active_vms = self._qm.get_active_vms()\n\n print('Active VM instances:')\n keep = []\n while active_vms:\n avm = active_vms.pop()\n if type_opt and avm.get('vm_type') != type_opt:\n continue\n if vlan_opt and avm.get('vlan_id') != vlan_opt:\n continue\n if owner_opt and avm.get('owner') != owner_opt:\n continue\n if img_opt and avm.get('image_src') != img_opt:\n continue\n keep.append(avm)\n\n for avm in keep:\n img = avm.pop('image_src')\n vlan = avm.pop('vlan_id')\n vmip = avm.pop('vm_ip')\n owner = avm.pop('owner')\n running = datetime.timedelta(seconds=int(avm.pop('run_time')))\n if not vmip:\n vmip = self._qm.get_vm_ip(avm['vm_id'])\n print('%-15s %-4s %s %s (%s)' %\n (vmip, vlan, img, owner, running))\n if l_opt:\n for k, v in avm.viewitems():\n print(' %s: %s' % (k,v))\n print()", "def __init__(self, machine, vm_name):\n self.machine = machine\n self.vm_name = vm_name", "def __init__(self, libvirt_info):\n\t\tsuper(VM, self).__init__()\n\t\tself.libvirt_info = libvirt_info", "def create(vm_):\n name = vm_[\"name\"]\n record = {}\n ret = {}\n\n # fire creating event\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"starting create\",\n \"salt/cloud/{}/creating\".format(name),\n args={\"name\": name, \"profile\": vm_[\"profile\"], \"provider\": vm_[\"driver\"]},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n log.debug(\"Adding %s to cloud cache.\", name)\n __utils__[\"cloud.cachedir_index_add\"](\n vm_[\"name\"], vm_[\"profile\"], \"xen\", vm_[\"driver\"]\n )\n\n # connect to xen\n session = _get_session()\n\n # determine resource pool\n resource_pool = _determine_resource_pool(session, vm_)\n\n # determine storage repo\n storage_repo = _determine_storage_repo(session, resource_pool, vm_)\n\n # build VM\n image = vm_.get(\"image\")\n clone = vm_.get(\"clone\")\n if clone is None:\n clone = True\n log.debug(\"Clone: %s \", clone)\n\n # fire event to read new vm properties (requesting)\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"requesting instance\",\n \"salt/cloud/{}/requesting\".format(name),\n sock_dir=__opts__[\"sock_dir\"],\n 
transport=__opts__[\"transport\"],\n )\n\n # create by cloning template\n if clone:\n _clone_vm(image, name, session)\n else:\n _copy_vm(image, name, session, storage_repo)\n\n # provision template to vm\n _provision_vm(name, session)\n vm = _get_vm(name, session)\n\n # start vm\n start(name, None, session)\n\n # get new VM\n vm = _get_vm(name, session)\n\n # wait for vm to report IP via guest tools\n _wait_for_ip(name, session)\n\n # set static IP if configured\n _set_static_ip(name, session, vm_)\n\n # if not deploying salt then exit\n deploy = vm_.get(\"deploy\", True)\n log.debug(\"delopy is set to %s\", deploy)\n if deploy:\n record = session.xenapi.VM.get_record(vm)\n if record is not None:\n _deploy_salt_minion(name, session, vm_)\n else:\n log.debug(\"The Salt minion will not be installed, deploy: %s\", vm_[\"deploy\"])\n record = session.xenapi.VM.get_record(vm)\n ret = show_instance(name)\n ret.update({\"extra\": record})\n\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"created instance\",\n \"salt/cloud/{}/created\".format(name),\n args={\"name\": name, \"profile\": vm_[\"profile\"], \"provider\": vm_[\"driver\"]},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n return ret", "def mountLXCPrivate(self,node,vmid):\n post_data = None\n data = self.connect('post','nodes/%s/lxc/%s/status/mount' % (node,vmid), post_data)\n return data", "def startLXCContainer(self,node,vmid):\n post_data = None\n data = self.connect('post','nodes/%s/lxc/%s/status/start' % (node,vmid), post_data)\n return data", "def launch_vm(vm_id, vm_metadata):\n print('\\nCreating disk and vm with ID:', vm_id)\n vm_metadata['vm_id'] = vm_id\n ram_mbs, num_cpus, num_gpus = required_resources_for_method(\n vm_metadata['method'],\n bool(vm_metadata['pretrained_r_nets_path']))\n\n create_disk_cmd = (\n 'gcloud compute disks create '\n '\"{disk_name}\" --zone \"{zone}\" --source-snapshot \"{source_snapshot}\" '\n '--type \"pd-standard\" --project=\"{gcloud_project}\" '\n '--size=200GB'.format(\n disk_name=vm_id,\n zone=ZONE,\n source_snapshot=SOURCE_SNAPSHOT,\n gcloud_project=GCLOUD_PROJECT,\n ))\n print('Calling', create_disk_cmd)\n # Don't fail if disk already exists.\n subprocess.call(create_disk_cmd, shell=True)\n\n create_instance_cmd = (\n 'gcloud compute --project={gcloud_project} instances create '\n '{instance_name} --zone={zone} --machine-type={machine_type} '\n '--subnet=default --network-tier=PREMIUM --maintenance-policy=TERMINATE '\n '--service-account={service_account} '\n '--scopes=storage-full,compute-rw '\n '--accelerator=type=nvidia-tesla-p100,count={gpu_count} '\n '--disk=name={disk_name},device-name={disk_name},mode=rw,boot=yes,'\n 'auto-delete=yes --restart-on-failure '\n '--metadata-from-file startup-script=./scripts/vm_drop_root.sh '\n '--metadata {vm_metadata} --async'.format(\n instance_name=vm_id,\n zone=ZONE,\n machine_type='custom-{num_cpus}-{ram_mbs}'.format(\n num_cpus=num_cpus, ram_mbs=ram_mbs),\n gpu_count=num_gpus,\n disk_name=vm_id,\n vm_metadata=(\n ','.join('{}={}'.format(k, v) for k, v in vm_metadata.items())),\n gcloud_project=GCLOUD_PROJECT,\n service_account=SERVICE_ACCOUNT,\n ))\n\n print('Calling', create_instance_cmd)\n subprocess.check_call(create_instance_cmd, shell=True)", "def test_initialize_default(self, create_mock, libvirt_mock):\n resources = lxc.LXCResources('foo', {'domain': 'bar'})\n libvirt_mock.open.assert_called_with('lxc:///')\n create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name=None)", "def 
boot_VM(self, server, vmfile):\n vmaddr = \"\"\n fail = False\n \n #print vmfile\n #-----read template into string -------------------------\n #s=open('./share/examples/ubuntu_context.one','r').read()\n \n s = open(os.path.expanduser(vmfile), 'r').read()\n #self.logger.debug(\"Vm template:\\n\"+s)\n \n #-----Start VM-------------------------------------------\n vm = server.one.vm.allocate(self.oneauth, s)\n \n #print self.oneauth\n #print vm\n \n if vm[0]:\n self.logger.debug(\"VM ID: \" + str(vm[1]))\n \n #monitor VM\n booted = False\n maxretry = self.wait_max / 5 #time that the VM has to change from penn to runn \n retry = 0\n while not booted and retry < maxretry: #eventually the VM has to boot or fail\n try:\n #-------Get Info about VM -------------------------------\n vminfo = server.one.vm.info(self.oneauth, vm[1])\n #print vminfo[1]\n manifest = parseString(vminfo[1])\n \n #VM_status (init=0, pend=1, act=3, fail=7)\n vm_status = manifest.getElementsByTagName('STATE')[0].firstChild.nodeValue.strip()\n \n if vm_status == \"3\": #running\n #LCM_status (prol=1,boot=2,runn=3, fail=14, unk=16)\n lcm_status = manifest.getElementsByTagName('LCM_STATE')[0].firstChild.nodeValue.strip()\n \n if lcm_status == \"3\": #if vm_status is 3, this will be 3 too.\n booted = True\n elif vm_status == \"7\": #fail\n self.logger.error(\"Fail to deploy VM \" + str(vm[1]))\n booted = True\n fail = True\n vmaddr = \"fail\"\n elif vm_status == \"6\": #done\n self.logger.error(\"The status of the VM \" + str(vm[1]) + \" is DONE\")\n booted = True\n fail = True\n vmaddr = \"fail\"\n else:\n retry += 1\n time.sleep(5)\n except:\n pass\n if retry >= maxretry:\n self.logger.error(\"The VM \" + str(vm[1]) + \" did not change to runn status. Please verify that the status of the OpenNebula hosts \"\n \"or increase the wait time in the configuration file (max_wait) \\n\")\n vmaddr = \"fail\"\n fail = True\n if not fail:\n #get IP\n nics = manifest.getElementsByTagName('NIC')\n \n for i in range(len(nics)):\n if(nics[i].childNodes[0].firstChild.nodeValue.strip() == self.bridge):\n vmaddr = nics[i].childNodes[1].firstChild.nodeValue.strip()\n if vmaddr.strip() != \"\":\n self.logger.debug(\"IP of the VM \" + str(vm[1]) + \" is \" + str(vmaddr))\n \n access = False\n maxretry = 240 #this says that we wait 20 minutes maximum to allow the VM get online. \n #this also prevent to get here forever if the ssh key was not injected propertly.\n retry = 0\n self.logger.debug(\"Waiting to have access to VM\")\n while not access and retry < maxretry:\n cmd = \"ssh -q -oBatchMode=yes root@\" + vmaddr + \" uname\"\n p = Popen(cmd, shell=True, stdout=PIPE)\n status = os.waitpid(p.pid, 0)[1]\n #print status\n if status == 0:\n access = True\n self.logger.debug(\"The VM \" + str(vm[1]) + \" with ip \" + str(vmaddr) + \"is accessible\")\n else:\n retry += 1\n time.sleep(5)\n if retry >= maxretry:\n self.logger.error(\"Could not get access to the VM \" + str(vm[1]) + \" with ip \" + str(vmaddr) + \"\\n\" \n \"Please verify the OpenNebula templates to make sure that the public ssh key to be injected is accessible to the oneadmin user. 
\\n\"\n \"Also verify that the VM has ssh server and is active on boot.\")\n vmaddr = \"fail\"\n else:\n self.logger.error(\"Could not determine the IP of the VM \" + str(vm[1]) + \" for the bridge \" + self.bridge)\n vmaddr = \"fail\"\n else:\n vmaddr = \"fail\"\n \n return [vmaddr, vm[1]]", "def lxd_init(self, iface):\n lxd_init_cmds = [\n self.set_lxd_init_auto,\n self.set_lxc_config,\n self.set_lxd_storage,\n partial(self.setup_bridge_network, iface),\n self.setup_unused_bridge_network,\n self.set_default_profile\n ]\n\n for cmd in lxd_init_cmds:\n app.log.debug(\"LXD Init: {}\".format(cmd))\n cmd()", "def setLXCContainerOptions(self,node,vmid,post_data):\n data = self.connect('put',\"nodes/%s/lxc/%s/config\" % (node,vmid), post_data)\n return data", "def create_vm(cmd, client, resource_group_name, vm_name,\n private_cloud, template, resource_pool,\n amount_of_ram=None, number_of_cores=None,\n location=None, expose_to_guest_vm=None,\n nics=None, disks=None):\n from .vendored_sdks.models import VirtualMachine\n from .vendored_sdks.models import ResourcePool\n from ._config import PATH_CHAR\n\n resource_pool = ResourcePool(id=resource_pool)\n\n # Extracting template and private cloud name from the resource id\n template_name = template.rsplit(PATH_CHAR, 1)[-1]\n private_cloud_name = private_cloud.rsplit(PATH_CHAR, 1)[-1]\n vm_template = client.virtual_machine_templates.get(location, private_cloud_name, template_name)\n\n cores = number_of_cores or vm_template.number_of_cores\n ram = amount_of_ram or vm_template.amount_of_ram\n\n expose = vm_template.expose_to_guest_vm\n if expose_to_guest_vm is not None:\n expose = expose_to_guest_vm\n\n final_disks = vm_template.disks\n if disks is not None:\n final_disks = _modify_template_disks_according_to_input(final_disks, disks)\n\n final_nics = vm_template.nics\n if nics is not None:\n final_nics = _modify_template_nics_according_to_input(final_nics, nics, cmd, client,\n resource_group_name, vm_name,\n location, private_cloud)\n\n virtual_machine = VirtualMachine(location=location,\n amount_of_ram=ram,\n disks=final_disks,\n expose_to_guest_vm=expose,\n nics=final_nics,\n number_of_cores=cores,\n private_cloud_id=private_cloud,\n resource_pool=resource_pool,\n template_id=template)\n\n return client.virtual_machines.create_or_update(resource_group_name, vm_name, virtual_machine)", "def start_virtual_machine(self, vm):\n try:\n self.client.start_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def __init__(self):\n logger.debug(\"VMPoolManager: _init_()\")\n self.system = State.Instance()\n \n self.VMPools = []\n e = EnvSetUp()\n config_spec = json.loads(open(e.get_ovpl_directory_path() + \"/config/config.json\").read())\n pools = config_spec[\"VMPOOL_CONFIGURATION\"][\"VMPOOLS\"]\n create_uri = config_spec[\"API_ENDPOINTS\"][\"CREATE_URI_ADAPTER_ENDPOINT\"]\n destroy_uri = config_spec[\"API_ENDPOINTS\"][\"DESTROY_URI_ADAPTER_ENDPOINT\"]\n\n for pool in pools:\n self.add_vm_pool( pool[\"POOLID\"], \\\n pool[\"DESCRIPTION\"], \\\n pool[\"ADAPTERIP\"], \\\n pool[\"PORT\"], \\\n create_uri, \\\n destroy_uri)\n\n logger.debug(\"VMPoolManager: _init_(); vm_pools = %s\" % (str(self.VMPools)))", "def boot(self, **kwargs):\n\n cloud = kwargs.get('cloud', Default.cloud)\n name = kwargs.get('name', Vm.generate_vm_name())\n image = kwargs.get('image', Default.image)\n flavor = kwargs.get('flavor', Default.flavor)\n key = kwargs.get('key', Default.key)\n secgroup = kwargs.get('secgroup', Default.secgroup)\n group = 
kwargs.get('group', Default.group)\n username = kwargs.get('username', Image.guess_username(image))\n cluster = kwargs.get('cluster', None)\n\n # shorthand for getting a dict of all the vm details\n #\n # IMPORTANT: anything declared prior to the call to `locals()`\n # may be passed to `Vm.boot`, so make sure that only parameters are\n # defined above this comment.\n details = locals()\n details.pop('kwargs')\n\n # currently, Vm.boot returns the instance UUID from the provider for openstack images\n # 2016/12/12\n uuid = Vm.boot(**details)\n\n\n # helper function: the Vm.boot only returns a UUID, but we\n # need to use the VM model instead. Additionally, we'll need\n # to poll the VM to wait until it is active.\n #\n # The kwargs are used to select the item from the DB:\n # eg: uuid=???, cm_id=???, etc\n def get_vm(**kwargs):\n \"\"\"Selects the VM based on the given properties\"\"\"\n model = self.db.vm_table_from_provider('openstack')\n vm = self.db.select(model, **kwargs).all()\n assert len(vm) == 1, vm\n vm = vm[0]\n return vm\n\n # get the VM from the UUID\n vm = get_vm(uuid=uuid)\n cm_id = vm.cm_id\n\n def is_active():\n Vm.refresh(cloud=cloud)\n vm = get_vm(cm_id=cm_id)\n return vm.status == 'ACTIVE'\n\n if not exponential_backoff(is_active):\n Console.error('Failed to get ACTIVE vm within timeframe')\n raise ValueError\n\n assert is_active()\n vm = get_vm(cm_id=cm_id)\n assert isinstance(vm, VM_OPENSTACK), vm.__class__\n\n return OpenstackNode(model=vm, provider=self)", "def __init__(__self__, *,\n resource_group_name: pulumi.Input[str],\n agent_upgrade: Optional[pulumi.Input['AgentUpgradeArgs']] = None,\n client_public_key: Optional[pulumi.Input[str]] = None,\n extensions: Optional[pulumi.Input[Sequence[pulumi.Input['MachineExtensionInstanceViewArgs']]]] = None,\n identity: Optional[pulumi.Input['IdentityArgs']] = None,\n location: Optional[pulumi.Input[str]] = None,\n location_data: Optional[pulumi.Input['LocationDataArgs']] = None,\n machine_name: Optional[pulumi.Input[str]] = None,\n mssql_discovered: Optional[pulumi.Input[str]] = None,\n os_profile: Optional[pulumi.Input['OSProfileArgs']] = None,\n os_type: Optional[pulumi.Input[str]] = None,\n parent_cluster_resource_id: Optional[pulumi.Input[str]] = None,\n private_link_scope_resource_id: Optional[pulumi.Input[str]] = None,\n service_statuses: Optional[pulumi.Input['ServiceStatusesArgs']] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vm_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if agent_upgrade is not None:\n pulumi.set(__self__, \"agent_upgrade\", agent_upgrade)\n if client_public_key is not None:\n pulumi.set(__self__, \"client_public_key\", client_public_key)\n if extensions is not None:\n pulumi.set(__self__, \"extensions\", extensions)\n if identity is not None:\n pulumi.set(__self__, \"identity\", identity)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if location_data is not None:\n pulumi.set(__self__, \"location_data\", location_data)\n if machine_name is not None:\n pulumi.set(__self__, \"machine_name\", machine_name)\n if mssql_discovered is not None:\n pulumi.set(__self__, \"mssql_discovered\", mssql_discovered)\n if os_profile is not None:\n pulumi.set(__self__, \"os_profile\", os_profile)\n if os_type is not None:\n pulumi.set(__self__, \"os_type\", os_type)\n if parent_cluster_resource_id is not None:\n pulumi.set(__self__, \"parent_cluster_resource_id\", 
parent_cluster_resource_id)\n if private_link_scope_resource_id is not None:\n pulumi.set(__self__, \"private_link_scope_resource_id\", private_link_scope_resource_id)\n if service_statuses is not None:\n pulumi.set(__self__, \"service_statuses\", service_statuses)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if vm_id is not None:\n pulumi.set(__self__, \"vm_id\", vm_id)", "def launch_lxc(app, ports):\n print 'Launching blank LXC for: ', app\n tstart = datetime.now()\n os.system('lxc-create -t alpine -f lxcConfig -n %s' % (app))\n tend = datetime.now()\n os.system('lxc-start -n %s' % app)\n ip = check_ip_status(app)\n install_ssh(app)\n forward_ports(app, ports, ip)\n gen_key(app)\n print 'Time elapsed for launch_lxc:', tend - tstart\n return ip", "def vm_define(vm_hostname):\n\n vm_dataset_obj = Query({'hostname': vm_hostname}, VM_ATTRIBUTES).get()\n hv = Hypervisor(vm_dataset_obj['hypervisor'])\n vm = VM(vm_dataset_obj, hv)\n\n hv.define_vm(vm)\n vm.start()\n\n log.info('VM {} defined and booted on {}'.format(\n vm_hostname, vm_dataset_obj['hypervisor']['hostname']))", "def from_vm(self, vm):\n if not vm.created:\n print \"this VM is not created, so you cann't create a node from it\"\n self.name = vm.name\n self.vm = vm\n if \"seed\" in vm.name:\n self.type = \"SEED\"\n elif \"client\" in vm.name:\n self.type = \"CLIENT\"\n else:\n self.type = \"REGULAR\"", "def pre_virtual_machine_interface_create(self, resource_dict):\n pass", "def __init__(self, session, vm):\n super().__init__(session)\n self.vm = vm", "def start_vm(client, resource_group_name, vm_name):\n return client.start(resource_group_name, vm_name)", "def create_vm(args):\n if not args.disk and not args.pool:\n print(\"Either --disk or --pool option must be specified\", file=sys.stderr)\n return 1\n\n if args.disk and args.pool:\n print(\"--disk and --pool options are exclusive\", file=sys.stderr)\n return 1\n if args.pool and not args.disk_size:\n print(\"You must specify a disk size\", file=sys.stderr)\n return 1\n\n if args.net and args.virtual_network:\n print(\"--net and --virtual_network option are exclusive\", file=sys.stderr)\n return 1\n\n # insure unicity in networking options in BM case\n\n _all_net_names = set()\n if args.net:\n for n_name in args.net:\n if n_name not in _all_net_names:\n _all_net_names.add(n_name)\n else:\n print('Duplicate virtual network name [%s], ignore it', n_name)\n\n if '--network' in args.virt:\n sys.stderr.write(\"--network is not a supported option. 
Please retry without --network option.\\n\")\n return 1\n\n # sanity on extra arguments passed to virt-install(1)\n # some options do not create the guest but display information\n # this is wrongly interpreted as a succcess by underlying layers and we\n # may setup things by mistake\n _virt_install_extra = []\n for _a in args.virt:\n if _a not in ('--print-xml', '--version', '-h', '--help'):\n _virt_install_extra.append(_a)\n\n return oci_utils.kvm.virt.create(name=args.domain,\n root_disk=args.disk,\n pool=args.pool,\n disk_size=args.disk_size,\n network=list(_all_net_names),\n virtual_network=args.virtual_network,\n extra_args=_virt_install_extra)", "def list_vm_impl(**kwargs: Any) -> None:\n try:\n config = configuration.create_transient_list_vm_config(kwargs)\n except configuration.CLIArgumentError as e:\n print(e, file=sys.stderr)\n sys.exit(1)\n\n instances = scan.find_transient_instances(\n name=config.name, with_ssh=config.with_ssh, timeout=None\n )\n if len(instances) == 0:\n print(\"No running VMs found matching criteria\", file=sys.stderr)\n sys.exit(1)\n\n print(scan.format_instance_table(instances))\n sys.exit(0)", "def __init__(self, *args):\n _snap.TFltVVV_swiginit(self, _snap.new_TFltVVV(*args))", "def new_vm():\n\tcfg_path = input(\"\\n\\nInsert the ClickOS .cfg file absolute path:\\n\")\n\n\tbridge_name = get_bridge_name(cfg_path)\n\tif len(bridge_name) == 0:\n\t\tprint(\"Couldnt find the bridge name.\")\n\t\treturn 0\n\n\tcreate_bridge(bridge_name)\n\n\tboot_vm(cfg_path)\n\n\treturn 1", "def vm(self, vm):\n\n self._vm = vm", "def __init__(self, name=None, start=True, *args, **kwargs):\n name = \"VM_TEMPL_2\" if name is None else name\n super(CliVM, self).__init__(name=name, start=start, *args, **kwargs)\n\n self.add_proc(rift.vcs.DtsPerfTasklet(), mode_active=False)\n self.add_proc(RedisServer(), mode_active=False) \n if not start:\n self.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)" ]
[ "0.61599976", "0.59049606", "0.5878049", "0.56407505", "0.56327516", "0.5571679", "0.55569005", "0.55493927", "0.5482196", "0.5421474", "0.54148704", "0.5407343", "0.5358224", "0.5309503", "0.5301207", "0.5281789", "0.5264342", "0.52136207", "0.5203196", "0.5161978", "0.5124029", "0.51014304", "0.5099542", "0.50990117", "0.50775033", "0.50483996", "0.5023688", "0.5014769", "0.5012329", "0.5005963" ]
0.71750826
0
Network configuration defaults

network_profile
    as for containers, we can call this function either with a
    network_profile dict or a network profile name in the kwargs
nic_opts
def _network_conf(conf_tuples=None, **kwargs):
    nic = kwargs.get("network_profile", None)
    ret = []
    nic_opts = kwargs.get("nic_opts", {})
    if nic_opts is None:
        # coming from elsewhere
        nic_opts = {}
    if not conf_tuples:
        conf_tuples = []
    old = _get_veths(conf_tuples)
    if not old:
        old = {}

    # if we have a profile name, get the profile and load the network settings
    # this will obviously by default look for a profile called "eth0"
    # or by what is defined in nic_opts
    # and complete each nic settings by sane defaults
    if nic and isinstance(nic, ((str,), dict)):
        nicp = get_network_profile(nic)
    else:
        nicp = {}
    if DEFAULT_NIC not in nicp:
        nicp[DEFAULT_NIC] = {}

    kwargs = copy.deepcopy(kwargs)
    gateway = kwargs.pop("gateway", None)
    bridge = kwargs.get("bridge", None)
    if nic_opts:
        for dev, args in nic_opts.items():
            ethx = nicp.setdefault(dev, {})
            try:
                ethx = salt.utils.dictupdate.update(ethx, args)
            except AttributeError:
                raise SaltInvocationError("Invalid nic_opts configuration")
    ifs = [a for a in nicp]
    ifs += [a for a in old if a not in nicp]
    ifs.sort()
    gateway_set = False
    for dev in ifs:
        args = nicp.get(dev, {})
        opts = nic_opts.get(dev, {}) if nic_opts else {}
        old_if = old.get(dev, {})
        disable = opts.get("disable", args.get("disable", False))
        if disable:
            continue
        mac = opts.get(
            "mac", opts.get("hwaddr", args.get("mac", args.get("hwaddr", "")))
        )
        type_ = opts.get("type", args.get("type", ""))
        flags = opts.get("flags", args.get("flags", ""))
        link = opts.get("link", args.get("link", ""))
        ipv4 = opts.get("ipv4", args.get("ipv4", ""))
        ipv6 = opts.get("ipv6", args.get("ipv6", ""))
        infos = salt.utils.odict.OrderedDict(
            [
                (
                    "lxc.network.type",
                    {
                        "test": not type_,
                        "value": type_,
                        "old": old_if.get("lxc.network.type"),
                        "default": "veth",
                    },
                ),
                (
                    "lxc.network.name",
                    {"test": False, "value": dev, "old": dev, "default": dev},
                ),
                (
                    "lxc.network.flags",
                    {
                        "test": not flags,
                        "value": flags,
                        "old": old_if.get("lxc.network.flags"),
                        "default": "up",
                    },
                ),
                (
                    "lxc.network.link",
                    {
                        "test": not link,
                        "value": link,
                        "old": old_if.get("lxc.network.link"),
                        "default": search_lxc_bridge(),
                    },
                ),
                (
                    "lxc.network.hwaddr",
                    {
                        "test": not mac,
                        "value": mac,
                        "old": old_if.get("lxc.network.hwaddr"),
                        "default": salt.utils.network.gen_mac(),
                    },
                ),
                (
                    "lxc.network.ipv4",
                    {
                        "test": not ipv4,
                        "value": ipv4,
                        "old": old_if.get("lxc.network.ipv4", ""),
                        "default": None,
                    },
                ),
                (
                    "lxc.network.ipv6",
                    {
                        "test": not ipv6,
                        "value": ipv6,
                        "old": old_if.get("lxc.network.ipv6", ""),
                        "default": None,
                    },
                ),
            ]
        )
        # for each parameter, if not explicitly set, the
        # config value present in the LXC configuration should
        # take precedence over the profile configuration
        for info in list(infos.keys()):
            bundle = infos[info]
            if bundle["test"]:
                if bundle["old"]:
                    bundle["value"] = bundle["old"]
                elif bundle["default"]:
                    bundle["value"] = bundle["default"]
        for info, data in infos.items():
            if data["value"]:
                ret.append({info: data["value"]})
        for key, val in args.items():
            if key == "link" and bridge:
                val = bridge
            val = opts.get(key, val)
            if key in [
                "type",
                "flags",
                "name",
                "gateway",
                "mac",
                "link",
                "ipv4",
                "ipv6",
            ]:
                continue
            ret.append({f"lxc.network.{key}": val})

        # gateway (in automode) must be appended following network conf !
        if not gateway:
            gateway = args.get("gateway", None)
        if gateway is not None and not gateway_set:
            ret.append({"lxc.network.ipv4.gateway": gateway})
            # only one network gateway ;)
            gateway_set = True
    # normally, this won't happen
    # set the gateway if specified even if we did
    # not managed the network underlying
    if gateway is not None and not gateway_set:
        ret.append({"lxc.network.ipv4.gateway": gateway})
        # only one network gateway ;)
        gateway_set = True

    new = _get_veths(ret)
    # verify that we did not loose the mac settings
    for iface in [a for a in new]:
        ndata = new[iface]
        nmac = ndata.get("lxc.network.hwaddr", "")
        ntype = ndata.get("lxc.network.type", "")
        omac, otype = "", ""
        if iface in old:
            odata = old[iface]
            omac = odata.get("lxc.network.hwaddr", "")
            otype = odata.get("lxc.network.type", "")
        # default for network type is setted here
        # attention not to change the network type
        # without a good and explicit reason to.
        if otype and not ntype:
            ntype = otype
        if not ntype:
            ntype = "veth"
        new[iface]["lxc.network.type"] = ntype
        if omac and not nmac:
            new[iface]["lxc.network.hwaddr"] = omac

    ret = []
    for val in new.values():
        for row in val:
            ret.append(salt.utils.odict.OrderedDict([(row, val[row])]))
    # on old versions of lxc, still support the gateway auto mode
    # if we didn't explicitly say no to
    # (lxc.network.ipv4.gateway: auto)
    if (
        Version(version()) <= Version("1.0.7")
        and True not in ["lxc.network.ipv4.gateway" in a for a in ret]
        and True in ["lxc.network.ipv4" in a for a in ret]
    ):
        ret.append({"lxc.network.ipv4.gateway": "auto"})
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)", "def network_profile(self) -> Optional[pulumi.Input['AgentPoolNetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. 
Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}", "def network_config(self) -> Optional[pulumi.Input['NodeNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def network_config(self) -> pulumi.Input['PrivateCloudNetworkConfigArgs']:\n return pulumi.get(self, \"network_config\")", "def nic_add(args):\n name = args.name\n network = args.network\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if network is None:\n common.pprint(\"Missing network. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding Nic to %s...\" % name)\n k.add_nic(name=name, network=network)", "def network_configuration(self) -> Optional[pulumi.Input['ServiceNetworkConfigurationArgs']]:\n return pulumi.get(self, \"network_configuration\")", "def network_configuration(self) -> Optional[pulumi.Input['ServiceNetworkConfigurationArgs']]:\n return pulumi.get(self, \"network_configuration\")", "def network_config(self) -> Optional[pulumi.Input['PrivateCloudNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def _prepare_network_profile(self, instance_uuid):\n network_interface = {\n 'location': CONF.azure.location,\n 'ip_configurations': [{\n 'name': instance_uuid,\n 'subnet': {\n 'id': CONF.azure.vsubnet_id\n }\n }]\n }\n try:\n async_nic_creation = \\\n self.network.network_interfaces.create_or_update(\n CONF.azure.resource_group,\n instance_uuid,\n network_interface)\n nic = async_nic_creation.result()\n LOG.info(_LI(\"Create a Nic: %s\"), nic.id)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.NetworkInterfaceCreateFailure(\n reason=six.text_type(e), instance_uuid=instance_uuid)\n raise ex\n network_profile = {\n 'network_interfaces': [{\n 'id': nic.id\n }]\n }\n return network_profile", "def infra_network_profile(self) -> Optional[pulumi.Input['CloudProviderProfileInfraNetworkProfileArgs']]:\n return pulumi.get(self, \"infra_network_profile\")", "def convert_network_configuration(config, dns_servers):\n\n def _get_subnet_part(pcfg, nameservers=None):\n subpart = {'type': 'static',\n 'control': 'auto',\n 'address': pcfg.get('ip_address'),\n 'gateway': pcfg.get('gateway')}\n\n if nameservers:\n subpart['dns_nameservers'] = nameservers\n\n if \":\" in pcfg.get('ip_address'):\n subpart['address'] = \"{0}/{1}\".format(pcfg.get('ip_address'),\n pcfg.get('cidr'))\n else:\n subpart['netmask'] = pcfg.get('netmask')\n\n return subpart\n\n all_nics = []\n for k in ('public', 'private'):\n if k in config:\n all_nics.extend(config[k])\n\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n nic_configs = []\n\n for nic in all_nics:\n\n mac_address = nic.get('mac')\n sysfs_name = macs_to_nics.get(mac_address)\n nic_type = nic.get('type', 'unknown')\n # Note: the entry 'public' above contains a list, but\n # the list will only ever have one nic inside it per digital ocean.\n # If it ever had more than one nic, then this code would\n # assign all 'public' the same name.\n if_name = 
NIC_MAP.get(nic_type, sysfs_name)\n\n LOG.debug(\"mapped %s interface to %s, assigning name of %s\",\n mac_address, sysfs_name, if_name)\n\n ncfg = {'type': 'physical',\n 'mac_address': mac_address,\n 'name': if_name}\n\n subnets = []\n for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):\n raw_subnet = nic.get(netdef, None)\n if not raw_subnet:\n continue\n\n sub_part = _get_subnet_part(raw_subnet)\n if nic_type == 'public' and 'anchor' not in netdef:\n # add DNS resolvers to the public interfaces only\n sub_part = _get_subnet_part(raw_subnet, dns_servers)\n else:\n # remove the gateway any non-public interfaces\n if 'gateway' in sub_part:\n del sub_part['gateway']\n\n subnets.append(sub_part)\n\n ncfg['subnets'] = subnets\n nic_configs.append(ncfg)\n LOG.debug(\"nic '%s' configuration: %s\", if_name, ncfg)\n\n return {'version': 1, 'config': nic_configs}", "def network_profile(self) -> Optional['outputs.ClusterPoolResourcePropertiesResponseNetworkProfile']:\n return pulumi.get(self, \"network_profile\")", "def create_nic_parameters(subnet_id, address_pool_id):\n return {\n 'location': azureRegion,\n 'ip_configurations': [{\n 'name': IP_CONFIG_NAME,\n 'subnet': {\n 'id': subnet_id\n },\n 'load_balancer_backend_address_pools': [{\n 'id': address_pool_id\n }]\n }]\n }", "def __init__(__self__, *,\n infra_network_profile: Optional[pulumi.Input['CloudProviderProfileInfraNetworkProfileArgs']] = None,\n infra_storage_profile: Optional[pulumi.Input['CloudProviderProfileInfraStorageProfileArgs']] = None):\n if infra_network_profile is not None:\n pulumi.set(__self__, \"infra_network_profile\", infra_network_profile)\n if infra_storage_profile is not None:\n pulumi.set(__self__, \"infra_storage_profile\", infra_storage_profile)", "def setupInfraNetwork(\n networkName: str, imageName: str, ctx: ExecContext) -> None:\n try:\n args = [\"--detach\"]\n if ctx.uidmaps:\n args.extend(ctx.getUidMaps())\n if ctx.dns:\n args.append(f\"--dns={ctx.dns}\")\n args.extend(ctx.getHosts())\n\n executePod(\"net-\" + networkName, args, imageName, [\"sleep\", \"Inf\"])\n except AlreadyRunning:\n pass", "def AddNetworkFlag(parser):\n help_text = \"\"\"\\\n The VPC network from which the AlloyDB instance is accessible via private\n IP. For example, projects/myProject/global/networks/default. 
This setting\n cannot be updated after it is set.\n \"\"\"\n parser.add_argument('--network', help=help_text)", "def iface_config(self, iface, *args, **kwargs):\n if not set(kwargs).issubset({'intf_ip_addr', 'netns', 'adminMode'}):\n raise NotImplementedError(\"Method is not implemented for current kwargs.\")\n if kwargs.get('netns', False):\n # Create network namespaces for current iface\n self.create_namespaces(iface)\n del kwargs['netns']\n if 'intf_ip_addr' in kwargs:\n kwargs['ipAddr'] = \"{}/24\".format(kwargs['intf_ip_addr'])\n if iface in self.namespaces:\n self._lhost.ui.enter_namespace(self.namespaces[iface])\n self._lhost.ui.modify_ports([iface], **kwargs)\n if iface in self.namespaces:\n self._lhost.ui.exit_namespace()", "def network_config(args): # pylint: disable-msg=W0613\n if not NETLOCK.acquire_read(NET_LOCK_TIMEOUT):\n raise HttpReqError(503, \"unable to take NETLOCK for reading after %s seconds\" % NET_LOCK_TIMEOUT)\n try:\n netconf = xivo_config.load_current_configuration()\n return yaml_json.stringify_keys(netconf)\n finally:\n NETLOCK.release()", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def network_config(self) -> pulumi.Output['outputs.PrivateCloudNetworkConfig']:\n return pulumi.get(self, \"network_config\")", "def show_network_profile(self, profile, **params):\r\n return self.get(self.network_profile_path % (profile), params=params)", "def test_get_default_network(self):\n pass", "def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def get_network_config2():\n interfaces = get_interfaces()\n ips = [get_ip_address2(ip) for ip in interfaces]\n return dict(zip(interfaces,ips))", "def ipvs_config(self) -> Optional[pulumi.Input['ContainerServiceNetworkProfileIpvsConfigArgs']]:\n return pulumi.get(self, \"ipvs_config\")", "def get_network_profile(arn=None):\n pass" ]
[ "0.6846558", "0.66064805", "0.65819055", "0.65819055", "0.6472075", "0.63825357", "0.63816035", "0.6364383", "0.63634956", "0.63634956", "0.63531065", "0.62706405", "0.61537826", "0.6037617", "0.59884", "0.5984904", "0.59825045", "0.59778434", "0.5948426", "0.5938443", "0.58964115", "0.5858779", "0.5858779", "0.5834324", "0.5826007", "0.5792006", "0.57462394", "0.57103395", "0.56815743", "0.5672228" ]
0.7293378
0
Removes parameters which match the pattern from the config data
def _filter_data(self, pattern): removed = [] filtered = [] for param in self.data: if not param[0].startswith(pattern): filtered.append(param) else: removed.append(param) self.data = filtered return removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deleteConfig(matcher, config):\n result = matcher.search(config)\n if not result:\n return config, ()\n start, end = result.span()\n captures = result.groups()\n return (config[:start] + config[end:]), captures", "def _config_regex(self):", "def remove_info(config):\n clean_config = copy.deepcopy(config)\n\n if 'parameters' in clean_config:\n params = clean_config['parameters']\n for name in params:\n val = params[name]\n if isinstance(val, dict):\n # This should not generally happen since we deal with it in update_param_info, but just in case\n if 'val' not in val:\n raise ConfigurationError(\n \"Parameter info remove error.\"\n \" Parameter that is defined by a dictionary must contain 'val' field that\"\n \" defines its default value. Found this definition: %s=%s\" % (name, val)\n )\n params[name] = val['val']\n\n return clean_config", "def _filter_pipeline_parameters(dct):\n return {k: v for k, v in dct.items() if k not in non_pipeline_parameter_names and k != dynamic_param_name}", "def delete_params_s(s, params):\n patt = \"(?s)\" + \"|\".join(\"(?<=\\n)\" + s + \"\\s*:.+?\\n(?=\\S+|$)\" for s in params)\n return re.sub(patt, \"\", \"\\n\" + s.strip() + \"\\n\").strip()", "def delete_params(s, *params):\n patt = '(?s)' + '|'.join(\n r'(?<=\\n)' + s + r'\\s*:.+?\\n(?=\\S+|$)' for s in params)\n return re.sub(patt, '', '\\n' + s.strip() + '\\n').strip()", "def deleteAttrPattern(*args, allPatterns: bool=True, patternName: AnyStr=\"\", patternType:\n AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def clean(pattern=default, *, module=None):\n pattern = default.unwrap(pattern, current_config[\"clean\"])\n\n if pattern is False:\n return\n\n if module is None:\n import __main__ as module\n\n items = vars(module)\n to_delete = [key for key in items if fnmatch.fnmatchcase(key, pattern)]\n\n for key in to_delete:\n del items[key]", "def clean_config(self, config):\n return config", "def clear_parameter_run_config_collisions(self) -> None:\n if not self.override_config:\n return\n keys = [key for key in self.override_config.keys()]\n for key in keys:\n if self.override_args.get(key):\n del self.override_config[key]", "def filter_checkpoint_parameter_by_list(origin_dict, param_filter):\n for key in list(origin_dict.keys()):\n for name in param_filter:\n if name in key:\n print(\"Delete parameter from checkpoint: \", key)\n del origin_dict[key]\n break", "def keep_params(s, *params):\n patt = '(?s)' + '|'.join(\n r'(?<=\\n)' + s + r'\\s*:.+?\\n(?=\\S+|$)' for s in params)\n return ''.join(re.findall(patt, '\\n' + s.strip() + '\\n')).rstrip()", "def cut_params(params, exclude):\n for ex_var, ex_list in exclude.items():\n for ex in ex_list:\n if ex in params[ex_var]:\n print(f'Excluding {ex_var}={ex:.3f} from grid')\n ex_idx = np.searchsorted(params[ex_var], ex)\n params[ex_var] = np.delete(params[ex_var], [ex_idx])", "def parse_remove_configuration(configuration):\n\n if configuration is None:\n return \"None\", None\n print('conf', configuration)\n conf_dict = collections.OrderedDict(configuration)\n\n name = 'remove'\n for key in conf_dict.keys():\n if key != 'weights' and key != 'boost':\n name += '_'\n name += key\n\n return name, conf_dict", "def remove_pattern(self, name):\n self._pattern_reg.__delitem__(name)", "def remove_parameters(self):\n self.parameters = []", "def clear_includepatterns(self):\n self._excludepatterns = []", "def clear_excludepatterns(self):\n self._excludepatterns = []", "def remove(data, pattern):\n return [''.join(filter(pattern, str)) for str in data]", "def 
clean_host_config(self, hostid, config, **kwargs):\n pass", "def filter_empty_subparams(self, param_name):\n param = self.module.params.get(param_name)\n filtered = []\n if isinstance(param, list):\n for subparam in param:\n if isinstance(subparam, dict):\n filtered.append(\n dict(\n (key, value)\n for key, value in subparam.items()\n if value is not None\n )\n )\n else:\n filtered = param\n return filtered", "def keep_params_s(s, params):\n patt = \"(?s)\" + \"|\".join(\"(?<=\\n)\" + s + \"\\s*:.+?\\n(?=\\S+|$)\" for s in params)\n return \"\".join(re.findall(patt, \"\\n\" + s.strip() + \"\\n\")).rstrip()", "def discard_config(self):\n raise NotImplementedError", "def _preprocess_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n return cast_config_values(\n {k: v for k, v in config.items() if k in self._hyperparameter_keys},\n config_space=self.config_space,\n )", "def list_cleanup(self, data):\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n if filter_value not in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n return data", "def filter_unsupported_fields_from_config(self, configs, existing_endpoints, fields):\n for field in fields:\n if not any(field in e for e in existing_endpoints):\n for c in configs:\n endpoint = c['endpoint']\n if field in endpoint and endpoint[field] is None:\n del endpoint[field]", "def _match_filter(self, meta, field):\r\n val = meta[field]\r\n if field in self.ignored_values:\r\n for pattern in self.ignored_values[field]:\r\n val = val.replace(pattern, '')\r\n return val", "def delete_params(self, base_key, *params):\n self.params[\n base_key + '.no_' + '|'.join(params)] = delete_params(\n self.params[base_key], *params)", "def dict_cleanup(self, data):\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n if filter_value not in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n return data", "def remove_pattern(input_txt,pattern):\r\n r = re.findall(pattern,input_txt)\r\n\r\n for i in r:\r\n input_txt = re.sub(i,'',input_txt)\r\n return input_txt" ]
[ "0.6340481", "0.6108231", "0.6008967", "0.5884418", "0.5812272", "0.5797809", "0.57954615", "0.5749245", "0.5746013", "0.5710172", "0.56394845", "0.56007004", "0.5600398", "0.5578663", "0.55692947", "0.55566084", "0.5534305", "0.5529074", "0.5525302", "0.5521977", "0.55085266", "0.54938245", "0.5479099", "0.5470927", "0.54310423", "0.5397376", "0.5394797", "0.538416", "0.53625065", "0.53304195" ]
0.744032
0
If the needed base does not exist, then create it; if it does exist, create nothing and return the name of the base lxc container so it can be cloned.
def _get_base(**kwargs): profile = get_container_profile(copy.deepcopy(kwargs.get("profile"))) kw_overrides = copy.deepcopy(kwargs) def select(key, default=None): kw_overrides_match = kw_overrides.pop(key, _marker) profile_match = profile.pop(key, default) # let kwarg overrides be the preferred choice if kw_overrides_match is _marker: return profile_match return kw_overrides_match template = select("template") image = select("image") vgname = select("vgname") path = kwargs.get("path", None) # remove the above three variables from kwargs, if they exist, to avoid # duplicates if create() is invoked below. for param in ("path", "image", "vgname", "template"): kwargs.pop(param, None) if image: proto = urllib.parse.urlparse(image).scheme img_tar = __salt__["cp.cache_file"](image) img_name = os.path.basename(img_tar) hash_ = salt.utils.hashutils.get_hash( img_tar, __salt__["config.get"]("hash_type") ) name = f"__base_{proto}_{img_name}_{hash_}" if not exists(name, path=path): create( name, template=template, image=image, path=path, vgname=vgname, **kwargs ) if vgname: rootfs = os.path.join("/dev", vgname, name) edit_conf( info(name, path=path)["config"], out_format="commented", **{"lxc.rootfs": rootfs}, ) return name elif template: name = f"__base_{template}" if not exists(name, path=path): create( name, template=template, image=image, path=path, vgname=vgname, **kwargs ) if vgname: rootfs = os.path.join("/dev", vgname, name) edit_conf( info(name, path=path)["config"], out_format="commented", **{"lxc.rootfs": rootfs}, ) return name return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def create_lxd_container(public_key=None, name=\"test_name\"):\n container = None\n\n # Format name so it's valid\n name = name.replace(\"_\", \"-\").replace(\".\", \"\")\n\n client = get_lxd_client()\n if not client:\n raise Exception(\"Unable to connect to LXD\")\n\n test_machine = \"test-{}-{}\".format(\n uuid.uuid4().hex[-4:],\n name,\n )\n\n private_key_path, public_key_path = find_n2vc_ssh_keys()\n\n try:\n # create profile w/cloud-init and juju ssh key\n if not public_key:\n public_key = \"\"\n with open(public_key_path, \"r\") as f:\n public_key = f.readline()\n\n client.profiles.create(\n test_machine,\n config={\n 'user.user-data': '#cloud-config\\nssh_authorized_keys:\\n- {}'.format(public_key)},\n devices={\n 'root': {'path': '/', 'pool': 'default', 'type': 'disk'},\n 'eth0': {\n 'nictype': 'bridged',\n 'parent': 'lxdbr0',\n 'type': 'nic'\n }\n }\n )\n except Exception as ex:\n debug(\"Error creating lxd profile {}: {}\".format(test_machine, ex))\n raise ex\n\n try:\n # create lxc machine\n config = {\n 'name': test_machine,\n 'source': {\n 'type': 'image',\n 'alias': 'xenial',\n 'mode': 'pull',\n 'protocol': 'simplestreams',\n 'server': 'https://cloud-images.ubuntu.com/releases',\n },\n 'profiles': [test_machine],\n }\n container = client.containers.create(config, wait=True)\n container.start(wait=True)\n except Exception as ex:\n debug(\"Error creating lxd container {}: {}\".format(test_machine, ex))\n # This is a test-ending failure.\n raise ex\n\n def wait_for_network(container, timeout=30):\n \"\"\"Wait for eth0 to have an ipv4 address.\"\"\"\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if 'eth0' in container.state().network:\n addresses = container.state().network['eth0']['addresses']\n if len(addresses) > 0:\n if addresses[0]['family'] == 'inet':\n return addresses[0]\n return None\n\n try:\n wait_for_network(container)\n except Exception as ex:\n debug(\n \"Error waiting for container {} network: {}\".format(\n test_machine,\n ex,\n )\n )\n\n try:\n waitcount = 0\n while waitcount <= 5:\n if is_sshd_running(container):\n break\n waitcount += 1\n time.sleep(1)\n if waitcount >= 5:\n debug(\"couldn't detect sshd running\")\n raise Exception(\"Unable to verify container sshd\")\n\n except Exception as ex:\n debug(\n \"Error checking sshd status on {}: {}\".format(\n test_machine,\n ex,\n )\n )\n\n # HACK: We need to give sshd a chance to bind to the interface,\n # and pylxd's container.execute seems to be broken and fails and/or\n # hangs trying to properly check if the service is up.\n (exit_code, stdout, stderr) = container.execute([\n 'ping',\n '-c', '5', # Wait for 5 ECHO_REPLY\n '8.8.8.8', # Ping Google's public DNS\n '-W', '15', # Set a 15 second deadline\n ])\n if exit_code > 0:\n # The network failed\n raise Exception(\"Unable to verify container network\")\n\n return container", "def _create_container(self, container_name):\n try:\n container = 
self.swift.head_container(container_name)\n except client.ClientException:\n self.swift.put_container(container_name)\n else:\n return container", "def build_base():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/requirements.txt')\n\n with cd('/srv/build'):\n run('docker build -t {base_image_name} .'.format(\n base_image_name=env.base_image_name,\n ))", "def _get_container_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_container_name = f\"{dirname}_{self.config_name}\"\n container_name = self.config_options.get(\"container_name\", default_container_name)\n return container_name", "def generate_dockerfile_extension(base_image, template_name, config_path):\n template_path = get_template_path(template_name, config_path)\n template_file = os.path.join(template_path, \"Dockerfile\")\n dockerfile = \".Dockerfile.luda\"\n\n def remove():\n if os.path.exists(dockerfile):\n os.remove(dockerfile)\n\n with cd(template_path, remove):\n with open(dockerfile, \"w\") as output:\n docker_str = j2docker.render(base_image, template_file).decode().strip()\n output.write(docker_str)\n client = docker.from_env()\n if base_image.startswith(\"luda/\"):\n _, _, image_name = base_image.partition(\"luda/\")\n image_name, _, tag = image_name.partition(\":\")\n image_name = \"luda/{0}:{1}-{2}\".format(image_name, tag, template_name)\n else:\n image_name = \"luda/{0}:{1}\".format(base_image.replace('/', '-').replace(':', '-'), template_name)\n click.echo(\"Building image: {0} ...\".format(image_name))\n client.images.build(path=os.getcwd(), tag=image_name, dockerfile=dockerfile) # This line doesn't work with Python 3...\n return image_name", "def shadow_container(kls):\n if os.name == 'posix' and os.path.isdir('/dev/shm'):\n return '/dev/shm/'\n else:\n return gettempdir()", "def ddtest_create_generic_container_w_name(self, name=None):\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=name, type='generic')", "def GenerateContainer(ctx, \n container_name=None, \n container_iname=None,\n base_interfaces=()\n ):\n # create container\n container_name = container_name or \\\n \"%sContainer\" % (ctx.domain_model.__name__)\n \n # allow passing in dotted python path\n if isinstance(ctx.container_module, (str, unicode)):\n ctx.container_module = resolve(ctx.container_module)\n \n # if not present use the domain class's module\n elif ctx.container_module is None:\n ctx.container_module = resolve(ctx.domain_model.__module__)\n \n # sanity check we have a module for the container\n assert isinstance(ctx.container_module, types.ModuleType), \"Invalid Container\"\n \n # logging variables\n msg = (ctx.domain_model.__name__, \n ctx.container_module.__name__, container_name)\n \n # if we already have a container class, exit \n if getattr(ctx.container_module, container_name, None):\n if ctx.echo:\n ctx.logger.debug(\"%s: found container %s.%s, skipping\" % msg)\n ctx.container_class = getattr(ctx.container_module, container_name)\n return\n \n if ctx.echo:\n ctx.logger.debug(\"%s: generated container %s.%s\" % msg)\n \n # if we already have a container class, exit\n container_class = type(container_name,\n (AlchemistContainer,),\n dict(_class=ctx.domain_model,\n __module__=ctx.container_module.__name__)\n )\n setattr(ctx.container_module, container_name, 
container_class)\n \n # save container class on catalyst context\n ctx.container_class = container_class\n \n # interface for container\n container_iname = container_iname or \"I%s\" % container_name\n \n # if the interface module is none, then use the nearest one to the domain class\n if ctx.interface_module is None:\n ctx.interface_module = _get_interface_module_for(ctx)\n\n msg = (ctx.domain_model.__name__,\n ctx.container_module.__name__, container_iname)\n \n # if we already have a container interface class, skip creation\n container_interface = getattr(ctx.interface_module, container_iname, None)\n if container_interface is not None:\n assert issubclass(container_interface, IAlchemistContainer)\n if ctx.echo:\n ctx.logger.debug(\"%s: skipping container interface %s.%s for\" % msg)\n else:\n if ctx.echo:\n ctx.logger.debug(\"%s: generated container interface %s.%s\" % msg)\n # ensure that our base interfaces include alchemist container \n if base_interfaces:\n assert isinstance(base_interfaces, tuple)\n found = False\n for bi in base_interfaces:\n found = issubclass(bi, IAlchemistContainer)\n if found: break\n if not found:\n base_interfaces = base_interfaces + (IAlchemistContainer,)\n else:\n base_interfaces = (IAlchemistContainer,)\n \n # create interface\n container_interface = interface.interface.InterfaceClass(\n container_iname,\n bases=base_interfaces,\n __module__=ctx.interface_module.__name__\n )\n # store container interface for catalyst\n ctx.container_interface = container_interface\n setattr(ctx.interface_module, container_iname, container_interface)\n \n # setup security\n for n,d in container_interface.namesAndDescriptions(1):\n protectName(container_class, n, \"zope.Public\")\n \n if not container_interface.implementedBy(container_class):\n interface.classImplements(container_class, container_interface)\n ctx.container_interface = container_interface", "def container_name(self):\n prefix = get_service_prefix('core' if self.core else 'service')\n return f'{prefix}{self.data.get(\"name\")}'", "def unique_branch_name(base_name):\n repo = git.repo()\n branches = repo.branches()\n collision = True\n count = 1\n while collision:\n new_branch = base_name + \"-bak-\" +str(count)\n collision = next((x for x in branches if x == new_branch), False)\n count += 1\n return new_branch", "def ddtest_create_generic_container_w_empty_or_null_name(self, name=None):\n if name is None:\n self._skip_on_issue('launchpad', '1354767')\n\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=container_resp.id, type='generic')", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' + env", "def create_internal_elb_dns_name ( base_name, name ) :\n return 'lb.' 
+ create_dns_name( base_name, name )", "def _ensure_image(testkit_path, branch_name, artifacts_path):\n # Construct Docker image name from branch name\n image_name = \"runner:%s\" % branch_name\n image_path = os.path.join(testkit_path, \"runner_image\")\n docker.build_and_tag(image_name, image_path, log_path=artifacts_path)\n\n return image_name", "def create(\n name, config=None, profile=None, network_profile=None, nic_opts=None, **kwargs\n):\n # Required params for 'download' template\n download_template_deps = (\"dist\", \"release\", \"arch\")\n\n cmd = f\"lxc-create -n {name}\"\n\n profile = get_container_profile(copy.deepcopy(profile))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, None)\n profile_match = profile.pop(key, default)\n # Return the profile match if the kwarg match was None, as the\n # lxc.present state will pass these kwargs set to None by default.\n if kw_overrides_match is None:\n return profile_match\n return kw_overrides_match\n\n path = select(\"path\")\n if exists(name, path=path):\n raise CommandExecutionError(f\"Container '{name}' already exists\")\n\n tvg = select(\"vgname\")\n vgname = tvg if tvg else __salt__[\"config.get\"](\"lxc.vgname\")\n\n # The 'template' and 'image' params conflict\n template = select(\"template\")\n image = select(\"image\")\n if template and image:\n raise SaltInvocationError(\"Only one of 'template' and 'image' is permitted\")\n elif not any((template, image, profile)):\n raise SaltInvocationError(\n \"At least one of 'template', 'image', and 'profile' is required\"\n )\n\n options = select(\"options\") or {}\n backing = select(\"backing\")\n if vgname and not backing:\n backing = \"lvm\"\n lvname = select(\"lvname\")\n thinpool = select(\"thinpool\")\n fstype = select(\"fstype\")\n size = select(\"size\", \"1G\")\n zfsroot = select(\"zfsroot\")\n if backing in (\"dir\", \"overlayfs\", \"btrfs\", \"zfs\"):\n fstype = None\n size = None\n # some backends won't support some parameters\n if backing in (\"aufs\", \"dir\", \"overlayfs\", \"btrfs\"):\n lvname = vgname = thinpool = None\n\n if image:\n img_tar = __salt__[\"cp.cache_file\"](image)\n template = os.path.join(\n os.path.dirname(salt.__file__), \"templates\", \"lxc\", \"salt_tarball\"\n )\n options[\"imgtar\"] = img_tar\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n if not os.path.exists(path):\n os.makedirs(path)\n if config:\n cmd += f\" -f {config}\"\n if template:\n cmd += f\" -t {template}\"\n if backing:\n backing = backing.lower()\n cmd += f\" -B {backing}\"\n if backing in (\"zfs\",):\n if zfsroot:\n cmd += f\" --zfsroot {zfsroot}\"\n if backing in (\"lvm\",):\n if lvname:\n cmd += f\" --lvname {lvname}\"\n if vgname:\n cmd += f\" --vgname {vgname}\"\n if thinpool:\n cmd += f\" --thinpool {thinpool}\"\n if backing not in (\"dir\", \"overlayfs\"):\n if fstype:\n cmd += f\" --fstype {fstype}\"\n if size:\n cmd += f\" --fssize {size}\"\n\n if options:\n if template == \"download\":\n missing_deps = [x for x in download_template_deps if x not in options]\n if missing_deps:\n raise SaltInvocationError(\n \"Missing params in 'options' dict: {}\".format(\n \", \".join(missing_deps)\n )\n )\n cmd += \" --\"\n for key, val in options.items():\n cmd += f\" --{key} {val}\"\n\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n # please do not merge extra conflicting stuff\n # inside those two line (ret =, return)\n return _after_ignition_network_profile(\n cmd, ret, name, network_profile, path, 
nic_opts\n )", "def _get_image(runtime):\n return \"{}:{}\".format(LambdaContainer._IMAGE_REPO_NAME, runtime)", "def mkdir(base, name):\n path = os.path.join(base, name)\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def container_image_name(registry, component_name, version):\n if version is None:\n image = component_name + ':dev'\n else:\n image = '%s/%s:%s' % (registry, component_name, version)\n\n return image", "def test_create_container(self):\n pass", "def get_base_docker_image(docker_file):\n with open(docker_file) as f:\n from_line = next(\n line for line in f.read().split(\"\\n\") if line.startswith(\"FROM\")\n )\n _from, base_image = from_line.split()\n return base_image", "def container_name(self):\n pass", "def __set_container_info(self):\n self.container = \"{}_{}_1\".format(self.build, self.service.lower())\n self.mysql_container = \"{}_{}-mysql_1\".format(self.build, self.service.lower())", "def clone(name, orig, profile=None, network_profile=None, nic_opts=None, **kwargs):\n profile = get_container_profile(copy.deepcopy(profile))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, None)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is None:\n return profile_match\n return kw_overrides_match\n\n path = select(\"path\")\n if exists(name, path=path):\n raise CommandExecutionError(f\"Container '{name}' already exists\")\n\n _ensure_exists(orig, path=path)\n if state(orig, path=path) != \"stopped\":\n raise CommandExecutionError(f\"Container '{orig}' must be stopped to be cloned\")\n\n backing = select(\"backing\")\n snapshot = select(\"snapshot\")\n if backing in (\"dir\",):\n snapshot = False\n if not snapshot:\n snapshot = \"\"\n else:\n snapshot = \"-s\"\n\n size = select(\"size\", \"1G\")\n if backing in (\"dir\", \"overlayfs\", \"btrfs\"):\n size = None\n # LXC commands and options changed in 2.0 - CF issue #34086 for details\n if Version(version()) >= Version(\"2.0\"):\n # https://linuxcontainers.org/lxc/manpages//man1/lxc-copy.1.html\n cmd = \"lxc-copy\"\n cmd += f\" {snapshot} -n {orig} -N {name}\"\n else:\n # https://linuxcontainers.org/lxc/manpages//man1/lxc-clone.1.html\n cmd = \"lxc-clone\"\n cmd += f\" {snapshot} -o {orig} -n {name}\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n if not os.path.exists(path):\n os.makedirs(path)\n if backing:\n backing = backing.lower()\n cmd += f\" -B {backing}\"\n if backing not in (\"dir\", \"overlayfs\"):\n if size:\n cmd += f\" -L {size}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n # please do not merge extra conflicting stuff\n # inside those two line (ret =, return)\n return _after_ignition_network_profile(\n cmd, ret, name, network_profile, path, nic_opts\n )", "def create_base_projects_folder():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n os.mkdir(context.__PROJECTS_PATH__, mode=0o777)\n log.info(\"Base folder '.wcscanner' created in %s\", context.__BASE_PATH__)\n else:\n log.info(\"Base folder '.wcscanner' already in %s\", context.__BASE_PATH__)", "def container_name(self):\n if self._container_name:\n return self._container_name\n else:\n return self.image.split(u'/').pop()", "def base():\n wheels()\n build_base()\n push_base()", "def get_new_name(base_name: str, *args: Container[str]) -> str:\n if not _contains(base_name, args):\n return base_name\n\n bin_iter = BinaryIterator(1, None)\n while bin_iter.has_next():\n new_name 
= f'{base_name}_{bin_iter.get_next():d}'\n if _contains(new_name, args):\n bin_iter.up()\n else:\n bin_iter.save_info(new_name)\n bin_iter.down()\n\n result = bin_iter.get_last_save_info()\n assert result is not None, 'binary search should find a solution'\n return result", "def DeployContainer(self, base_name, container_spec):\n name = base_name + str(len(self.containers[base_name]))\n container = KubernetesContainer(container_spec=container_spec, name=name)\n self.containers[base_name].append(container)\n container.Create()", "def container_factory(self, name):" ]
[ "0.60583836", "0.5907243", "0.5885327", "0.57861507", "0.56870806", "0.5686935", "0.56822044", "0.5660491", "0.5565593", "0.554921", "0.54963577", "0.5486417", "0.5474102", "0.54729396", "0.54644406", "0.5462381", "0.54609567", "0.5460335", "0.5419726", "0.53880507", "0.52886796", "0.5262127", "0.52530044", "0.5252433", "0.5242874", "0.5227914", "0.52140576", "0.520212", "0.51969784", "0.5138649" ]
0.6840039
0
Initialize a new container. This is a partially idempotent function: if the container is already provisioned, the lxc configuration file is reset a bit, but much of the hard work is skipped because markers prevent re-execution of harmful tasks. name: Name of the container. image: A tar archive to use as the rootfs for the container; conflicts with the ``template`` argument. cpus: Select a random number of cpu cores and assign them to the cpuset; if the cpuset option is set, this option is ignored. cpuset: Explicitly define the cpus this container will be bound to. cpushare: cgroups cpu shares. autostart: autostart the container on reboot. memory: cgroups memory limit, in MB.
def init( name, config=None, cpuset=None, cpushare=None, memory=None, profile=None, network_profile=None, nic_opts=None, cpu=None, autostart=True, password=None, password_encrypted=None, users=None, dnsservers=None, searchdomains=None, bridge=None, gateway=None, pub_key=None, priv_key=None, force_install=False, unconditional_install=False, bootstrap_delay=None, bootstrap_args=None, bootstrap_shell=None, bootstrap_url=None, **kwargs, ): ret = {"name": name, "changes": {}} profile = get_container_profile(copy.deepcopy(profile)) if not network_profile: network_profile = profile.get("network_profile") if not network_profile: network_profile = DEFAULT_NIC # Changes is a pointer to changes_dict['init']. This method is used so that # we can have a list of changes as they are made, providing an ordered list # of things that were changed. changes_dict = {"init": []} changes = changes_dict.get("init") if users is None: users = [] dusers = ["root"] for user in dusers: if user not in users: users.append(user) kw_overrides = copy.deepcopy(kwargs) def select(key, default=None): kw_overrides_match = kw_overrides.pop(key, _marker) profile_match = profile.pop(key, default) # let kwarg overrides be the preferred choice if kw_overrides_match is _marker: return profile_match return kw_overrides_match path = select("path") bpath = get_root_path(path) state_pre = state(name, path=path) tvg = select("vgname") vgname = tvg if tvg else __salt__["config.get"]("lxc.vgname") start_ = select("start", True) autostart = select("autostart", autostart) seed = select("seed", True) install = select("install", True) seed_cmd = select("seed_cmd") salt_config = _get_salt_config(config, **kwargs) approve_key = select("approve_key", True) clone_from = select("clone_from") # If using a volume group then set up to make snapshot cow clones if vgname and not clone_from: try: kwargs["vgname"] = vgname clone_from = _get_base(profile=profile, **kwargs) except (SaltInvocationError, CommandExecutionError) as exc: ret["comment"] = exc.strerror if changes: ret["changes"] = changes_dict return ret if not kwargs.get("snapshot") is False: kwargs["snapshot"] = True does_exist = exists(name, path=path) to_reboot = False remove_seed_marker = False if does_exist: pass elif clone_from: remove_seed_marker = True try: clone(name, clone_from, profile=profile, **kwargs) changes.append({"create": "Container cloned"}) except (SaltInvocationError, CommandExecutionError) as exc: if "already exists" in exc.strerror: changes.append({"create": "Container already exists"}) else: ret["result"] = False ret["comment"] = exc.strerror if changes: ret["changes"] = changes_dict return ret cfg = _LXCConfig( name=name, network_profile=network_profile, nic_opts=nic_opts, bridge=bridge, path=path, gateway=gateway, autostart=autostart, cpuset=cpuset, cpushare=cpushare, memory=memory, ) old_chunks = read_conf(cfg.path, out_format="commented") cfg.write() chunks = read_conf(cfg.path, out_format="commented") if old_chunks != chunks: to_reboot = True else: remove_seed_marker = True cfg = _LXCConfig( network_profile=network_profile, nic_opts=nic_opts, cpuset=cpuset, path=path, bridge=bridge, gateway=gateway, autostart=autostart, cpushare=cpushare, memory=memory, ) with cfg.tempfile() as cfile: try: create(name, config=cfile.name, profile=profile, **kwargs) changes.append({"create": "Container created"}) except (SaltInvocationError, CommandExecutionError) as exc: if "already exists" in exc.strerror: changes.append({"create": "Container already exists"}) else: ret["comment"] = 
exc.strerror if changes: ret["changes"] = changes_dict return ret cpath = os.path.join(bpath, name, "config") old_chunks = [] if os.path.exists(cpath): old_chunks = read_conf(cpath, out_format="commented") new_cfg = _config_list( conf_tuples=old_chunks, cpu=cpu, network_profile=network_profile, nic_opts=nic_opts, bridge=bridge, cpuset=cpuset, cpushare=cpushare, memory=memory, ) if new_cfg: edit_conf(cpath, out_format="commented", lxc_config=new_cfg) chunks = read_conf(cpath, out_format="commented") if old_chunks != chunks: to_reboot = True # last time to be sure any of our property is correctly applied cfg = _LXCConfig( name=name, network_profile=network_profile, nic_opts=nic_opts, bridge=bridge, path=path, gateway=gateway, autostart=autostart, cpuset=cpuset, cpushare=cpushare, memory=memory, ) old_chunks = [] if os.path.exists(cfg.path): old_chunks = read_conf(cfg.path, out_format="commented") cfg.write() chunks = read_conf(cfg.path, out_format="commented") if old_chunks != chunks: changes.append({"config": "Container configuration updated"}) to_reboot = True if to_reboot: try: stop(name, path=path) except (SaltInvocationError, CommandExecutionError) as exc: ret["comment"] = f"Unable to stop container: {exc}" if changes: ret["changes"] = changes_dict return ret if not does_exist or (does_exist and state(name, path=path) != "running"): try: start(name, path=path) except (SaltInvocationError, CommandExecutionError) as exc: ret["comment"] = f"Unable to stop container: {exc}" if changes: ret["changes"] = changes_dict return ret if remove_seed_marker: run( name, f"rm -f '{SEED_MARKER}'", path=path, chroot_fallback=False, python_shell=False, ) # set the default user/password, only the first time if ret.get("result", True) and password: gid = "/.lxc.initial_pass" gids = [gid, "/lxc.initial_pass", f"/.lxc.{name}.initial_pass"] if not any( retcode( name, f'test -e "{x}"', chroot_fallback=True, path=path, ignore_retcode=True, ) == 0 for x in gids ): # think to touch the default user generated by default templates # which has a really unsecure passwords... # root is defined as a member earlier in the code for default_user in ["ubuntu"]: if ( default_user not in users and retcode( name, f"id {default_user}", python_shell=False, path=path, chroot_fallback=True, ignore_retcode=True, ) == 0 ): users.append(default_user) for user in users: try: cret = set_password( name, users=[user], path=path, password=password, encrypted=password_encrypted, ) except (SaltInvocationError, CommandExecutionError) as exc: msg = f"{user}: Failed to set password" + exc.strerror # only hardfail in unrecoverable situation: # root cannot be setted up if user == "root": ret["comment"] = msg ret["result"] = False else: log.debug(msg) if ret.get("result", True): changes.append({"password": "Password(s) updated"}) if ( retcode( name, 'sh -c \'touch "{0}"; test -e "{0}"\''.format(gid), path=path, chroot_fallback=True, ignore_retcode=True, ) != 0 ): ret["comment"] = "Failed to set password marker" changes[-1]["password"] += ". " + ret["comment"] + "." 
ret["result"] = False # set dns servers if any, only the first time if ret.get("result", True) and dnsservers: # retro compatibility, test also old markers gid = "/.lxc.initial_dns" gids = [gid, "/lxc.initial_dns", f"/lxc.{name}.initial_dns"] if not any( retcode( name, f'test -e "{x}"', chroot_fallback=True, path=path, ignore_retcode=True, ) == 0 for x in gids ): try: set_dns( name, path=path, dnsservers=dnsservers, searchdomains=searchdomains ) except (SaltInvocationError, CommandExecutionError) as exc: ret["comment"] = "Failed to set DNS: " + exc.strerror ret["result"] = False else: changes.append({"dns": "DNS updated"}) if ( retcode( name, 'sh -c \'touch "{0}"; test -e "{0}"\''.format(gid), chroot_fallback=True, path=path, ignore_retcode=True, ) != 0 ): ret["comment"] = "Failed to set DNS marker" changes[-1]["dns"] += ". " + ret["comment"] + "." ret["result"] = False # retro compatibility, test also old markers if remove_seed_marker: run(name, f"rm -f '{SEED_MARKER}'", path=path, python_shell=False) gid = "/.lxc.initial_seed" gids = [gid, "/lxc.initial_seed"] if any( retcode( name, f"test -e {x}", path=path, chroot_fallback=True, ignore_retcode=True, ) == 0 for x in gids ) or not ret.get("result", True): pass elif seed or seed_cmd: if seed: try: result = bootstrap( name, config=salt_config, path=path, approve_key=approve_key, pub_key=pub_key, priv_key=priv_key, install=install, force_install=force_install, unconditional_install=unconditional_install, bootstrap_delay=bootstrap_delay, bootstrap_url=bootstrap_url, bootstrap_shell=bootstrap_shell, bootstrap_args=bootstrap_args, ) except (SaltInvocationError, CommandExecutionError) as exc: ret["comment"] = "Bootstrap failed: " + exc.strerror ret["result"] = False else: if not result: ret[ "comment" ] = "Bootstrap failed, see minion log for more information" ret["result"] = False else: changes.append({"bootstrap": "Container successfully bootstrapped"}) elif seed_cmd: try: result = __salt__[seed_cmd]( info(name, path=path)["rootfs"], name, salt_config ) except (SaltInvocationError, CommandExecutionError) as exc: ret["comment"] = "Bootstrap via seed_cmd '{}' failed: {}".format( seed_cmd, exc.strerror ) ret["result"] = False else: if not result: ret["comment"] = ( "Bootstrap via seed_cmd '{}' failed, " "see minion log for more information ".format(seed_cmd) ) ret["result"] = False else: changes.append( { "bootstrap": ( "Container successfully bootstrapped " "using seed_cmd '{}'".format(seed_cmd) ) } ) if ret.get("result", True) and not start_: try: stop(name, path=path) except (SaltInvocationError, CommandExecutionError) as exc: ret["comment"] = f"Unable to stop container: {exc}" ret["result"] = False state_post = state(name, path=path) if state_pre != state_post: changes.append({"state": {"old": state_pre, "new": state_post}}) if ret.get("result", True): ret["comment"] = f"Container '{name}' successfully initialized" ret["result"] = True if changes: ret["changes"] = changes_dict return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_node(self, **kwargs):\n default = \"ubuntu.precise\"\n template = {\"name\":\"ubuntu\", \"args\":[]}\n if 'image' not in kwargs:\n kwargs['image'] = default\n \n for image in self.list_images():\n if image.name == kwargs['image']:\n template = {\"name\":image.extra[\"template_name\"],\n \"args\":image.extra[\"template_args\"]\n }\n \n name = kwargs['name']\n container = {\n \"cgroups\": [],\n \"name\": name,\n \"conf\": [],\n \"template\": template\n }\n \n self.connection.request(action=\"/v1/containers\", method=\"POST\", data=json.dumps(container))\n self.connection.request(action=\"/v1/containers/%s/actions/start\" % name, method=\"POST\")\n return self.get_node(name)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n container_registry_name: Optional[pulumi.Input[str]] = None,\n instance_count: Optional[pulumi.Input[int]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tier: Optional[pulumi.Input[str]] = None,\n virtual_network_subnet_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(self, cpus=None, blocking_io_penalty=None):\n super(LocalDockerBackend, self).__init__()\n self.cpus = cpus \\\n or jetstream.settings['backends']['local']['cpus'] \\\n or jetstream.utils.guess_local_cpus()\n self.bip = blocking_io_penalty \\\n or jetstream.settings['backends']['local']['blocking_io_penalty'].get(int)\n self._cpu_sem = BoundedSemaphore( self.cpus )\n memory_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')\n self.memory_gb = int( memory_bytes/(1024.**3) )\n self._mem_sem = BoundedSemaphore( self.memory_gb )\n self._resources_lock = Lock()\n log.info(f'LocalDockerBackend initialized with {self.cpus} cpus and {self.memory_gb}G memory')", "def container(name, ostemplate, **kwargs):\n if not openvz.exists(name):\n ctid = openvz.get_available_ctid()\n openvz.create(ctid, ostemplate=ostemplate, **kwargs)\n openvz.set(ctid, name=name)\n return Container(name)", "def containers_init(self):\n\n def test_container(name, state, status):\n \"\"\"Creates test container. 
\"\"\"\n return {\n 'Image': \"alpine:3.7\",\n 'Command': \"/bin/sleep 999\",\n 'Labels': {'out': ''},\n 'State': state,\n 'Created': 1524205394,\n 'Status': status,\n 'Names': [\"/\" + name]\n }\n\n state_created = 'created'\n state_running = 'running'\n\n status_created = 'Created'\n status_up = 'Up 15 minutes'\n\n self.containers_list = [\n test_container(self.container_to_run,\n state_created, status_created),\n test_container(self.container_running,\n state_running, status_up),\n test_container(self.container_to_remove,\n state_created, status_created),\n ]\n\n CLIENT.containers_list.extend(self.containers_list)", "def __init__(__self__, *,\n container_registry_name: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n instance_count: Optional[pulumi.Input[int]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tier: Optional[pulumi.Input[str]] = None,\n virtual_network_subnet_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"container_registry_name\", container_registry_name)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if instance_count is not None:\n pulumi.set(__self__, \"instance_count\", instance_count)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)\n if virtual_network_subnet_id is not None:\n pulumi.set(__self__, \"virtual_network_subnet_id\", virtual_network_subnet_id)", "def __init__(self, name=None, start=True, *args, **kwargs):\n name = \"VM_TEMPL_2\" if name is None else name\n super(CliVM, self).__init__(name=name, start=start, *args, **kwargs)\n\n self.add_proc(rift.vcs.DtsPerfTasklet(), mode_active=False)\n self.add_proc(RedisServer(), mode_active=False) \n if not start:\n self.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)", "def __init__(__self__, *,\n container_registry_name: Optional[pulumi.Input[str]] = None,\n instance_count: Optional[pulumi.Input[int]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tier: Optional[pulumi.Input[str]] = None,\n virtual_network_subnet_id: Optional[pulumi.Input[str]] = None):\n if container_registry_name is not None:\n pulumi.set(__self__, \"container_registry_name\", container_registry_name)\n if instance_count is not None:\n pulumi.set(__self__, \"instance_count\", instance_count)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if resource_group_name is not None:\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)\n if virtual_network_subnet_id is not None:\n pulumi.set(__self__, \"virtual_network_subnet_id\", virtual_network_subnet_id)", "def create_ubuntu_node(self, user_data, network_config=None, x=0, y=0, image=None, cpus=None, ram=None, disk=None, ethernets=None, vnc=None):\n # Create an ISO image containing the boot configuration and upload it\n # to the GNS3 project. 
We write the config to a temporary file,\n # convert it to ISO image, then post the ISO image to GNS3.\n\n assert image\n\n print(f\"Building cloud-init configuration for {user_data['hostname']}...\")\n\n # Putting local-hostname in meta-data ensures that any initial DHCP will be done with hostname, not 'ubuntu'\n meta_data = {'local-hostname': user_data['hostname']}\n\n # Generate the ISO image that will be used as a virtual CD-ROM to pass all this initialization data to cloud-init.\n\n meta_data_file = tempfile.NamedTemporaryFile(delete = False)\n meta_data_file.write(yaml.dump(meta_data).encode('utf-8'))\n meta_data_file.close()\n\n user_data_file = tempfile.NamedTemporaryFile(delete = False)\n user_data_file.write((\"#cloud-config\\n\" + yaml.dump(user_data)).encode('utf-8'))\n user_data_file.close()\n\n genisoimage_command = [\"genisoimage\", \"-input-charset\", \"utf-8\", \"-o\", \"-\", \"-l\",\n \"-relaxed-filenames\", \"-V\", \"cidata\", \"-graft-points\",\n \"meta-data={}\".format(meta_data_file.name),\n \"user-data={}\".format(user_data_file.name)]\n\n if network_config:\n network_config_file = tempfile.NamedTemporaryFile(delete = False)\n network_config_file.write(yaml.dump(network_config).encode('utf-8'))\n network_config_file.close()\n genisoimage_command.append(\"network-config={}\".format(network_config_file.name))\n\n genisoimage_proc = subprocess.Popen(genisoimage_command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\n isoimage = genisoimage_proc.stdout.read()\n\n debug_isoimage = False\n if debug_isoimage:\n with open('isoimage-debug.iso', 'wb') as f:\n f.write(isoimage)\n\n os.remove(meta_data_file.name)\n os.remove(user_data_file.name)\n if network_config:\n os.remove(network_config_file.name)\n\n print(f\"Uploading cloud-init configuration for {user_data['hostname']}...\")\n\n # files in the GNS3 directory take precedence over these project files,\n # so we need to make these file names unique\n cdrom_image = self.project_id + '_' + user_data['hostname'] + '.iso'\n file_url = \"{}/files/{}\".format(self.url, cdrom_image)\n result = requests.post(file_url, auth=self.auth, data=isoimage)\n result.raise_for_status()\n\n # Configure an Ubuntu cloud node\n\n print(f\"Configuring {user_data['hostname']} node...\")\n\n url = \"{}/nodes\".format(self.url)\n\n # It's important to use the scsi disk interface, because the IDE interface in qemu\n # has some kind of bug, probably in its handling of DISCARD operations, that\n # causes a thin provisioned disk to balloon up with garbage.\n #\n # See https://unix.stackexchange.com/questions/700050\n # and https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1974100\n\n ubuntu_node = {\n \"compute_id\": \"local\",\n \"name\": user_data['hostname'],\n \"node_type\": \"qemu\",\n \"properties\": {\n \"adapter_type\" : \"virtio-net-pci\",\n \"hda_disk_image\": image,\n \"hda_disk_interface\": \"scsi\",\n \"cdrom_image\" : cdrom_image,\n \"qemu_path\": \"/usr/bin/qemu-system-x86_64\",\n \"process_priority\": \"very high\",\n },\n\n # ens4, ens5, ens6 seems to be the numbering scheme on Ubuntu 20,\n # but we can't replicate that with a Python format string\n \"port_name_format\": \"eth{}\",\n\n \"symbol\": \":/symbols/qemu_guest.svg\",\n \"x\" : x,\n \"y\" : y\n }\n\n if cpus:\n ubuntu_node['properties']['cpus'] = cpus\n if ram:\n ubuntu_node['properties']['ram'] = ram\n if ethernets:\n ubuntu_node['properties']['adapters'] = ethernets\n if vnc:\n ubuntu_node['console_type'] = 'vnc'\n\n result = requests.post(url, auth=self.auth, 
data=json.dumps(ubuntu_node))\n result.raise_for_status()\n ubuntu = result.json()\n\n if disk and disk > 2048:\n url = \"{}/compute/projects/{}/qemu/nodes/{}/resize_disk\".format(self.server.url, self.project_id, ubuntu['node_id'])\n resize_obj = {'drive_name' : 'hda', 'extend' : disk - 2048}\n result = requests.post(url, auth=self.auth, data=json.dumps(resize_obj))\n result.raise_for_status()\n\n self.nodes() # update self.cached_nodes\n return ubuntu", "def create_cluster_template(\n self, name, image_id=None, keypair_id=None, coe=None, **kwargs\n ):\n cluster_template = (\n self.container_infrastructure_management.create_cluster_template(\n name=name,\n image_id=image_id,\n keypair_id=keypair_id,\n coe=coe,\n **kwargs,\n )\n )\n\n return cluster_template", "def __init__(__self__, *,\n lab_name: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n user_name: pulumi.Input[str],\n arm_template_display_name: Optional[pulumi.Input[str]] = None,\n deployment_properties: Optional[pulumi.Input['EnvironmentDeploymentPropertiesArgs']] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"lab_name\", lab_name)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"user_name\", user_name)\n if arm_template_display_name is not None:\n pulumi.set(__self__, \"arm_template_display_name\", arm_template_display_name)\n if deployment_properties is not None:\n pulumi.set(__self__, \"deployment_properties\", deployment_properties)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(__self__, *,\n cpu: Optional[pulumi.Input[str]] = None,\n memory: Optional[pulumi.Input[str]] = None):\n if cpu is None:\n cpu = '1'\n if cpu is not None:\n pulumi.set(__self__, \"cpu\", cpu)\n if memory is None:\n memory = '2Gi'\n if memory is not None:\n pulumi.set(__self__, \"memory\", memory)", "def __init__(__self__, *,\n cpu: Optional[pulumi.Input[str]] = None,\n memory: Optional[pulumi.Input[str]] = None):\n if cpu is None:\n cpu = '1'\n if cpu is not None:\n pulumi.set(__self__, \"cpu\", cpu)\n if memory is None:\n memory = '2Gi'\n if memory is not None:\n pulumi.set(__self__, \"memory\", memory)", "def __init__(self, name=None, image=None, cmd=None, env_vars=None, resources=None, replicas=None, cpu=None, gpu=None, memory=None): # noqa: E501 # noqa: E501\n self._name = None\n self._image = None\n self._cmd = None\n self._env_vars = None\n self._resources = None\n self._replicas = None\n self._cpu = None\n self._gpu = None\n self._memory = None\n self.discriminator = None\n if name is not None:\n self.name = name\n if image is not None:\n self.image = image\n if cmd is not None:\n self.cmd = cmd\n if env_vars is not None:\n self.env_vars = env_vars\n if resources is not None:\n self.resources = resources\n if replicas is not None:\n self.replicas = replicas\n if cpu is not None:\n self.cpu = cpu\n if gpu is not None:\n self.gpu = gpu\n if memory is not None:\n self.memory = memory", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n pre_provisioning_hook: 
Optional[pulumi.Input[pulumi.InputType['ProvisioningTemplateProvisioningHookArgs']]] = None,\n provisioning_role_arn: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProvisioningTemplateTagArgs']]]]] = None,\n template_body: Optional[pulumi.Input[str]] = None,\n template_name: Optional[pulumi.Input[str]] = None,\n template_type: Optional[pulumi.Input['ProvisioningTemplateTemplateType']] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n block_size_bytes: Optional[pulumi.Input[int]] = None,\n container_id: Optional[pulumi.Input[str]] = None,\n disk_file_format: Optional[pulumi.Input[Union[str, 'DiskFileFormat']]] = None,\n disk_size_gb: Optional[pulumi.Input[float]] = None,\n dynamic: Optional[pulumi.Input[bool]] = None,\n extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,\n hyper_v_generation: Optional[pulumi.Input[Union[str, 'HyperVGeneration']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n logical_sector_bytes: Optional[pulumi.Input[int]] = None,\n physical_sector_bytes: Optional[pulumi.Input[int]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_hard_disk_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n identity: pulumi.Input['ClusterIdentityArgs'],\n resource_group_name: pulumi.Input[str],\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n size_gb: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"identity\", identity)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if size_gb is not None:\n pulumi.set(__self__, \"size_gb\", size_gb)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(__self__, *,\n availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n cloud_provider_profile: Optional[pulumi.Input['CloudProviderProfileArgs']] = None,\n control_plane_endpoint: Optional[pulumi.Input['ControlPlaneEndpointProfileControlPlaneEndpointArgs']] = None,\n count: Optional[pulumi.Input[int]] = None,\n linux_profile: Optional[pulumi.Input['LinuxProfilePropertiesArgs']] = None,\n max_count: Optional[pulumi.Input[int]] = None,\n max_pods: Optional[pulumi.Input[int]] = None,\n min_count: Optional[pulumi.Input[int]] = None,\n mode: Optional[pulumi.Input[Union[str, 'Mode']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n node_image_version: Optional[pulumi.Input[str]] = None,\n node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n os_type: Optional[pulumi.Input[Union[str, 'OsType']]] = None,\n vm_size: Optional[pulumi.Input[str]] = None):\n if availability_zones is not None:\n pulumi.set(__self__, \"availability_zones\", availability_zones)\n if cloud_provider_profile is not None:\n pulumi.set(__self__, \"cloud_provider_profile\", cloud_provider_profile)\n if control_plane_endpoint is not None:\n pulumi.set(__self__, \"control_plane_endpoint\", control_plane_endpoint)\n if count is None:\n count = 1\n if count is not None:\n 
pulumi.set(__self__, \"count\", count)\n if linux_profile is not None:\n pulumi.set(__self__, \"linux_profile\", linux_profile)\n if max_count is not None:\n pulumi.set(__self__, \"max_count\", max_count)\n if max_pods is not None:\n pulumi.set(__self__, \"max_pods\", max_pods)\n if min_count is not None:\n pulumi.set(__self__, \"min_count\", min_count)\n if mode is None:\n mode = 'User'\n if mode is not None:\n pulumi.set(__self__, \"mode\", mode)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_image_version is not None:\n pulumi.set(__self__, \"node_image_version\", node_image_version)\n if node_labels is not None:\n pulumi.set(__self__, \"node_labels\", node_labels)\n if node_taints is not None:\n pulumi.set(__self__, \"node_taints\", node_taints)\n if os_type is not None:\n pulumi.set(__self__, \"os_type\", os_type)\n if vm_size is not None:\n pulumi.set(__self__, \"vm_size\", vm_size)", "def ddtest_create_generic_container_w_name(self, name=None):\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=name, type='generic')", "def __init__(__self__, *,\n name: pulumi.Input[str],\n availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n capacity_reservation_group_id: Optional[pulumi.Input[str]] = None,\n count: Optional[pulumi.Input[int]] = None,\n creation_data: Optional[pulumi.Input['CreationDataArgs']] = None,\n enable_auto_scaling: Optional[pulumi.Input[bool]] = None,\n enable_custom_ca_trust: Optional[pulumi.Input[bool]] = None,\n enable_encryption_at_host: Optional[pulumi.Input[bool]] = None,\n enable_fips: Optional[pulumi.Input[bool]] = None,\n enable_node_public_ip: Optional[pulumi.Input[bool]] = None,\n enable_ultra_ssd: Optional[pulumi.Input[bool]] = None,\n gpu_instance_profile: Optional[pulumi.Input[Union[str, 'GPUInstanceProfile']]] = None,\n host_group_id: Optional[pulumi.Input[str]] = None,\n kubelet_config: Optional[pulumi.Input['KubeletConfigArgs']] = None,\n kubelet_disk_type: Optional[pulumi.Input[Union[str, 'KubeletDiskType']]] = None,\n linux_os_config: Optional[pulumi.Input['LinuxOSConfigArgs']] = None,\n max_count: Optional[pulumi.Input[int]] = None,\n max_pods: Optional[pulumi.Input[int]] = None,\n message_of_the_day: Optional[pulumi.Input[str]] = None,\n min_count: Optional[pulumi.Input[int]] = None,\n mode: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]] = None,\n network_profile: Optional[pulumi.Input['AgentPoolNetworkProfileArgs']] = None,\n node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n node_public_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n orchestrator_version: Optional[pulumi.Input[str]] = None,\n os_disk_size_gb: Optional[pulumi.Input[int]] = None,\n os_disk_type: Optional[pulumi.Input[Union[str, 'OSDiskType']]] = None,\n os_sku: Optional[pulumi.Input[Union[str, 'OSSKU']]] = None,\n os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,\n pod_subnet_id: Optional[pulumi.Input[str]] = None,\n power_state: Optional[pulumi.Input['PowerStateArgs']] = None,\n proximity_placement_group_id: Optional[pulumi.Input[str]] = None,\n scale_down_mode: Optional[pulumi.Input[Union[str, 'ScaleDownMode']]] = None,\n scale_set_eviction_policy: Optional[pulumi.Input[Union[str, 
'ScaleSetEvictionPolicy']]] = None,\n scale_set_priority: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]] = None,\n security_profile: Optional[pulumi.Input['AgentPoolSecurityProfileArgs']] = None,\n spot_max_price: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[Union[str, 'AgentPoolType']]] = None,\n upgrade_settings: Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']] = None,\n vm_size: Optional[pulumi.Input[str]] = None,\n vnet_subnet_id: Optional[pulumi.Input[str]] = None,\n windows_profile: Optional[pulumi.Input['AgentPoolWindowsProfileArgs']] = None,\n workload_runtime: Optional[pulumi.Input[Union[str, 'WorkloadRuntime']]] = None):\n pulumi.set(__self__, \"name\", name)\n if availability_zones is not None:\n pulumi.set(__self__, \"availability_zones\", availability_zones)\n if capacity_reservation_group_id is not None:\n pulumi.set(__self__, \"capacity_reservation_group_id\", capacity_reservation_group_id)\n if count is not None:\n pulumi.set(__self__, \"count\", count)\n if creation_data is not None:\n pulumi.set(__self__, \"creation_data\", creation_data)\n if enable_auto_scaling is not None:\n pulumi.set(__self__, \"enable_auto_scaling\", enable_auto_scaling)\n if enable_custom_ca_trust is not None:\n pulumi.set(__self__, \"enable_custom_ca_trust\", enable_custom_ca_trust)\n if enable_encryption_at_host is not None:\n pulumi.set(__self__, \"enable_encryption_at_host\", enable_encryption_at_host)\n if enable_fips is not None:\n pulumi.set(__self__, \"enable_fips\", enable_fips)\n if enable_node_public_ip is not None:\n pulumi.set(__self__, \"enable_node_public_ip\", enable_node_public_ip)\n if enable_ultra_ssd is not None:\n pulumi.set(__self__, \"enable_ultra_ssd\", enable_ultra_ssd)\n if gpu_instance_profile is not None:\n pulumi.set(__self__, \"gpu_instance_profile\", gpu_instance_profile)\n if host_group_id is not None:\n pulumi.set(__self__, \"host_group_id\", host_group_id)\n if kubelet_config is not None:\n pulumi.set(__self__, \"kubelet_config\", kubelet_config)\n if kubelet_disk_type is not None:\n pulumi.set(__self__, \"kubelet_disk_type\", kubelet_disk_type)\n if linux_os_config is not None:\n pulumi.set(__self__, \"linux_os_config\", linux_os_config)\n if max_count is not None:\n pulumi.set(__self__, \"max_count\", max_count)\n if max_pods is not None:\n pulumi.set(__self__, \"max_pods\", max_pods)\n if message_of_the_day is not None:\n pulumi.set(__self__, \"message_of_the_day\", message_of_the_day)\n if min_count is not None:\n pulumi.set(__self__, \"min_count\", min_count)\n if mode is not None:\n pulumi.set(__self__, \"mode\", mode)\n if network_profile is not None:\n pulumi.set(__self__, \"network_profile\", network_profile)\n if node_labels is not None:\n pulumi.set(__self__, \"node_labels\", node_labels)\n if node_public_ip_prefix_id is not None:\n pulumi.set(__self__, \"node_public_ip_prefix_id\", node_public_ip_prefix_id)\n if node_taints is not None:\n pulumi.set(__self__, \"node_taints\", node_taints)\n if orchestrator_version is not None:\n pulumi.set(__self__, \"orchestrator_version\", orchestrator_version)\n if os_disk_size_gb is not None:\n pulumi.set(__self__, \"os_disk_size_gb\", os_disk_size_gb)\n if os_disk_type is not None:\n pulumi.set(__self__, \"os_disk_type\", os_disk_type)\n if os_sku is not None:\n pulumi.set(__self__, \"os_sku\", os_sku)\n if os_type is not None:\n pulumi.set(__self__, \"os_type\", os_type)\n if pod_subnet_id is not None:\n 
pulumi.set(__self__, \"pod_subnet_id\", pod_subnet_id)\n if power_state is not None:\n pulumi.set(__self__, \"power_state\", power_state)\n if proximity_placement_group_id is not None:\n pulumi.set(__self__, \"proximity_placement_group_id\", proximity_placement_group_id)\n if scale_down_mode is not None:\n pulumi.set(__self__, \"scale_down_mode\", scale_down_mode)\n if scale_set_eviction_policy is not None:\n pulumi.set(__self__, \"scale_set_eviction_policy\", scale_set_eviction_policy)\n if scale_set_priority is not None:\n pulumi.set(__self__, \"scale_set_priority\", scale_set_priority)\n if security_profile is not None:\n pulumi.set(__self__, \"security_profile\", security_profile)\n if spot_max_price is not None:\n pulumi.set(__self__, \"spot_max_price\", spot_max_price)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if upgrade_settings is not None:\n pulumi.set(__self__, \"upgrade_settings\", upgrade_settings)\n if vm_size is not None:\n pulumi.set(__self__, \"vm_size\", vm_size)\n if vnet_subnet_id is not None:\n pulumi.set(__self__, \"vnet_subnet_id\", vnet_subnet_id)\n if windows_profile is not None:\n pulumi.set(__self__, \"windows_profile\", windows_profile)\n if workload_runtime is not None:\n pulumi.set(__self__, \"workload_runtime\", workload_runtime)", "def create(self):\n print(\"+ Creating cluster: {}. This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. 
Cluster: {} created.\".format(self.name_hyphenated))", "def do_run(cs, args):\n opts = {}\n opts['name'] = args.name\n opts['image'] = args.image\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['environment'] = zun_utils.format_args(args.environment)\n opts['workdir'] = args.workdir\n opts['auto_remove'] = args.auto_remove\n opts['labels'] = zun_utils.format_args(args.label)\n opts['image_pull_policy'] = args.image_pull_policy\n opts['image_driver'] = args.image_driver\n opts['hints'] = zun_utils.format_args(args.hint)\n opts['nets'] = zun_utils.parse_nets(args.net)\n opts['mounts'] = zun_utils.parse_mounts(args.mount)\n opts['runtime'] = args.runtime\n opts['hostname'] = args.hostname\n opts['disk'] = args.disk\n opts['availability_zone'] = args.availability_zone\n opts['command'] = args.command\n opts['registry'] = args.registry\n opts['host'] = args.host\n if args.entrypoint:\n opts['entrypoint'] = zun_utils.parse_entrypoint(args.entrypoint)\n if args.healthcheck:\n opts['healthcheck'] = zun_utils.parse_health(args.healthcheck)\n\n if args.auto_heal:\n opts['auto_heal'] = args.auto_heal\n if args.security_group:\n opts['security_groups'] = args.security_group\n if args.expose_port:\n opts['exposed_ports'] = zun_utils.parse_exposed_ports(args.expose_port)\n if args.restart:\n opts['restart_policy'] = zun_utils.check_restart_policy(args.restart)\n if args.interactive:\n opts['interactive'] = True\n if args.privileged:\n opts['privileged'] = True\n opts = zun_utils.remove_null_parms(**opts)\n container = cs.containers.run(**opts)\n _show_container(container)\n container_uuid = getattr(container, 'uuid', None)\n if args.interactive:\n ready_for_attach = False\n while True:\n container = cs.containers.get(container_uuid)\n if zun_utils.check_container_status(container, 'Running'):\n ready_for_attach = True\n break\n if zun_utils.check_container_status(container, 'Error'):\n raise exceptions.ContainerStateError(container_uuid)\n print(\"Waiting for container start\")\n time.sleep(1)\n if ready_for_attach is True:\n response = cs.containers.attach(container_uuid)\n websocketclient.do_attach(cs, response, container_uuid, \"~\", 0.5)\n else:\n raise exceptions.InvalidWebSocketLink(container_uuid)", "def __init__(__self__,\n resource_name: str,\n args: ImageInitArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, *,\n availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n cloud_provider_profile: Optional[pulumi.Input['CloudProviderProfileArgs']] = None,\n count: Optional[pulumi.Input[int]] = None,\n linux_profile: Optional[pulumi.Input['LinuxProfilePropertiesArgs']] = None,\n max_count: Optional[pulumi.Input[int]] = None,\n max_pods: Optional[pulumi.Input[int]] = None,\n min_count: Optional[pulumi.Input[int]] = None,\n mode: Optional[pulumi.Input[Union[str, 'Mode']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n node_image_version: Optional[pulumi.Input[str]] = None,\n node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n os_type: Optional[pulumi.Input[Union[str, 'OsType']]] = None,\n vm_size: Optional[pulumi.Input[str]] = None):\n if availability_zones is not None:\n pulumi.set(__self__, \"availability_zones\", availability_zones)\n if cloud_provider_profile is not None:\n pulumi.set(__self__, \"cloud_provider_profile\", cloud_provider_profile)\n if count is None:\n count = 1\n if count is not None:\n 
pulumi.set(__self__, \"count\", count)\n if linux_profile is not None:\n pulumi.set(__self__, \"linux_profile\", linux_profile)\n if max_count is not None:\n pulumi.set(__self__, \"max_count\", max_count)\n if max_pods is not None:\n pulumi.set(__self__, \"max_pods\", max_pods)\n if min_count is not None:\n pulumi.set(__self__, \"min_count\", min_count)\n if mode is None:\n mode = 'User'\n if mode is not None:\n pulumi.set(__self__, \"mode\", mode)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_image_version is not None:\n pulumi.set(__self__, \"node_image_version\", node_image_version)\n if node_labels is not None:\n pulumi.set(__self__, \"node_labels\", node_labels)\n if node_taints is not None:\n pulumi.set(__self__, \"node_taints\", node_taints)\n if os_type is not None:\n pulumi.set(__self__, \"os_type\", os_type)\n if vm_size is not None:\n pulumi.set(__self__, \"vm_size\", vm_size)", "def __init__(__self__, *,\n boot_disk_kms_key: Optional[pulumi.Input[str]] = None,\n disk_size_gb: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n image_type: Optional[pulumi.Input[str]] = None,\n management: Optional[pulumi.Input['NodeManagementArgs']] = None,\n min_cpu_platform: Optional[pulumi.Input[str]] = None,\n oauth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n service_account: Optional[pulumi.Input[str]] = None,\n shielded_instance_config: Optional[pulumi.Input['ShieldedInstanceConfigArgs']] = None,\n upgrade_settings: Optional[pulumi.Input['UpgradeSettingsArgs']] = None):\n if boot_disk_kms_key is not None:\n pulumi.set(__self__, \"boot_disk_kms_key\", boot_disk_kms_key)\n if disk_size_gb is not None:\n pulumi.set(__self__, \"disk_size_gb\", disk_size_gb)\n if disk_type is not None:\n pulumi.set(__self__, \"disk_type\", disk_type)\n if image_type is not None:\n pulumi.set(__self__, \"image_type\", image_type)\n if management is not None:\n pulumi.set(__self__, \"management\", management)\n if min_cpu_platform is not None:\n warnings.warn(\"\"\"Deprecated. Minimum CPU platform to be used for NAP created node pools. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: Intel Haswell or minCpuPlatform: Intel Sandy Bridge. For more information, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform). This field is deprecated, min_cpu_platform should be specified using `cloud.google.com/requested-min-cpu-platform` label selector on the pod. To unset the min cpu platform field pass \\\"automatic\\\" as field value.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"min_cpu_platform is deprecated: Deprecated. Minimum CPU platform to be used for NAP created node pools. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: Intel Haswell or minCpuPlatform: Intel Sandy Bridge. For more information, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform). This field is deprecated, min_cpu_platform should be specified using `cloud.google.com/requested-min-cpu-platform` label selector on the pod. 
To unset the min cpu platform field pass \\\"automatic\\\" as field value.\"\"\")\n if min_cpu_platform is not None:\n pulumi.set(__self__, \"min_cpu_platform\", min_cpu_platform)\n if oauth_scopes is not None:\n pulumi.set(__self__, \"oauth_scopes\", oauth_scopes)\n if service_account is not None:\n pulumi.set(__self__, \"service_account\", service_account)\n if shielded_instance_config is not None:\n pulumi.set(__self__, \"shielded_instance_config\", shielded_instance_config)\n if upgrade_settings is not None:\n pulumi.set(__self__, \"upgrade_settings\", upgrade_settings)", "def cloud_init(name, vm_=None, **kwargs):\n init_interface = cloud_init_interface(name, vm_, **kwargs)\n name = init_interface.pop(\"name\", name)\n return init(name, **init_interface)", "def create(\n name, config=None, profile=None, network_profile=None, nic_opts=None, **kwargs\n):\n # Required params for 'download' template\n download_template_deps = (\"dist\", \"release\", \"arch\")\n\n cmd = f\"lxc-create -n {name}\"\n\n profile = get_container_profile(copy.deepcopy(profile))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, None)\n profile_match = profile.pop(key, default)\n # Return the profile match if the kwarg match was None, as the\n # lxc.present state will pass these kwargs set to None by default.\n if kw_overrides_match is None:\n return profile_match\n return kw_overrides_match\n\n path = select(\"path\")\n if exists(name, path=path):\n raise CommandExecutionError(f\"Container '{name}' already exists\")\n\n tvg = select(\"vgname\")\n vgname = tvg if tvg else __salt__[\"config.get\"](\"lxc.vgname\")\n\n # The 'template' and 'image' params conflict\n template = select(\"template\")\n image = select(\"image\")\n if template and image:\n raise SaltInvocationError(\"Only one of 'template' and 'image' is permitted\")\n elif not any((template, image, profile)):\n raise SaltInvocationError(\n \"At least one of 'template', 'image', and 'profile' is required\"\n )\n\n options = select(\"options\") or {}\n backing = select(\"backing\")\n if vgname and not backing:\n backing = \"lvm\"\n lvname = select(\"lvname\")\n thinpool = select(\"thinpool\")\n fstype = select(\"fstype\")\n size = select(\"size\", \"1G\")\n zfsroot = select(\"zfsroot\")\n if backing in (\"dir\", \"overlayfs\", \"btrfs\", \"zfs\"):\n fstype = None\n size = None\n # some backends won't support some parameters\n if backing in (\"aufs\", \"dir\", \"overlayfs\", \"btrfs\"):\n lvname = vgname = thinpool = None\n\n if image:\n img_tar = __salt__[\"cp.cache_file\"](image)\n template = os.path.join(\n os.path.dirname(salt.__file__), \"templates\", \"lxc\", \"salt_tarball\"\n )\n options[\"imgtar\"] = img_tar\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n if not os.path.exists(path):\n os.makedirs(path)\n if config:\n cmd += f\" -f {config}\"\n if template:\n cmd += f\" -t {template}\"\n if backing:\n backing = backing.lower()\n cmd += f\" -B {backing}\"\n if backing in (\"zfs\",):\n if zfsroot:\n cmd += f\" --zfsroot {zfsroot}\"\n if backing in (\"lvm\",):\n if lvname:\n cmd += f\" --lvname {lvname}\"\n if vgname:\n cmd += f\" --vgname {vgname}\"\n if thinpool:\n cmd += f\" --thinpool {thinpool}\"\n if backing not in (\"dir\", \"overlayfs\"):\n if fstype:\n cmd += f\" --fstype {fstype}\"\n if size:\n cmd += f\" --fssize {size}\"\n\n if options:\n if template == \"download\":\n missing_deps = [x for x in download_template_deps if x not in options]\n if missing_deps:\n raise 
SaltInvocationError(\n \"Missing params in 'options' dict: {}\".format(\n \", \".join(missing_deps)\n )\n )\n cmd += \" --\"\n for key, val in options.items():\n cmd += f\" --{key} {val}\"\n\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n # please do not merge extra conflicting stuff\n # inside those two line (ret =, return)\n return _after_ignition_network_profile(\n cmd, ret, name, network_profile, path, nic_opts\n )", "def create_kubernetes_cluster(\n self,\n name: str,\n template: Union[dto.KubernetesClusterTemplate, str],\n master_size: Union[dto.Size, str],\n worker_size: Union[dto.Size, str],\n worker_count: Optional[int] = None,\n min_worker_count: Optional[int] = None,\n max_worker_count: Optional[int] = None,\n auto_scaling_enabled: bool = False,\n ssh_key: Optional[str] = None\n ) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def ddtest_create_generic_container_w_empty_or_null_name(self, name=None):\n if name is None:\n self._skip_on_issue('launchpad', '1354767')\n\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=container_resp.id, type='generic')", "def create_container(ContainerName=None, Tags=None):\n pass" ]
[ "0.66820186", "0.5803766", "0.57493997", "0.5640015", "0.5627814", "0.56264293", "0.5565917", "0.5543074", "0.5533584", "0.5519633", "0.55153155", "0.54776216", "0.54776216", "0.54749733", "0.5470206", "0.5462942", "0.5453705", "0.5453369", "0.54431856", "0.54233575", "0.54230887", "0.5417577", "0.5415423", "0.5408242", "0.54065675", "0.5401631", "0.53958863", "0.53481454", "0.5345091", "0.53398204" ]
0.60662985
1
Thin wrapper to lxc.init to be used from the saltcloud lxc driver. name: Name of the container; may be None and then guessed from the saltcloud mapping. `vm_`: saltcloud mapping defaults for the vm.
def cloud_init(name, vm_=None, **kwargs):
    init_interface = cloud_init_interface(name, vm_, **kwargs)
    name = init_interface.pop("name", name)
    return init(name, **init_interface)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cloud_init_interface(name, vm_=None, **kwargs):\n if vm_ is None:\n vm_ = {}\n vm_ = copy.deepcopy(vm_)\n vm_ = salt.utils.dictupdate.update(vm_, kwargs)\n\n profile_data = copy.deepcopy(vm_.get(\"lxc_profile\", vm_.get(\"profile\", {})))\n if not isinstance(profile_data, (dict, (str,))):\n profile_data = {}\n profile = get_container_profile(profile_data)\n\n def _cloud_get(k, default=None):\n return vm_.get(k, profile.get(k, default))\n\n if name is None:\n name = vm_[\"name\"]\n # if we are on ubuntu, default to ubuntu\n default_template = \"\"\n if __grains__.get(\"os\", \"\") in [\"Ubuntu\"]:\n default_template = \"ubuntu\"\n image = _cloud_get(\"image\")\n if not image:\n _cloud_get(\"template\", default_template)\n backing = _cloud_get(\"backing\", \"dir\")\n if image:\n profile[\"template\"] = image\n vgname = _cloud_get(\"vgname\", None)\n if vgname:\n profile[\"vgname\"] = vgname\n if backing:\n profile[\"backing\"] = backing\n snapshot = _cloud_get(\"snapshot\", False)\n autostart = bool(_cloud_get(\"autostart\", True))\n dnsservers = _cloud_get(\"dnsservers\", [])\n dns_via_dhcp = _cloud_get(\"dns_via_dhcp\", True)\n password = _cloud_get(\"password\", \"s3cr3t\")\n password_encrypted = _cloud_get(\"password_encrypted\", False)\n fstype = _cloud_get(\"fstype\", None)\n lvname = _cloud_get(\"lvname\", None)\n thinpool = _cloud_get(\"thinpool\", None)\n pub_key = _cloud_get(\"pub_key\", None)\n priv_key = _cloud_get(\"priv_key\", None)\n size = _cloud_get(\"size\", \"20G\")\n script = _cloud_get(\"script\", None)\n script_args = _cloud_get(\"script_args\", None)\n users = _cloud_get(\"users\", None)\n if users is None:\n users = []\n ssh_username = _cloud_get(\"ssh_username\", None)\n if ssh_username and (ssh_username not in users):\n users.append(ssh_username)\n network_profile = _cloud_get(\"network_profile\", None)\n nic_opts = kwargs.get(\"nic_opts\", None)\n netmask = _cloud_get(\"netmask\", \"24\")\n path = _cloud_get(\"path\", None)\n bridge = _cloud_get(\"bridge\", None)\n gateway = _cloud_get(\"gateway\", None)\n unconditional_install = _cloud_get(\"unconditional_install\", False)\n force_install = _cloud_get(\"force_install\", True)\n config = _get_salt_config(_cloud_get(\"config\", {}), **vm_)\n default_nic = _cloud_get(\"default_nic\", DEFAULT_NIC)\n # do the interface with lxc.init mainly via nic_opts\n # to avoid extra and confusing extra use cases.\n if not isinstance(nic_opts, dict):\n nic_opts = salt.utils.odict.OrderedDict()\n # have a reference to the default nic\n eth0 = nic_opts.setdefault(default_nic, salt.utils.odict.OrderedDict())\n # lxc config is based of ifc order, be sure to use odicts.\n if not isinstance(nic_opts, salt.utils.odict.OrderedDict):\n bnic_opts = salt.utils.odict.OrderedDict()\n bnic_opts.update(nic_opts)\n nic_opts = bnic_opts\n gw = None\n # legacy salt.cloud scheme for network interfaces settings support\n bridge = _cloud_get(\"bridge\", None)\n ip = _cloud_get(\"ip\", None)\n mac = _cloud_get(\"mac\", None)\n if ip:\n fullip = ip\n if netmask:\n fullip += f\"/{netmask}\"\n eth0[\"ipv4\"] = fullip\n if mac is not None:\n eth0[\"mac\"] = mac\n for ix, iopts in enumerate(_cloud_get(\"additional_ips\", [])):\n ifh = f\"eth{ix + 1}\"\n ethx = nic_opts.setdefault(ifh, {})\n if gw is None:\n gw = iopts.get(\"gateway\", ethx.get(\"gateway\", None))\n if gw:\n # only one and only one default gateway is allowed !\n eth0.pop(\"gateway\", None)\n gateway = None\n # even if the gateway if on default \"eth0\" nic\n # and we popped it will 
work\n # as we reinject or set it here.\n ethx[\"gateway\"] = gw\n elink = iopts.get(\"link\", ethx.get(\"link\", None))\n if elink:\n ethx[\"link\"] = elink\n # allow dhcp\n aip = iopts.get(\"ipv4\", iopts.get(\"ip\", None))\n if aip:\n ethx[\"ipv4\"] = aip\n nm = iopts.get(\"netmask\", \"\")\n if nm:\n ethx[\"ipv4\"] += f\"/{nm}\"\n for i in (\"mac\", \"hwaddr\"):\n if i in iopts:\n ethx[\"mac\"] = iopts[i]\n break\n if \"mac\" not in ethx:\n ethx[\"mac\"] = salt.utils.network.gen_mac()\n # last round checking for unique gateway and such\n gw = None\n for ethx in [a for a in nic_opts]:\n ndata = nic_opts[ethx]\n if gw:\n ndata.pop(\"gateway\", None)\n if \"gateway\" in ndata:\n gw = ndata[\"gateway\"]\n gateway = None\n # only use a default bridge / gateway if we configured them\n # via the legacy salt cloud configuration style.\n # On other cases, we should rely on settings provided by the new\n # salt lxc network profile style configuration which can\n # be also be overridden or a per interface basis via the nic_opts dict.\n if bridge:\n eth0[\"link\"] = bridge\n if gateway:\n eth0[\"gateway\"] = gateway\n #\n lxc_init_interface = {}\n lxc_init_interface[\"name\"] = name\n lxc_init_interface[\"config\"] = config\n lxc_init_interface[\"memory\"] = _cloud_get(\"memory\", 0) # nolimit\n lxc_init_interface[\"pub_key\"] = pub_key\n lxc_init_interface[\"priv_key\"] = priv_key\n lxc_init_interface[\"nic_opts\"] = nic_opts\n for clone_from in [\"clone_from\", \"clone\", \"from_container\"]:\n # clone_from should default to None if not available\n lxc_init_interface[\"clone_from\"] = _cloud_get(clone_from, None)\n if lxc_init_interface[\"clone_from\"] is not None:\n break\n lxc_init_interface[\"profile\"] = profile\n lxc_init_interface[\"snapshot\"] = snapshot\n lxc_init_interface[\"dnsservers\"] = dnsservers\n lxc_init_interface[\"fstype\"] = fstype\n lxc_init_interface[\"path\"] = path\n lxc_init_interface[\"vgname\"] = vgname\n lxc_init_interface[\"size\"] = size\n lxc_init_interface[\"lvname\"] = lvname\n lxc_init_interface[\"thinpool\"] = thinpool\n lxc_init_interface[\"force_install\"] = force_install\n lxc_init_interface[\"unconditional_install\"] = unconditional_install\n lxc_init_interface[\"bootstrap_url\"] = script\n lxc_init_interface[\"bootstrap_args\"] = script_args\n lxc_init_interface[\"bootstrap_shell\"] = _cloud_get(\"bootstrap_shell\", \"sh\")\n lxc_init_interface[\"bootstrap_delay\"] = _cloud_get(\"bootstrap_delay\", None)\n lxc_init_interface[\"autostart\"] = autostart\n lxc_init_interface[\"users\"] = users\n lxc_init_interface[\"password\"] = password\n lxc_init_interface[\"password_encrypted\"] = password_encrypted\n # be sure not to let objects goes inside the return\n # as this return will be msgpacked for use in the runner !\n lxc_init_interface[\"network_profile\"] = network_profile\n for i in [\"cpu\", \"cpuset\", \"cpushare\"]:\n if _cloud_get(i, None):\n try:\n lxc_init_interface[i] = vm_[i]\n except KeyError:\n lxc_init_interface[i] = profile[i]\n return lxc_init_interface", "def test_initialize_default(self, create_mock, libvirt_mock):\n resources = lxc.LXCResources('foo', {'domain': 'bar'})\n libvirt_mock.open.assert_called_with('lxc:///')\n create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name=None)", "def create_lxd_container(public_key=None, name=\"test_name\"):\n container = None\n\n # Format name so it's valid\n name = name.replace(\"_\", \"-\").replace(\".\", \"\")\n\n client = get_lxd_client()\n if not client:\n raise 
Exception(\"Unable to connect to LXD\")\n\n test_machine = \"test-{}-{}\".format(\n uuid.uuid4().hex[-4:],\n name,\n )\n\n private_key_path, public_key_path = find_n2vc_ssh_keys()\n\n try:\n # create profile w/cloud-init and juju ssh key\n if not public_key:\n public_key = \"\"\n with open(public_key_path, \"r\") as f:\n public_key = f.readline()\n\n client.profiles.create(\n test_machine,\n config={\n 'user.user-data': '#cloud-config\\nssh_authorized_keys:\\n- {}'.format(public_key)},\n devices={\n 'root': {'path': '/', 'pool': 'default', 'type': 'disk'},\n 'eth0': {\n 'nictype': 'bridged',\n 'parent': 'lxdbr0',\n 'type': 'nic'\n }\n }\n )\n except Exception as ex:\n debug(\"Error creating lxd profile {}: {}\".format(test_machine, ex))\n raise ex\n\n try:\n # create lxc machine\n config = {\n 'name': test_machine,\n 'source': {\n 'type': 'image',\n 'alias': 'xenial',\n 'mode': 'pull',\n 'protocol': 'simplestreams',\n 'server': 'https://cloud-images.ubuntu.com/releases',\n },\n 'profiles': [test_machine],\n }\n container = client.containers.create(config, wait=True)\n container.start(wait=True)\n except Exception as ex:\n debug(\"Error creating lxd container {}: {}\".format(test_machine, ex))\n # This is a test-ending failure.\n raise ex\n\n def wait_for_network(container, timeout=30):\n \"\"\"Wait for eth0 to have an ipv4 address.\"\"\"\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if 'eth0' in container.state().network:\n addresses = container.state().network['eth0']['addresses']\n if len(addresses) > 0:\n if addresses[0]['family'] == 'inet':\n return addresses[0]\n return None\n\n try:\n wait_for_network(container)\n except Exception as ex:\n debug(\n \"Error waiting for container {} network: {}\".format(\n test_machine,\n ex,\n )\n )\n\n try:\n waitcount = 0\n while waitcount <= 5:\n if is_sshd_running(container):\n break\n waitcount += 1\n time.sleep(1)\n if waitcount >= 5:\n debug(\"couldn't detect sshd running\")\n raise Exception(\"Unable to verify container sshd\")\n\n except Exception as ex:\n debug(\n \"Error checking sshd status on {}: {}\".format(\n test_machine,\n ex,\n )\n )\n\n # HACK: We need to give sshd a chance to bind to the interface,\n # and pylxd's container.execute seems to be broken and fails and/or\n # hangs trying to properly check if the service is up.\n (exit_code, stdout, stderr) = container.execute([\n 'ping',\n '-c', '5', # Wait for 5 ECHO_REPLY\n '8.8.8.8', # Ping Google's public DNS\n '-W', '15', # Set a 15 second deadline\n ])\n if exit_code > 0:\n # The network failed\n raise Exception(\"Unable to verify container network\")\n\n return container", "def startLXCContainer(self,node,vmid):\n post_data = None\n data = self.connect('post','nodes/%s/lxc/%s/status/start' % (node,vmid), post_data)\n return data", "def boot(self, **kwargs):\n\n cloud = kwargs.get('cloud', Default.cloud)\n name = kwargs.get('name', Vm.generate_vm_name())\n image = kwargs.get('image', Default.image)\n flavor = kwargs.get('flavor', Default.flavor)\n key = kwargs.get('key', Default.key)\n secgroup = kwargs.get('secgroup', Default.secgroup)\n group = kwargs.get('group', Default.group)\n username = kwargs.get('username', Image.guess_username(image))\n cluster = kwargs.get('cluster', None)\n\n # shorthand for getting a dict of all the vm details\n #\n # IMPORTANT: anything declared prior to the call to `locals()`\n # may be passed to `Vm.boot`, so make sure that only parameters are\n # defined above this comment.\n details = locals()\n 
details.pop('kwargs')\n\n # currently, Vm.boot returns the instance UUID from the provider for openstack images\n # 2016/12/12\n uuid = Vm.boot(**details)\n\n\n # helper function: the Vm.boot only returns a UUID, but we\n # need to use the VM model instead. Additionally, we'll need\n # to poll the VM to wait until it is active.\n #\n # The kwargs are used to select the item from the DB:\n # eg: uuid=???, cm_id=???, etc\n def get_vm(**kwargs):\n \"\"\"Selects the VM based on the given properties\"\"\"\n model = self.db.vm_table_from_provider('openstack')\n vm = self.db.select(model, **kwargs).all()\n assert len(vm) == 1, vm\n vm = vm[0]\n return vm\n\n # get the VM from the UUID\n vm = get_vm(uuid=uuid)\n cm_id = vm.cm_id\n\n def is_active():\n Vm.refresh(cloud=cloud)\n vm = get_vm(cm_id=cm_id)\n return vm.status == 'ACTIVE'\n\n if not exponential_backoff(is_active):\n Console.error('Failed to get ACTIVE vm within timeframe')\n raise ValueError\n\n assert is_active()\n vm = get_vm(cm_id=cm_id)\n assert isinstance(vm, VM_OPENSTACK), vm.__class__\n\n return OpenstackNode(model=vm, provider=self)", "def __init__(self, conn, vm_name):\n self.vm = self.get_obj(conn, vim.VirtualMachine, vm_name)\n assert(self.vm)\n self.spec = vim.vm.ConfigSpec()", "def lxd_init(self, iface):\n lxd_init_cmds = [\n self.set_lxd_init_auto,\n self.set_lxc_config,\n self.set_lxd_storage,\n partial(self.setup_bridge_network, iface),\n self.setup_unused_bridge_network,\n self.set_default_profile\n ]\n\n for cmd in lxd_init_cmds:\n app.log.debug(\"LXD Init: {}\".format(cmd))\n cmd()", "def __init__(__self__, *,\n common: 'outputs.CSIVXFlexOSSpecDriverCommon',\n config_version: str,\n replicas: int,\n auth_secret: Optional[str] = None,\n controller: Optional['outputs.CSIVXFlexOSSpecDriverController'] = None,\n force_update: Optional[bool] = None,\n node: Optional['outputs.CSIVXFlexOSSpecDriverNode'] = None,\n side_cars: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverSideCars']] = None,\n snapshot_class: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverSnapshotClass']] = None,\n storage_class: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverStorageClass']] = None,\n tls_cert_secret: Optional[str] = None):\n pulumi.set(__self__, \"common\", common)\n pulumi.set(__self__, \"config_version\", config_version)\n pulumi.set(__self__, \"replicas\", replicas)\n if auth_secret is not None:\n pulumi.set(__self__, \"auth_secret\", auth_secret)\n if controller is not None:\n pulumi.set(__self__, \"controller\", controller)\n if force_update is not None:\n pulumi.set(__self__, \"force_update\", force_update)\n if node is not None:\n pulumi.set(__self__, \"node\", node)\n if side_cars is not None:\n pulumi.set(__self__, \"side_cars\", side_cars)\n if snapshot_class is not None:\n pulumi.set(__self__, \"snapshot_class\", snapshot_class)\n if storage_class is not None:\n pulumi.set(__self__, \"storage_class\", storage_class)\n if tls_cert_secret is not None:\n pulumi.set(__self__, \"tls_cert_secret\", tls_cert_secret)", "def init(\n name,\n config=None,\n cpuset=None,\n cpushare=None,\n memory=None,\n profile=None,\n network_profile=None,\n nic_opts=None,\n cpu=None,\n autostart=True,\n password=None,\n password_encrypted=None,\n users=None,\n dnsservers=None,\n searchdomains=None,\n bridge=None,\n gateway=None,\n pub_key=None,\n priv_key=None,\n force_install=False,\n unconditional_install=False,\n bootstrap_delay=None,\n bootstrap_args=None,\n bootstrap_shell=None,\n bootstrap_url=None,\n **kwargs,\n):\n ret = {\"name\": name, 
\"changes\": {}}\n\n profile = get_container_profile(copy.deepcopy(profile))\n if not network_profile:\n network_profile = profile.get(\"network_profile\")\n if not network_profile:\n network_profile = DEFAULT_NIC\n\n # Changes is a pointer to changes_dict['init']. This method is used so that\n # we can have a list of changes as they are made, providing an ordered list\n # of things that were changed.\n changes_dict = {\"init\": []}\n changes = changes_dict.get(\"init\")\n\n if users is None:\n users = []\n dusers = [\"root\"]\n for user in dusers:\n if user not in users:\n users.append(user)\n\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, _marker)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is _marker:\n return profile_match\n return kw_overrides_match\n\n path = select(\"path\")\n bpath = get_root_path(path)\n state_pre = state(name, path=path)\n tvg = select(\"vgname\")\n vgname = tvg if tvg else __salt__[\"config.get\"](\"lxc.vgname\")\n start_ = select(\"start\", True)\n autostart = select(\"autostart\", autostart)\n seed = select(\"seed\", True)\n install = select(\"install\", True)\n seed_cmd = select(\"seed_cmd\")\n salt_config = _get_salt_config(config, **kwargs)\n approve_key = select(\"approve_key\", True)\n clone_from = select(\"clone_from\")\n\n # If using a volume group then set up to make snapshot cow clones\n if vgname and not clone_from:\n try:\n kwargs[\"vgname\"] = vgname\n clone_from = _get_base(profile=profile, **kwargs)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = exc.strerror\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n if not kwargs.get(\"snapshot\") is False:\n kwargs[\"snapshot\"] = True\n does_exist = exists(name, path=path)\n to_reboot = False\n remove_seed_marker = False\n if does_exist:\n pass\n elif clone_from:\n remove_seed_marker = True\n try:\n clone(name, clone_from, profile=profile, **kwargs)\n changes.append({\"create\": \"Container cloned\"})\n except (SaltInvocationError, CommandExecutionError) as exc:\n if \"already exists\" in exc.strerror:\n changes.append({\"create\": \"Container already exists\"})\n else:\n ret[\"result\"] = False\n ret[\"comment\"] = exc.strerror\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n cfg = _LXCConfig(\n name=name,\n network_profile=network_profile,\n nic_opts=nic_opts,\n bridge=bridge,\n path=path,\n gateway=gateway,\n autostart=autostart,\n cpuset=cpuset,\n cpushare=cpushare,\n memory=memory,\n )\n old_chunks = read_conf(cfg.path, out_format=\"commented\")\n cfg.write()\n chunks = read_conf(cfg.path, out_format=\"commented\")\n if old_chunks != chunks:\n to_reboot = True\n else:\n remove_seed_marker = True\n cfg = _LXCConfig(\n network_profile=network_profile,\n nic_opts=nic_opts,\n cpuset=cpuset,\n path=path,\n bridge=bridge,\n gateway=gateway,\n autostart=autostart,\n cpushare=cpushare,\n memory=memory,\n )\n with cfg.tempfile() as cfile:\n try:\n create(name, config=cfile.name, profile=profile, **kwargs)\n changes.append({\"create\": \"Container created\"})\n except (SaltInvocationError, CommandExecutionError) as exc:\n if \"already exists\" in exc.strerror:\n changes.append({\"create\": \"Container already exists\"})\n else:\n ret[\"comment\"] = exc.strerror\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n cpath = os.path.join(bpath, name, \"config\")\n old_chunks = []\n if 
os.path.exists(cpath):\n old_chunks = read_conf(cpath, out_format=\"commented\")\n new_cfg = _config_list(\n conf_tuples=old_chunks,\n cpu=cpu,\n network_profile=network_profile,\n nic_opts=nic_opts,\n bridge=bridge,\n cpuset=cpuset,\n cpushare=cpushare,\n memory=memory,\n )\n if new_cfg:\n edit_conf(cpath, out_format=\"commented\", lxc_config=new_cfg)\n chunks = read_conf(cpath, out_format=\"commented\")\n if old_chunks != chunks:\n to_reboot = True\n\n # last time to be sure any of our property is correctly applied\n cfg = _LXCConfig(\n name=name,\n network_profile=network_profile,\n nic_opts=nic_opts,\n bridge=bridge,\n path=path,\n gateway=gateway,\n autostart=autostart,\n cpuset=cpuset,\n cpushare=cpushare,\n memory=memory,\n )\n old_chunks = []\n if os.path.exists(cfg.path):\n old_chunks = read_conf(cfg.path, out_format=\"commented\")\n cfg.write()\n chunks = read_conf(cfg.path, out_format=\"commented\")\n if old_chunks != chunks:\n changes.append({\"config\": \"Container configuration updated\"})\n to_reboot = True\n\n if to_reboot:\n try:\n stop(name, path=path)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = f\"Unable to stop container: {exc}\"\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n if not does_exist or (does_exist and state(name, path=path) != \"running\"):\n try:\n start(name, path=path)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = f\"Unable to stop container: {exc}\"\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n\n if remove_seed_marker:\n run(\n name,\n f\"rm -f '{SEED_MARKER}'\",\n path=path,\n chroot_fallback=False,\n python_shell=False,\n )\n\n # set the default user/password, only the first time\n if ret.get(\"result\", True) and password:\n gid = \"/.lxc.initial_pass\"\n gids = [gid, \"/lxc.initial_pass\", f\"/.lxc.{name}.initial_pass\"]\n if not any(\n retcode(\n name,\n f'test -e \"{x}\"',\n chroot_fallback=True,\n path=path,\n ignore_retcode=True,\n )\n == 0\n for x in gids\n ):\n # think to touch the default user generated by default templates\n # which has a really unsecure passwords...\n # root is defined as a member earlier in the code\n for default_user in [\"ubuntu\"]:\n if (\n default_user not in users\n and retcode(\n name,\n f\"id {default_user}\",\n python_shell=False,\n path=path,\n chroot_fallback=True,\n ignore_retcode=True,\n )\n == 0\n ):\n users.append(default_user)\n for user in users:\n try:\n cret = set_password(\n name,\n users=[user],\n path=path,\n password=password,\n encrypted=password_encrypted,\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n msg = f\"{user}: Failed to set password\" + exc.strerror\n # only hardfail in unrecoverable situation:\n # root cannot be setted up\n if user == \"root\":\n ret[\"comment\"] = msg\n ret[\"result\"] = False\n else:\n log.debug(msg)\n if ret.get(\"result\", True):\n changes.append({\"password\": \"Password(s) updated\"})\n if (\n retcode(\n name,\n 'sh -c \\'touch \"{0}\"; test -e \"{0}\"\\''.format(gid),\n path=path,\n chroot_fallback=True,\n ignore_retcode=True,\n )\n != 0\n ):\n ret[\"comment\"] = \"Failed to set password marker\"\n changes[-1][\"password\"] += \". 
\" + ret[\"comment\"] + \".\"\n ret[\"result\"] = False\n\n # set dns servers if any, only the first time\n if ret.get(\"result\", True) and dnsservers:\n # retro compatibility, test also old markers\n gid = \"/.lxc.initial_dns\"\n gids = [gid, \"/lxc.initial_dns\", f\"/lxc.{name}.initial_dns\"]\n if not any(\n retcode(\n name,\n f'test -e \"{x}\"',\n chroot_fallback=True,\n path=path,\n ignore_retcode=True,\n )\n == 0\n for x in gids\n ):\n try:\n set_dns(\n name, path=path, dnsservers=dnsservers, searchdomains=searchdomains\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = \"Failed to set DNS: \" + exc.strerror\n ret[\"result\"] = False\n else:\n changes.append({\"dns\": \"DNS updated\"})\n if (\n retcode(\n name,\n 'sh -c \\'touch \"{0}\"; test -e \"{0}\"\\''.format(gid),\n chroot_fallback=True,\n path=path,\n ignore_retcode=True,\n )\n != 0\n ):\n ret[\"comment\"] = \"Failed to set DNS marker\"\n changes[-1][\"dns\"] += \". \" + ret[\"comment\"] + \".\"\n ret[\"result\"] = False\n\n # retro compatibility, test also old markers\n if remove_seed_marker:\n run(name, f\"rm -f '{SEED_MARKER}'\", path=path, python_shell=False)\n gid = \"/.lxc.initial_seed\"\n gids = [gid, \"/lxc.initial_seed\"]\n if any(\n retcode(\n name,\n f\"test -e {x}\",\n path=path,\n chroot_fallback=True,\n ignore_retcode=True,\n )\n == 0\n for x in gids\n ) or not ret.get(\"result\", True):\n pass\n elif seed or seed_cmd:\n if seed:\n try:\n result = bootstrap(\n name,\n config=salt_config,\n path=path,\n approve_key=approve_key,\n pub_key=pub_key,\n priv_key=priv_key,\n install=install,\n force_install=force_install,\n unconditional_install=unconditional_install,\n bootstrap_delay=bootstrap_delay,\n bootstrap_url=bootstrap_url,\n bootstrap_shell=bootstrap_shell,\n bootstrap_args=bootstrap_args,\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = \"Bootstrap failed: \" + exc.strerror\n ret[\"result\"] = False\n else:\n if not result:\n ret[\n \"comment\"\n ] = \"Bootstrap failed, see minion log for more information\"\n ret[\"result\"] = False\n else:\n changes.append({\"bootstrap\": \"Container successfully bootstrapped\"})\n elif seed_cmd:\n try:\n result = __salt__[seed_cmd](\n info(name, path=path)[\"rootfs\"], name, salt_config\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = \"Bootstrap via seed_cmd '{}' failed: {}\".format(\n seed_cmd, exc.strerror\n )\n ret[\"result\"] = False\n else:\n if not result:\n ret[\"comment\"] = (\n \"Bootstrap via seed_cmd '{}' failed, \"\n \"see minion log for more information \".format(seed_cmd)\n )\n ret[\"result\"] = False\n else:\n changes.append(\n {\n \"bootstrap\": (\n \"Container successfully bootstrapped \"\n \"using seed_cmd '{}'\".format(seed_cmd)\n )\n }\n )\n\n if ret.get(\"result\", True) and not start_:\n try:\n stop(name, path=path)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = f\"Unable to stop container: {exc}\"\n ret[\"result\"] = False\n\n state_post = state(name, path=path)\n if state_pre != state_post:\n changes.append({\"state\": {\"old\": state_pre, \"new\": state_post}})\n\n if ret.get(\"result\", True):\n ret[\"comment\"] = f\"Container '{name}' successfully initialized\"\n ret[\"result\"] = True\n if changes:\n ret[\"changes\"] = changes_dict\n return ret", "def __init__(__self__, *,\n resource_group_name: pulumi.Input[str],\n agent_upgrade: Optional[pulumi.Input['AgentUpgradeArgs']] = None,\n 
client_public_key: Optional[pulumi.Input[str]] = None,\n extensions: Optional[pulumi.Input[Sequence[pulumi.Input['MachineExtensionInstanceViewArgs']]]] = None,\n identity: Optional[pulumi.Input['IdentityArgs']] = None,\n location: Optional[pulumi.Input[str]] = None,\n location_data: Optional[pulumi.Input['LocationDataArgs']] = None,\n machine_name: Optional[pulumi.Input[str]] = None,\n mssql_discovered: Optional[pulumi.Input[str]] = None,\n os_profile: Optional[pulumi.Input['OSProfileArgs']] = None,\n os_type: Optional[pulumi.Input[str]] = None,\n parent_cluster_resource_id: Optional[pulumi.Input[str]] = None,\n private_link_scope_resource_id: Optional[pulumi.Input[str]] = None,\n service_statuses: Optional[pulumi.Input['ServiceStatusesArgs']] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vm_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if agent_upgrade is not None:\n pulumi.set(__self__, \"agent_upgrade\", agent_upgrade)\n if client_public_key is not None:\n pulumi.set(__self__, \"client_public_key\", client_public_key)\n if extensions is not None:\n pulumi.set(__self__, \"extensions\", extensions)\n if identity is not None:\n pulumi.set(__self__, \"identity\", identity)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if location_data is not None:\n pulumi.set(__self__, \"location_data\", location_data)\n if machine_name is not None:\n pulumi.set(__self__, \"machine_name\", machine_name)\n if mssql_discovered is not None:\n pulumi.set(__self__, \"mssql_discovered\", mssql_discovered)\n if os_profile is not None:\n pulumi.set(__self__, \"os_profile\", os_profile)\n if os_type is not None:\n pulumi.set(__self__, \"os_type\", os_type)\n if parent_cluster_resource_id is not None:\n pulumi.set(__self__, \"parent_cluster_resource_id\", parent_cluster_resource_id)\n if private_link_scope_resource_id is not None:\n pulumi.set(__self__, \"private_link_scope_resource_id\", private_link_scope_resource_id)\n if service_statuses is not None:\n pulumi.set(__self__, \"service_statuses\", service_statuses)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if vm_id is not None:\n pulumi.set(__self__, \"vm_id\", vm_id)", "def __init__(__self__, *,\n args: Optional[Sequence[str]] = None,\n envs: Optional[Sequence['outputs.CSIPowerStoreSpecDriverSideCarsEnvs']] = None,\n image: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n name: Optional[str] = None,\n node_selector: Optional[Mapping[str, str]] = None,\n tolerations: Optional[Sequence['outputs.CSIPowerStoreSpecDriverSideCarsTolerations']] = None):\n if args is not None:\n pulumi.set(__self__, \"args\", args)\n if envs is not None:\n pulumi.set(__self__, \"envs\", envs)\n if image is not None:\n pulumi.set(__self__, \"image\", image)\n if image_pull_policy is not None:\n pulumi.set(__self__, \"image_pull_policy\", image_pull_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_selector is not None:\n pulumi.set(__self__, \"node_selector\", node_selector)\n if tolerations is not None:\n pulumi.set(__self__, \"tolerations\", tolerations)", "def __init__(__self__, *,\n args: Optional[Sequence[str]] = None,\n envs: Optional[Sequence['outputs.CSIPowerStoreSpecDriverControllerEnvs']] = None,\n image: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n name: Optional[str] = None,\n node_selector: Optional[Mapping[str, str]] = None,\n tolerations: 
Optional[Sequence['outputs.CSIPowerStoreSpecDriverControllerTolerations']] = None):\n if args is not None:\n pulumi.set(__self__, \"args\", args)\n if envs is not None:\n pulumi.set(__self__, \"envs\", envs)\n if image is not None:\n pulumi.set(__self__, \"image\", image)\n if image_pull_policy is not None:\n pulumi.set(__self__, \"image_pull_policy\", image_pull_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_selector is not None:\n pulumi.set(__self__, \"node_selector\", node_selector)\n if tolerations is not None:\n pulumi.set(__self__, \"tolerations\", tolerations)", "def __init__(__self__, *,\n args: Optional[Sequence[str]] = None,\n envs: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverSideCarsEnvs']] = None,\n image: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n name: Optional[str] = None,\n node_selector: Optional[Mapping[str, str]] = None,\n tolerations: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverSideCarsTolerations']] = None):\n if args is not None:\n pulumi.set(__self__, \"args\", args)\n if envs is not None:\n pulumi.set(__self__, \"envs\", envs)\n if image is not None:\n pulumi.set(__self__, \"image\", image)\n if image_pull_policy is not None:\n pulumi.set(__self__, \"image_pull_policy\", image_pull_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_selector is not None:\n pulumi.set(__self__, \"node_selector\", node_selector)\n if tolerations is not None:\n pulumi.set(__self__, \"tolerations\", tolerations)", "def __init__(__self__, *,\n args: Optional[Sequence[str]] = None,\n envs: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverControllerEnvs']] = None,\n image: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n name: Optional[str] = None,\n node_selector: Optional[Mapping[str, str]] = None,\n tolerations: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverControllerTolerations']] = None):\n if args is not None:\n pulumi.set(__self__, \"args\", args)\n if envs is not None:\n pulumi.set(__self__, \"envs\", envs)\n if image is not None:\n pulumi.set(__self__, \"image\", image)\n if image_pull_policy is not None:\n pulumi.set(__self__, \"image_pull_policy\", image_pull_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_selector is not None:\n pulumi.set(__self__, \"node_selector\", node_selector)\n if tolerations is not None:\n pulumi.set(__self__, \"tolerations\", tolerations)", "def launch_lxc(app, ports):\n print 'Launching blank LXC for: ', app\n tstart = datetime.now()\n os.system('lxc-create -t alpine -f lxcConfig -n %s' % (app))\n tend = datetime.now()\n os.system('lxc-start -n %s' % app)\n ip = check_ip_status(app)\n install_ssh(app)\n forward_ports(app, ports, ip)\n gen_key(app)\n print 'Time elapsed for launch_lxc:', tend - tstart\n return ip", "def __init__(__self__, *,\n args: Optional[Sequence[str]] = None,\n envs: Optional[Sequence['outputs.CSIPowerStoreSpecDriverCommonEnvs']] = None,\n image: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n name: Optional[str] = None,\n node_selector: Optional[Mapping[str, str]] = None,\n tolerations: Optional[Sequence['outputs.CSIPowerStoreSpecDriverCommonTolerations']] = None):\n if args is not None:\n pulumi.set(__self__, \"args\", args)\n if envs is not None:\n pulumi.set(__self__, \"envs\", envs)\n if image is not None:\n pulumi.set(__self__, \"image\", image)\n if image_pull_policy is not None:\n pulumi.set(__self__, \"image_pull_policy\", image_pull_policy)\n if name is not 
None:\n pulumi.set(__self__, \"name\", name)\n if node_selector is not None:\n pulumi.set(__self__, \"node_selector\", node_selector)\n if tolerations is not None:\n pulumi.set(__self__, \"tolerations\", tolerations)", "def __init__(__self__, *,\n args: Optional[Sequence[str]] = None,\n envs: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverCommonEnvs']] = None,\n image: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n name: Optional[str] = None,\n node_selector: Optional[Mapping[str, str]] = None,\n tolerations: Optional[Sequence['outputs.CSIVXFlexOSSpecDriverCommonTolerations']] = None):\n if args is not None:\n pulumi.set(__self__, \"args\", args)\n if envs is not None:\n pulumi.set(__self__, \"envs\", envs)\n if image is not None:\n pulumi.set(__self__, \"image\", image)\n if image_pull_policy is not None:\n pulumi.set(__self__, \"image_pull_policy\", image_pull_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_selector is not None:\n pulumi.set(__self__, \"node_selector\", node_selector)\n if tolerations is not None:\n pulumi.set(__self__, \"tolerations\", tolerations)", "def test_initialize_hypervisor(self, create_mock, libvirt_mock):\n resources = lxc.LXCResources('foo', {'domain': 'bar', 'hypervisor': 'baz'})\n libvirt_mock.open.assert_called_with('baz')\n create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name=None)", "def init():\n\n @click.command()\n @click.option('--cell', required=True,\n envvar='TREADMILL_CELL',\n callback=cli.handle_context_opt,\n expose_value=False)\n @click.option('--ssh', help='SSH client to use.',\n type=click.Path(exists=True, readable=True))\n @click.argument('app')\n @click.argument('command', nargs=-1)\n def ssh(ssh, app, command):\n \"\"\"SSH into Treadmill container.\"\"\"\n if ssh is None:\n ssh = _DEFAULT_SSH\n\n if app.find('#') == -1:\n # Instance is not specified, list matching and exit.\n raise click.BadParameter('Specify full instance name: xxx#nnn')\n\n app_discovery = discovery.Discovery(context.GLOBAL.zk.conn, app, 'ssh')\n app_discovery.sync()\n\n # Restore default signal mask disabled by python spawning new thread\n # for Zk connection.\n #\n # TODO: should this be done as part of zkutils.connect?\n for sig in range(1, signal.NSIG):\n try:\n signal.signal(sig, signal.SIG_DFL)\n except OSError:\n pass\n\n # TODO: not sure how to handle mutliple instances.\n for (app, hostport) in app_discovery.items():\n _LOGGER.info('%s :: %s', app, hostport)\n if hostport:\n host, port = hostport.split(b':')\n run_ssh(host, port, ssh, list(command))\n\n return ssh", "def terraform_init():\n return subprocess.call([\n \"terraform\",\n \"init\",\n \"terraform/aws\"\n ])", "def __init__(__self__, *,\n cloud_run_config: Optional[pulumi.Input['CloudRunConfigArgs']] = None,\n config_connector_config: Optional[pulumi.Input['ConfigConnectorConfigArgs']] = None,\n dns_cache_config: Optional[pulumi.Input['DnsCacheConfigArgs']] = None,\n gce_persistent_disk_csi_driver_config: Optional[pulumi.Input['GcePersistentDiskCsiDriverConfigArgs']] = None,\n gcp_filestore_csi_driver_config: Optional[pulumi.Input['GcpFilestoreCsiDriverConfigArgs']] = None,\n gcs_fuse_csi_driver_config: Optional[pulumi.Input['GcsFuseCsiDriverConfigArgs']] = None,\n gke_backup_agent_config: Optional[pulumi.Input['GkeBackupAgentConfigArgs']] = None,\n horizontal_pod_autoscaling: Optional[pulumi.Input['HorizontalPodAutoscalingArgs']] = None,\n http_load_balancing: Optional[pulumi.Input['HttpLoadBalancingArgs']] = None,\n 
istio_config: Optional[pulumi.Input['IstioConfigArgs']] = None,\n kalm_config: Optional[pulumi.Input['KalmConfigArgs']] = None,\n kubernetes_dashboard: Optional[pulumi.Input['KubernetesDashboardArgs']] = None,\n network_policy_config: Optional[pulumi.Input['NetworkPolicyConfigArgs']] = None):\n if cloud_run_config is not None:\n pulumi.set(__self__, \"cloud_run_config\", cloud_run_config)\n if config_connector_config is not None:\n pulumi.set(__self__, \"config_connector_config\", config_connector_config)\n if dns_cache_config is not None:\n pulumi.set(__self__, \"dns_cache_config\", dns_cache_config)\n if gce_persistent_disk_csi_driver_config is not None:\n pulumi.set(__self__, \"gce_persistent_disk_csi_driver_config\", gce_persistent_disk_csi_driver_config)\n if gcp_filestore_csi_driver_config is not None:\n pulumi.set(__self__, \"gcp_filestore_csi_driver_config\", gcp_filestore_csi_driver_config)\n if gcs_fuse_csi_driver_config is not None:\n pulumi.set(__self__, \"gcs_fuse_csi_driver_config\", gcs_fuse_csi_driver_config)\n if gke_backup_agent_config is not None:\n pulumi.set(__self__, \"gke_backup_agent_config\", gke_backup_agent_config)\n if horizontal_pod_autoscaling is not None:\n pulumi.set(__self__, \"horizontal_pod_autoscaling\", horizontal_pod_autoscaling)\n if http_load_balancing is not None:\n pulumi.set(__self__, \"http_load_balancing\", http_load_balancing)\n if istio_config is not None:\n pulumi.set(__self__, \"istio_config\", istio_config)\n if kalm_config is not None:\n pulumi.set(__self__, \"kalm_config\", kalm_config)\n if kubernetes_dashboard is not None:\n pulumi.set(__self__, \"kubernetes_dashboard\", kubernetes_dashboard)\n if network_policy_config is not None:\n pulumi.set(__self__, \"network_policy_config\", network_policy_config)", "def __init__(__self__, *,\n blob_csi_driver: Optional[pulumi.Input['ManagedClusterStorageProfileBlobCSIDriverArgs']] = None,\n disk_csi_driver: Optional[pulumi.Input['ManagedClusterStorageProfileDiskCSIDriverArgs']] = None,\n file_csi_driver: Optional[pulumi.Input['ManagedClusterStorageProfileFileCSIDriverArgs']] = None,\n snapshot_controller: Optional[pulumi.Input['ManagedClusterStorageProfileSnapshotControllerArgs']] = None):\n if blob_csi_driver is not None:\n pulumi.set(__self__, \"blob_csi_driver\", blob_csi_driver)\n if disk_csi_driver is not None:\n pulumi.set(__self__, \"disk_csi_driver\", disk_csi_driver)\n if file_csi_driver is not None:\n pulumi.set(__self__, \"file_csi_driver\", file_csi_driver)\n if snapshot_controller is not None:\n pulumi.set(__self__, \"snapshot_controller\", snapshot_controller)", "def init():\n\n @click.command()\n @click.option('--cell',\n callback=cli.handle_context_opt,\n envvar='TREADMILL_CELL',\n expose_value=False,\n required=True)\n @click.argument('app-or-svc')\n @click.option('--host',\n help='Hostname where to look for the logs',\n required=True)\n @click.option('--uniq',\n help='The container uniq id',\n required=False)\n @click.option('--service',\n help='The name of the service for which the logs are '\n 'to be retreived',\n required=False)\n def logs(app_or_svc, host, uniq, service):\n \"\"\"View application's service logs.\"\"\"\n try:\n app, uniq, logtype, logname = app_or_svc.split('/', 3)\n except ValueError:\n app, uniq, logtype, logname = app_or_svc, uniq, 'service', service\n\n if any(param is None for param in [app, uniq, logtype, logname]):\n cli.bad_exit('Incomplete parameter list')\n\n _host, port = _nodeinfo_endpoint(host)\n\n api = 'http://{0}:{1}'.format(host, port)\n 
logurl = '/local-app/%s/%s/%s/%s' % (\n urllib_parse.quote(app),\n urllib_parse.quote(uniq),\n logtype,\n urllib_parse.quote(logname)\n )\n\n log = restclient.get(api, logurl)\n click.echo(log.text)\n\n return logs", "def init():\n\n @click.command(name='autoscale')\n @click.option(\n '--ipa-certs', required=False, envvar='TREADMILL_IPA_CERTS',\n callback=aws_cli.handle_context_opt,\n is_eager=True,\n default='/etc/ipa/ca.crt',\n expose_value=False\n )\n @click.option(\n '--timeout', required=False, default=_DEFAULT_TIMEOUT, type=int,\n help='Time interval to evaluate state (seconds).'\n )\n @click.option(\n '--max-count', required=True, type=int,\n help='Max server count.'\n )\n @click.option(\n '--min-count', required=False, type=int, default=0,\n help='Min server count.'\n )\n @click.option(\n '--batch-count', required=True, type=int,\n help='Max batch count for new servers.'\n )\n @click.option(\n '--app-srv-ratio', required=False, type=float,\n default=_DEFAULT_APP_SERVER_RATIO,\n help='Default app/server ratio.'\n )\n def autoscale_cmd(timeout, max_count, min_count, batch_count,\n app_srv_ratio):\n \"\"\"Autoscale Treadmill cell based on scheduler queue.\"\"\"\n while True:\n create_cnt, extra_servers = autoscale.scale(\n max_servers=max_count,\n min_servers=min_count,\n default_app_srv_ratio=app_srv_ratio,\n max_batch=batch_count)\n if create_cnt > 0:\n autoscale.create_n_servers(create_cnt, partition=None)\n\n if extra_servers:\n autoscale.delete_servers_by_name(extra_servers)\n\n time.sleep(timeout)\n\n return autoscale_cmd", "def _open_stack_try_create_vm_(self, srv_name, metadata={}):\n for srv in self.cli.servers.list():\n if srv.name == srv_name:\n if not self.lazy_start and srv.status == 'SHUTOFF':\n srv.start()\n self.cli.servers.set_meta(srv, metadata)\n return srv\n return self._open_stack_create_vm_(srv_name, metadata)", "def __init__(__self__, *,\n connection_string: Optional[pulumi.Input[str]] = None,\n connection_string_prefix: Optional[pulumi.Input[str]] = None,\n db_instance_endpoint_description: Optional[pulumi.Input[str]] = None,\n db_instance_endpoint_id: Optional[pulumi.Input[str]] = None,\n db_instance_endpoint_type: Optional[pulumi.Input[str]] = None,\n db_instance_id: Optional[pulumi.Input[str]] = None,\n ip_type: Optional[pulumi.Input[str]] = None,\n node_items: Optional[pulumi.Input[Sequence[pulumi.Input['DbInstanceEndpointNodeItemArgs']]]] = None,\n port: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None):\n if connection_string is not None:\n pulumi.set(__self__, \"connection_string\", connection_string)\n if connection_string_prefix is not None:\n pulumi.set(__self__, \"connection_string_prefix\", connection_string_prefix)\n if db_instance_endpoint_description is not None:\n pulumi.set(__self__, \"db_instance_endpoint_description\", db_instance_endpoint_description)\n if db_instance_endpoint_id is not None:\n pulumi.set(__self__, \"db_instance_endpoint_id\", db_instance_endpoint_id)\n if db_instance_endpoint_type is not None:\n pulumi.set(__self__, \"db_instance_endpoint_type\", db_instance_endpoint_type)\n if db_instance_id is not None:\n pulumi.set(__self__, \"db_instance_id\", db_instance_id)\n if ip_type is not None:\n pulumi.set(__self__, \"ip_type\", ip_type)\n if node_items is not None:\n pulumi.set(__self__, \"node_items\", node_items)\n if port is not None:\n pulumi.set(__self__, \"port\", port)\n if 
private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)\n if vswitch_id is not None:\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)", "def __init__(__self__, *,\n common: 'outputs.CSIUnitySpecDriverCommon',\n config_version: str,\n replicas: int,\n auth_secret: Optional[str] = None,\n controller: Optional['outputs.CSIUnitySpecDriverController'] = None,\n force_update: Optional[bool] = None,\n node: Optional['outputs.CSIUnitySpecDriverNode'] = None,\n side_cars: Optional[Sequence['outputs.CSIUnitySpecDriverSideCars']] = None,\n snapshot_class: Optional[Sequence['outputs.CSIUnitySpecDriverSnapshotClass']] = None,\n storage_class: Optional[Sequence['outputs.CSIUnitySpecDriverStorageClass']] = None,\n tls_cert_secret: Optional[str] = None):\n pulumi.set(__self__, \"common\", common)\n pulumi.set(__self__, \"config_version\", config_version)\n pulumi.set(__self__, \"replicas\", replicas)\n if auth_secret is not None:\n pulumi.set(__self__, \"auth_secret\", auth_secret)\n if controller is not None:\n pulumi.set(__self__, \"controller\", controller)\n if force_update is not None:\n pulumi.set(__self__, \"force_update\", force_update)\n if node is not None:\n pulumi.set(__self__, \"node\", node)\n if side_cars is not None:\n pulumi.set(__self__, \"side_cars\", side_cars)\n if snapshot_class is not None:\n pulumi.set(__self__, \"snapshot_class\", snapshot_class)\n if storage_class is not None:\n pulumi.set(__self__, \"storage_class\", storage_class)\n if tls_cert_secret is not None:\n pulumi.set(__self__, \"tls_cert_secret\", tls_cert_secret)", "def __init__(__self__, *,\n cluster_name: Optional[pulumi.Input[str]] = None,\n external_server_urls: Optional[pulumi.Input[Sequence[pulumi.Input['KlusterletSpecExternalServerURLsArgs']]]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n registration_image_pull_spec: Optional[pulumi.Input[str]] = None,\n work_image_pull_spec: Optional[pulumi.Input[str]] = None):\n if cluster_name is not None:\n pulumi.set(__self__, \"cluster_name\", cluster_name)\n if external_server_urls is not None:\n pulumi.set(__self__, \"external_server_urls\", external_server_urls)\n if namespace is not None:\n pulumi.set(__self__, \"namespace\", namespace)\n if registration_image_pull_spec is not None:\n pulumi.set(__self__, \"registration_image_pull_spec\", registration_image_pull_spec)\n if work_image_pull_spec is not None:\n pulumi.set(__self__, \"work_image_pull_spec\", work_image_pull_spec)", "def __init__(__self__, *,\n args: Optional[Sequence[str]] = None,\n envs: Optional[Sequence['outputs.CSIIsilonSpecDriverSideCarsEnvs']] = None,\n image: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n name: Optional[str] = None,\n node_selector: Optional[Mapping[str, str]] = None,\n tolerations: Optional[Sequence['outputs.CSIIsilonSpecDriverSideCarsTolerations']] = None):\n if args is not None:\n pulumi.set(__self__, \"args\", args)\n if envs is not None:\n pulumi.set(__self__, \"envs\", envs)\n if image is not None:\n pulumi.set(__self__, \"image\", image)\n if image_pull_policy is not None:\n pulumi.set(__self__, \"image_pull_policy\", image_pull_policy)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if node_selector is not None:\n pulumi.set(__self__, \"node_selector\", node_selector)\n if tolerations is not None:\n pulumi.set(__self__, \"tolerations\", tolerations)", "def start_container(client, work_package, load_saved):\n 
package_path = os.path.join(PATH, \"work_packages\")\n\n client.containers.run(image=\"scrape_light\",\n environment=[\"PACKAGE=\"+work_package, \"LOAD_FILE=\" + load_saved,\n \"[email protected]\", \"PASSWORD=LA#kYs1#o:`Z\"],\n detach=True, tty=True, stdin_open=True,\n sysctls={\"net.ipv4.conf.all.rp_filter\": 2},\n privileged=True,\n devices=[\"/dev/net/tun\"],\n name=\"scrape_\" + str(work_package),\n cap_add=[\"NET_ADMIN\", \"SYS_MODULE\"],\n volumes={package_path: {\"bind\": \"/work_packages\"}})" ]
[ "0.7191671", "0.59593487", "0.5664225", "0.5561173", "0.5544013", "0.5525819", "0.5516496", "0.549404", "0.5465016", "0.5461598", "0.546096", "0.5455348", "0.5424041", "0.53970885", "0.53882545", "0.53578675", "0.5315698", "0.5307036", "0.5298775", "0.5289854", "0.5282188", "0.5271831", "0.52577573", "0.5254017", "0.5247854", "0.5237639", "0.52308637", "0.5213587", "0.5197475", "0.51973283" ]
0.6503301
1
Create a new container.

name
    Name of the container

config
    The config file to use for the container. Defaults to systemwide config (usually in /etc/lxc/lxc.conf).

profile
    Profile to use in container creation (see
def create( name, config=None, profile=None, network_profile=None, nic_opts=None, **kwargs ): # Required params for 'download' template download_template_deps = ("dist", "release", "arch") cmd = f"lxc-create -n {name}" profile = get_container_profile(copy.deepcopy(profile)) kw_overrides = copy.deepcopy(kwargs) def select(key, default=None): kw_overrides_match = kw_overrides.pop(key, None) profile_match = profile.pop(key, default) # Return the profile match if the kwarg match was None, as the # lxc.present state will pass these kwargs set to None by default. if kw_overrides_match is None: return profile_match return kw_overrides_match path = select("path") if exists(name, path=path): raise CommandExecutionError(f"Container '{name}' already exists") tvg = select("vgname") vgname = tvg if tvg else __salt__["config.get"]("lxc.vgname") # The 'template' and 'image' params conflict template = select("template") image = select("image") if template and image: raise SaltInvocationError("Only one of 'template' and 'image' is permitted") elif not any((template, image, profile)): raise SaltInvocationError( "At least one of 'template', 'image', and 'profile' is required" ) options = select("options") or {} backing = select("backing") if vgname and not backing: backing = "lvm" lvname = select("lvname") thinpool = select("thinpool") fstype = select("fstype") size = select("size", "1G") zfsroot = select("zfsroot") if backing in ("dir", "overlayfs", "btrfs", "zfs"): fstype = None size = None # some backends won't support some parameters if backing in ("aufs", "dir", "overlayfs", "btrfs"): lvname = vgname = thinpool = None if image: img_tar = __salt__["cp.cache_file"](image) template = os.path.join( os.path.dirname(salt.__file__), "templates", "lxc", "salt_tarball" ) options["imgtar"] = img_tar if path: cmd += f" -P {shlex.quote(path)}" if not os.path.exists(path): os.makedirs(path) if config: cmd += f" -f {config}" if template: cmd += f" -t {template}" if backing: backing = backing.lower() cmd += f" -B {backing}" if backing in ("zfs",): if zfsroot: cmd += f" --zfsroot {zfsroot}" if backing in ("lvm",): if lvname: cmd += f" --lvname {lvname}" if vgname: cmd += f" --vgname {vgname}" if thinpool: cmd += f" --thinpool {thinpool}" if backing not in ("dir", "overlayfs"): if fstype: cmd += f" --fstype {fstype}" if size: cmd += f" --fssize {size}" if options: if template == "download": missing_deps = [x for x in download_template_deps if x not in options] if missing_deps: raise SaltInvocationError( "Missing params in 'options' dict: {}".format( ", ".join(missing_deps) ) ) cmd += " --" for key, val in options.items(): cmd += f" --{key} {val}" ret = __salt__["cmd.run_all"](cmd, python_shell=False) # please do not merge extra conflicting stuff # inside those two line (ret =, return) return _after_ignition_network_profile( cmd, ret, name, network_profile, path, nic_opts )
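The heart of create() is the select() closure: an explicit keyword argument wins over the value pulled from the container profile, and only then is the lxc-create command line assembled. Below is a minimal, self-contained sketch of just that precedence and command assembly, with no Salt modules involved; the container name "web01", the profile values, and the simplified flag handling are all made up for illustration:

import copy
import shlex


def build_create_cmd(name, profile=None, **kwargs):
    # Explicit kwargs win over profile values unless they are None,
    # mirroring the select() helper in create() above.
    profile = copy.deepcopy(profile or {})
    overrides = copy.deepcopy(kwargs)

    def select(key, default=None):
        override = overrides.pop(key, None)
        from_profile = profile.pop(key, default)
        return from_profile if override is None else override

    cmd = ["lxc-create", "-n", name]
    template = select("template")
    backing = select("backing")
    size = select("size", "1G")
    options = select("options") or {}

    if template:
        cmd += ["-t", template]
    if backing:
        backing = backing.lower()
        cmd += ["-B", backing]
        if backing not in ("dir", "overlayfs", "btrfs", "zfs"):
            cmd += ["--fssize", size]
    if template == "download":
        # the 'download' template needs dist/release/arch, as checked in create()
        missing = [k for k in ("dist", "release", "arch") if k not in options]
        if missing:
            raise ValueError("missing download options: " + ", ".join(missing))
    if options:
        cmd.append("--")
        for key, val in options.items():
            cmd += [f"--{key}", str(val)]
    return " ".join(shlex.quote(part) for part in cmd)


# Hypothetical call: the profile supplies the template, the kwarg overrides the release
print(build_create_cmd(
    "web01",
    profile={"template": "download",
             "options": {"dist": "ubuntu", "release": "focal", "arch": "amd64"}},
    options={"dist": "ubuntu", "release": "bionic", "arch": "amd64"},
))

Run as-is, this prints "lxc-create -n web01 -t download -- --dist ubuntu --release bionic --arch amd64", showing that the kwarg-supplied options replace the profile's options rather than being merged with them.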
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init(\n name,\n config=None,\n cpuset=None,\n cpushare=None,\n memory=None,\n profile=None,\n network_profile=None,\n nic_opts=None,\n cpu=None,\n autostart=True,\n password=None,\n password_encrypted=None,\n users=None,\n dnsservers=None,\n searchdomains=None,\n bridge=None,\n gateway=None,\n pub_key=None,\n priv_key=None,\n force_install=False,\n unconditional_install=False,\n bootstrap_delay=None,\n bootstrap_args=None,\n bootstrap_shell=None,\n bootstrap_url=None,\n **kwargs,\n):\n ret = {\"name\": name, \"changes\": {}}\n\n profile = get_container_profile(copy.deepcopy(profile))\n if not network_profile:\n network_profile = profile.get(\"network_profile\")\n if not network_profile:\n network_profile = DEFAULT_NIC\n\n # Changes is a pointer to changes_dict['init']. This method is used so that\n # we can have a list of changes as they are made, providing an ordered list\n # of things that were changed.\n changes_dict = {\"init\": []}\n changes = changes_dict.get(\"init\")\n\n if users is None:\n users = []\n dusers = [\"root\"]\n for user in dusers:\n if user not in users:\n users.append(user)\n\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, _marker)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is _marker:\n return profile_match\n return kw_overrides_match\n\n path = select(\"path\")\n bpath = get_root_path(path)\n state_pre = state(name, path=path)\n tvg = select(\"vgname\")\n vgname = tvg if tvg else __salt__[\"config.get\"](\"lxc.vgname\")\n start_ = select(\"start\", True)\n autostart = select(\"autostart\", autostart)\n seed = select(\"seed\", True)\n install = select(\"install\", True)\n seed_cmd = select(\"seed_cmd\")\n salt_config = _get_salt_config(config, **kwargs)\n approve_key = select(\"approve_key\", True)\n clone_from = select(\"clone_from\")\n\n # If using a volume group then set up to make snapshot cow clones\n if vgname and not clone_from:\n try:\n kwargs[\"vgname\"] = vgname\n clone_from = _get_base(profile=profile, **kwargs)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = exc.strerror\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n if not kwargs.get(\"snapshot\") is False:\n kwargs[\"snapshot\"] = True\n does_exist = exists(name, path=path)\n to_reboot = False\n remove_seed_marker = False\n if does_exist:\n pass\n elif clone_from:\n remove_seed_marker = True\n try:\n clone(name, clone_from, profile=profile, **kwargs)\n changes.append({\"create\": \"Container cloned\"})\n except (SaltInvocationError, CommandExecutionError) as exc:\n if \"already exists\" in exc.strerror:\n changes.append({\"create\": \"Container already exists\"})\n else:\n ret[\"result\"] = False\n ret[\"comment\"] = exc.strerror\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n cfg = _LXCConfig(\n name=name,\n network_profile=network_profile,\n nic_opts=nic_opts,\n bridge=bridge,\n path=path,\n gateway=gateway,\n autostart=autostart,\n cpuset=cpuset,\n cpushare=cpushare,\n memory=memory,\n )\n old_chunks = read_conf(cfg.path, out_format=\"commented\")\n cfg.write()\n chunks = read_conf(cfg.path, out_format=\"commented\")\n if old_chunks != chunks:\n to_reboot = True\n else:\n remove_seed_marker = True\n cfg = _LXCConfig(\n network_profile=network_profile,\n nic_opts=nic_opts,\n cpuset=cpuset,\n path=path,\n bridge=bridge,\n gateway=gateway,\n autostart=autostart,\n cpushare=cpushare,\n 
memory=memory,\n )\n with cfg.tempfile() as cfile:\n try:\n create(name, config=cfile.name, profile=profile, **kwargs)\n changes.append({\"create\": \"Container created\"})\n except (SaltInvocationError, CommandExecutionError) as exc:\n if \"already exists\" in exc.strerror:\n changes.append({\"create\": \"Container already exists\"})\n else:\n ret[\"comment\"] = exc.strerror\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n cpath = os.path.join(bpath, name, \"config\")\n old_chunks = []\n if os.path.exists(cpath):\n old_chunks = read_conf(cpath, out_format=\"commented\")\n new_cfg = _config_list(\n conf_tuples=old_chunks,\n cpu=cpu,\n network_profile=network_profile,\n nic_opts=nic_opts,\n bridge=bridge,\n cpuset=cpuset,\n cpushare=cpushare,\n memory=memory,\n )\n if new_cfg:\n edit_conf(cpath, out_format=\"commented\", lxc_config=new_cfg)\n chunks = read_conf(cpath, out_format=\"commented\")\n if old_chunks != chunks:\n to_reboot = True\n\n # last time to be sure any of our property is correctly applied\n cfg = _LXCConfig(\n name=name,\n network_profile=network_profile,\n nic_opts=nic_opts,\n bridge=bridge,\n path=path,\n gateway=gateway,\n autostart=autostart,\n cpuset=cpuset,\n cpushare=cpushare,\n memory=memory,\n )\n old_chunks = []\n if os.path.exists(cfg.path):\n old_chunks = read_conf(cfg.path, out_format=\"commented\")\n cfg.write()\n chunks = read_conf(cfg.path, out_format=\"commented\")\n if old_chunks != chunks:\n changes.append({\"config\": \"Container configuration updated\"})\n to_reboot = True\n\n if to_reboot:\n try:\n stop(name, path=path)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = f\"Unable to stop container: {exc}\"\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n if not does_exist or (does_exist and state(name, path=path) != \"running\"):\n try:\n start(name, path=path)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = f\"Unable to stop container: {exc}\"\n if changes:\n ret[\"changes\"] = changes_dict\n return ret\n\n if remove_seed_marker:\n run(\n name,\n f\"rm -f '{SEED_MARKER}'\",\n path=path,\n chroot_fallback=False,\n python_shell=False,\n )\n\n # set the default user/password, only the first time\n if ret.get(\"result\", True) and password:\n gid = \"/.lxc.initial_pass\"\n gids = [gid, \"/lxc.initial_pass\", f\"/.lxc.{name}.initial_pass\"]\n if not any(\n retcode(\n name,\n f'test -e \"{x}\"',\n chroot_fallback=True,\n path=path,\n ignore_retcode=True,\n )\n == 0\n for x in gids\n ):\n # think to touch the default user generated by default templates\n # which has a really unsecure passwords...\n # root is defined as a member earlier in the code\n for default_user in [\"ubuntu\"]:\n if (\n default_user not in users\n and retcode(\n name,\n f\"id {default_user}\",\n python_shell=False,\n path=path,\n chroot_fallback=True,\n ignore_retcode=True,\n )\n == 0\n ):\n users.append(default_user)\n for user in users:\n try:\n cret = set_password(\n name,\n users=[user],\n path=path,\n password=password,\n encrypted=password_encrypted,\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n msg = f\"{user}: Failed to set password\" + exc.strerror\n # only hardfail in unrecoverable situation:\n # root cannot be setted up\n if user == \"root\":\n ret[\"comment\"] = msg\n ret[\"result\"] = False\n else:\n log.debug(msg)\n if ret.get(\"result\", True):\n changes.append({\"password\": \"Password(s) updated\"})\n if (\n retcode(\n name,\n 'sh -c \\'touch \"{0}\"; 
test -e \"{0}\"\\''.format(gid),\n path=path,\n chroot_fallback=True,\n ignore_retcode=True,\n )\n != 0\n ):\n ret[\"comment\"] = \"Failed to set password marker\"\n changes[-1][\"password\"] += \". \" + ret[\"comment\"] + \".\"\n ret[\"result\"] = False\n\n # set dns servers if any, only the first time\n if ret.get(\"result\", True) and dnsservers:\n # retro compatibility, test also old markers\n gid = \"/.lxc.initial_dns\"\n gids = [gid, \"/lxc.initial_dns\", f\"/lxc.{name}.initial_dns\"]\n if not any(\n retcode(\n name,\n f'test -e \"{x}\"',\n chroot_fallback=True,\n path=path,\n ignore_retcode=True,\n )\n == 0\n for x in gids\n ):\n try:\n set_dns(\n name, path=path, dnsservers=dnsservers, searchdomains=searchdomains\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = \"Failed to set DNS: \" + exc.strerror\n ret[\"result\"] = False\n else:\n changes.append({\"dns\": \"DNS updated\"})\n if (\n retcode(\n name,\n 'sh -c \\'touch \"{0}\"; test -e \"{0}\"\\''.format(gid),\n chroot_fallback=True,\n path=path,\n ignore_retcode=True,\n )\n != 0\n ):\n ret[\"comment\"] = \"Failed to set DNS marker\"\n changes[-1][\"dns\"] += \". \" + ret[\"comment\"] + \".\"\n ret[\"result\"] = False\n\n # retro compatibility, test also old markers\n if remove_seed_marker:\n run(name, f\"rm -f '{SEED_MARKER}'\", path=path, python_shell=False)\n gid = \"/.lxc.initial_seed\"\n gids = [gid, \"/lxc.initial_seed\"]\n if any(\n retcode(\n name,\n f\"test -e {x}\",\n path=path,\n chroot_fallback=True,\n ignore_retcode=True,\n )\n == 0\n for x in gids\n ) or not ret.get(\"result\", True):\n pass\n elif seed or seed_cmd:\n if seed:\n try:\n result = bootstrap(\n name,\n config=salt_config,\n path=path,\n approve_key=approve_key,\n pub_key=pub_key,\n priv_key=priv_key,\n install=install,\n force_install=force_install,\n unconditional_install=unconditional_install,\n bootstrap_delay=bootstrap_delay,\n bootstrap_url=bootstrap_url,\n bootstrap_shell=bootstrap_shell,\n bootstrap_args=bootstrap_args,\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = \"Bootstrap failed: \" + exc.strerror\n ret[\"result\"] = False\n else:\n if not result:\n ret[\n \"comment\"\n ] = \"Bootstrap failed, see minion log for more information\"\n ret[\"result\"] = False\n else:\n changes.append({\"bootstrap\": \"Container successfully bootstrapped\"})\n elif seed_cmd:\n try:\n result = __salt__[seed_cmd](\n info(name, path=path)[\"rootfs\"], name, salt_config\n )\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = \"Bootstrap via seed_cmd '{}' failed: {}\".format(\n seed_cmd, exc.strerror\n )\n ret[\"result\"] = False\n else:\n if not result:\n ret[\"comment\"] = (\n \"Bootstrap via seed_cmd '{}' failed, \"\n \"see minion log for more information \".format(seed_cmd)\n )\n ret[\"result\"] = False\n else:\n changes.append(\n {\n \"bootstrap\": (\n \"Container successfully bootstrapped \"\n \"using seed_cmd '{}'\".format(seed_cmd)\n )\n }\n )\n\n if ret.get(\"result\", True) and not start_:\n try:\n stop(name, path=path)\n except (SaltInvocationError, CommandExecutionError) as exc:\n ret[\"comment\"] = f\"Unable to stop container: {exc}\"\n ret[\"result\"] = False\n\n state_post = state(name, path=path)\n if state_pre != state_post:\n changes.append({\"state\": {\"old\": state_pre, \"new\": state_post}})\n\n if ret.get(\"result\", True):\n ret[\"comment\"] = f\"Container '{name}' successfully initialized\"\n ret[\"result\"] = True\n if changes:\n 
ret[\"changes\"] = changes_dict\n return ret", "def _create_profile(self, user, profile_dir):\n log.info(\"Writing IPython cluster config files\")\n self._master.ssh.switch_user(user)\n self._master.ssh.execute(\"rm -rf '%s'\" % profile_dir)\n self._master.ssh.execute('ipython profile create')\n self._master.ssh.switch_user('root')", "def _create_profile(self, user, profile_dir):\n log.info(\"Writing IPython cluster config files\")\n self._master.ssh.switch_user(user)\n self._master.ssh.execute(\"rm -rf '%s'\" % profile_dir)\n self._master.ssh.execute('ipython profile create')\n # Add startup files\n\n self._master.ssh.switch_user('root')", "def create(profile, name, application, cname=None, version=None,\n tier=\"web\", key_pair=None, instance_type=\"t1.micro\",\n instance_profile=None, service_role=None,\n healthcheck_url=None, security_groups=None,\n max_instances=1, min_instances=1, tags=None,\n vpc_id=None, subnets=None, db_subnets=None,\n elb_subnets=None, elb_scheme=None,\n public_ip=None, root_volume_size=None):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"ApplicationName\"] = application\n params[\"EnvironmentName\"] = name\n if cname:\n params[\"CNAMEPrefix\"] = cname\n if version:\n params[\"VersionLabel\"] = version\n stack = utils.get_multicontainer_docker_solution_stack(profile)\n params[\"SolutionStackName\"] = stack \n if tier == \"web\":\n tier_definition = {\n \"Name\": \"WebServer\",\n \"Type\": \"Standard\",\n \"Version\": \"1.0\",\n }\n elif tier == \"worker\":\n tier_definition = {\n \"Name\": \"Worker\",\n \"Type\": \"SQS/HTTP\",\n \"Version\": \"1.0\",\n }\n else:\n raise Exception(\"tier must be 'web' or 'worker'\")\n params[\"Tier\"] = tier_definition\n if tags:\n params[\"Tags\"] = tags\n options = []\n if key_pair:\n key_pair_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"EC2KeyName\",\n \"Value\": key_pair,\n }\n options.append(key_pair_option)\n if instance_type:\n instance_type_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"InstanceType\",\n \"Value\": instance_type,\n }\n options.append(instance_type_option)\n if instance_profile:\n profile_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"IamInstanceProfile\",\n \"Value\": instance_profile,\n }\n options.append(profile_option)\n if service_role:\n role_option = {\n \"Namespace\": \"aws:elasticbeanstalk:environment\",\n \"OptionName\": \"ServiceRole\",\n \"Value\": service_role,\n }\n options.append(role_option)\n if healthcheck_url:\n healthcheck_url_option = {\n \"Namespace\": \"aws:elasticbeanstalk:application\",\n \"OptionName\": \"Application Healthcheck URL\",\n \"Value\": healthcheck_url,\n }\n options.append(healthcheck_url_option)\n if security_groups:\n security_groups_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"SecurityGroups\",\n \"Value\": \",\".join(security_groups),\n }\n options.append(security_groups_option)\n if min_instances:\n min_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MinSize\",\n \"Value\": str(min_instances),\n }\n options.append(min_instances_option)\n if max_instances:\n max_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MaxSize\",\n \"Value\": str(max_instances),\n }\n options.append(max_instances_option)\n if vpc_id:\n vpc_id_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"VPCId\",\n 
\"Value\": vpc_id,\n }\n options.append(vpc_id_option)\n if subnets:\n subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"Subnets\",\n \"Value\": \",\".join(subnets),\n }\n options.append(subnets_option)\n if db_subnets:\n db_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"DBSubnets\",\n \"Value\": \",\".join(db_subnets),\n }\n options.append(db_subnets_option)\n if elb_subnets:\n elb_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBSubnets\",\n \"Value\": \",\".join(elb_subnets),\n }\n options.append(elb_subnets_option)\n if elb_scheme:\n elb_scheme_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBScheme\",\n \"Value\": elb_scheme,\n }\n options.append(elb_scheme_option)\n if public_ip:\n public_ip_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"AssociatePublicIpAddress\",\n \"Value\": str(public_ip),\n }\n options.append(public_ip_option)\n if root_volume_size:\n root_volume_size_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"RootVolumeSize\",\n \"Value\": str(root_volume_size),\n }\n options.append(root_volume_size_option)\n if options:\n params[\"OptionSettings\"] = options\n return client.create_environment(**params)", "def create_container_config(messaging_conf_path):\n\n confdict = parse_messaging_config(messaging_conf_path)\n\n from ion.core.cc import service\n config = service.Options()\n config['broker_host'] = confdict[KEY_BROKERHOST]\n config['broker_vhost'] = \"/\"\n config['no_shell'] = True\n config['args'] = 'sysname=%s' % confdict[KEY_SYSNAME]\n config['script'] = None\n\n if confdict[KEY_BROKERCREDFILE]:\n config['broker_credfile'] = confdict[KEY_BROKERCREDFILE]\n\n return config", "def create():\n config_file = get_config_file()\n if os.path.isfile(config_file):\n logger.info(\"The config file already exists at '%s'\", config_file)\n return\n _create_config()\n logger.info(\"Created config file at '%s'\", config_file)", "def create(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.create_instance_profile(**params)", "def create(self):\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']", "def create(self, config):\n config_name = config.get(\"LaunchConfigurationName\", self._name)\n assert config_name == self._name, \"Config name mismatch {} {}\".format(config_name, self._name)\n config[\"LaunchConfigurationName\"] = self._name\n self._client.create_launch_configuration(**config)", "def post(self, run=False, **container_dict):\n context = pecan.request.context\n compute_api = pecan.request.compute_api\n policy.enforce(context, \"container:create\",\n action=\"container:create\")\n\n try:\n run = strutils.bool_from_string(run, strict=True)\n except ValueError:\n msg = _('Valid run values are true, false, 0, 1, yes and no')\n raise exception.InvalidValue(msg)\n try:\n container_dict['tty'] = strutils.bool_from_string(\n container_dict.get('tty', False), strict=True)\n container_dict['stdin_open'] = strutils.bool_from_string(\n container_dict.get('stdin_open', False), strict=True)\n except ValueError:\n msg = _('Valid tty and stdin_open values are ''true'', '\n '\"false\", True, False, \"True\" and \"False\"')\n raise exception.InvalidValue(msg)\n\n # NOTE(mkrai): Intent here is to check the existence of image\n # before proceeding to create container. 
If image is not found,\n # container create will fail with 400 status.\n images = compute_api.image_search(context, container_dict['image'],\n True)\n if not images:\n raise exception.ImageNotFound(container_dict['image'])\n container_dict['project_id'] = context.project_id\n container_dict['user_id'] = context.user_id\n name = container_dict.get('name') or \\\n self._generate_name_for_container()\n container_dict['name'] = name\n if container_dict.get('memory'):\n container_dict['memory'] = \\\n str(container_dict['memory']) + 'M'\n if container_dict.get('restart_policy'):\n self._check_for_restart_policy(container_dict)\n container_dict['status'] = fields.ContainerStatus.CREATING\n new_container = objects.Container(context, **container_dict)\n new_container.create(context)\n\n if run:\n compute_api.container_run(context, new_container)\n else:\n compute_api.container_create(context, new_container)\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('containers',\n new_container.uuid)\n pecan.response.status = 202\n return view.format_container(pecan.request.host_url, new_container)", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def clone(name, orig, profile=None, network_profile=None, nic_opts=None, **kwargs):\n profile = get_container_profile(copy.deepcopy(profile))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, None)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is None:\n return profile_match\n return kw_overrides_match\n\n path = select(\"path\")\n if exists(name, path=path):\n raise CommandExecutionError(f\"Container '{name}' already exists\")\n\n _ensure_exists(orig, path=path)\n if state(orig, path=path) != \"stopped\":\n raise CommandExecutionError(f\"Container '{orig}' must be stopped to be cloned\")\n\n backing = select(\"backing\")\n snapshot = select(\"snapshot\")\n if backing in (\"dir\",):\n snapshot = False\n if not snapshot:\n snapshot = \"\"\n else:\n snapshot = \"-s\"\n\n size = select(\"size\", \"1G\")\n if backing in (\"dir\", \"overlayfs\", \"btrfs\"):\n size = None\n # LXC commands and options changed in 2.0 - CF issue #34086 for details\n if Version(version()) >= Version(\"2.0\"):\n # https://linuxcontainers.org/lxc/manpages//man1/lxc-copy.1.html\n cmd = \"lxc-copy\"\n cmd += f\" {snapshot} -n {orig} -N {name}\"\n else:\n # https://linuxcontainers.org/lxc/manpages//man1/lxc-clone.1.html\n cmd = \"lxc-clone\"\n cmd += f\" {snapshot} -o {orig} -n {name}\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n if not os.path.exists(path):\n os.makedirs(path)\n if backing:\n backing = backing.lower()\n cmd += f\" -B {backing}\"\n if backing not in (\"dir\", \"overlayfs\"):\n if size:\n cmd += f\" -L {size}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n # please do not merge extra conflicting stuff\n # inside those two line (ret =, return)\n return _after_ignition_network_profile(\n cmd, ret, name, network_profile, path, nic_opts\n )", "def create(dockerfile):\n\n path = os.path.dirname(dockerfile)\n\n container_name = input('Enter container name: ')\n port = input('Enter port number to map TCP port 5000 in the container, to a port on the Docker host: ')\n\n try:\n image = CLIENT.images.build(path=path, dockerfile=dockerfile, tag=\"my_app_image\")\n # Run a container and map TCP port 5000 in the container to a given port on the Docker host.\n container = 
CLIENT.containers.run('my_app_image', detach=True, ports={'5000/tcp': port},\n name=container_name)\n click.secho(\"Container created with name: {}. App is running \"\n \"on http://0.0.0.0:{}/ on the host.\"\n .format(container_name, port), bg='blue', fg='white')\n except (docker.errors.APIError, TypeError, OSError) as err:\n print(err)", "def create_container(ContainerName=None, Tags=None):\n pass", "def service_create(container, cconfig, sysdir=constants.SYSTEMD_DIR, log=None):\n log = log or common.configure_logging(__name__)\n # We prefix the SystemD service so we can identify them better:\n # e.g. systemctl list-unit-files | grep tripleo_\n # It'll help to not conflict when rpms are installed on the host and\n # have the same service name as their container name.\n # For example haproxy rpm and haproxy container would have the same\n # service name so the prefix will help to not having this conflict\n # when removing the rpms during a cleanup by the operator.\n service = 'tripleo_' + container\n\n wants = \" \".join(systemctl.format_name(str(x)) for x in\n cconfig.get('depends_on', []))\n\n restart = cconfig.get('restart', 'always')\n stop_grace_period = cconfig.get('stop_grace_period', '10')\n\n # Please refer to systemd.exec documentation for those entries\n # https://www.freedesktop.org/software/systemd/man/systemd.exec.html\n sys_exec = cconfig.get('systemd_exec_flags', {})\n\n # SystemD doesn't have the equivalent of docker unless-stopped.\n # Let's force 'always' so containers aren't restarted when stopped by\n # systemd, but restarted when in failure. Also this code is only for\n # podman now, so nothing changed for Docker deployments.\n if restart == 'unless-stopped':\n restart = 'always'\n\n # If the service depends on other services, it must be stopped\n # in a specific order. 
The host can be configured to prevent\n # systemd from stopping the associated systemd scopes too early,\n # so make sure to generate the start command accordingly.\n if (len(cconfig.get('depends_on', [])) > 0 and\n os.path.exists(DROP_IN_MARKER_FILE)):\n start_cmd = '/usr/libexec/paunch-start-podman-container %s' % container\n else:\n start_cmd = '/usr/bin/podman start %s' % container\n\n sysd_unit_f = sysdir + systemctl.format_name(service)\n log.debug('Creating systemd unit file: %s' % sysd_unit_f)\n s_config = {\n 'name': container,\n 'start_cmd': start_cmd,\n 'wants': wants,\n 'restart': restart,\n 'stop_grace_period': stop_grace_period,\n 'sys_exec': '\\n'.join(['%s=%s' % (x, y) for x, y in sys_exec.items()]),\n }\n # Ensure we don't have some trailing .requires directory and content for\n # this service\n if os.path.exists(sysd_unit_f + '.requires'):\n shutil.rmtree(sysd_unit_f + '.requires')\n\n with open(sysd_unit_f, 'w') as unit_file:\n os.chmod(unit_file.name, 0o644)\n unit_file.write(\"\"\"[Unit]\nDescription=%(name)s container\nAfter=paunch-container-shutdown.service\nWants=%(wants)s\n[Service]\nRestart=%(restart)s\nExecStart=%(start_cmd)s\nExecReload=/usr/bin/podman kill --signal HUP %(name)s\nExecStop=/usr/bin/podman stop -t %(stop_grace_period)s %(name)s\nKillMode=none\nType=forking\nPIDFile=/var/run/%(name)s.pid\n%(sys_exec)s\n[Install]\nWantedBy=multi-user.target\"\"\" % s_config)\n try:\n systemctl.daemon_reload()\n systemctl.enable(service, now=True)\n except systemctl.SystemctlException:\n log.exception(\"systemctl failed\")\n raise", "def container(name, ostemplate, **kwargs):\n if not openvz.exists(name):\n ctid = openvz.get_available_ctid()\n openvz.create(ctid, ostemplate=ostemplate, **kwargs)\n openvz.set(ctid, name=name)\n return Container(name)", "def new(self, new_type, name):\n valid_types = ['image', 'rpm']\n new_type = new_type.lower()\n if new_type not in valid_types:\n raise ValueError('Type must be one of {}'.format(','.join(valid_types)))\n\n new_type = new_type + 's'\n template = os.path.join(self.runtime.metadata_dir, 'example', new_type, 'template.yml')\n new_config = os.path.join(self.runtime.group_dir, new_type, '{}.yml'.format(name))\n\n if os.path.exists(new_config):\n raise ValueError('{} already exists!'.format(new_config))\n\n shutil.copyfile(template, new_config)\n\n config_log = self._load_config_log()\n config_log.setdefault('new', []).append(new_config)\n\n self._save_config_log(config_log)\n\n self.runtime.logger.info(\"New config template created: \\n{}\".format(new_config))", "def __create_cont(self, path, filesystem, cont_stat, component_number):\n try:\n self.logger.debug('Create container interface called')\n status_obj = Status()\n cont_id = \"container\"\n #cont_id = get_container_id()\n tmp_path = '%s/%s/%s/%s/%s' % (self.__fs_base, \\\n filesystem, TMPDIR, cont_id,component_number)\n self.asyn_helper.call(\"create_container\", \\\n tmp_path, path, cont_stat, status_obj)\n return status_obj\n except Exception as err:\n self.logger.error(('create_container for %(con_dir)s failed ',\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path, \n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def set_default_profile(self):\n profile = textwrap.dedent(\n \"\"\"\n config:\n boot.autostart: \"true\"\n description: Default LXD profile\n devices:\n eth0:\n name: eth0\n nictype: bridged\n parent: conjureup1\n type: nic\n eth1:\n name: eth1\n nictype: bridged\n parent: conjureup0\n type: nic\n root:\n path: /\n 
pool: default\n type: disk\n name: default\n \"\"\")\n with NamedTemporaryFile(mode='w', encoding='utf-8',\n delete=False) as tempf:\n utils.spew(tempf.name, profile)\n out = utils.run_script(\n 'cat {} |conjure-up.lxc profile edit default'.format(\n tempf.name))\n if out.returncode != 0:\n raise Exception(\"Problem setting default profile: {}\".format(\n out))", "def reconfigure(\n name,\n cpu=None,\n cpuset=None,\n cpushare=None,\n memory=None,\n profile=None,\n network_profile=None,\n nic_opts=None,\n bridge=None,\n gateway=None,\n autostart=None,\n utsname=None,\n rootfs=None,\n path=None,\n **kwargs,\n):\n changes = {}\n cpath = get_root_path(path)\n path = os.path.join(cpath, name, \"config\")\n ret = {\n \"name\": name,\n \"comment\": f\"config for {name} up to date\",\n \"result\": True,\n \"changes\": changes,\n }\n profile = get_container_profile(copy.deepcopy(profile))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, _marker)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is _marker:\n return profile_match\n return kw_overrides_match\n\n if nic_opts is not None and not network_profile:\n network_profile = DEFAULT_NIC\n\n if autostart is not None:\n autostart = select(\"autostart\", autostart)\n else:\n autostart = \"keep\"\n if not utsname:\n utsname = select(\"utsname\", utsname)\n if os.path.exists(path):\n old_chunks = read_conf(path, out_format=\"commented\")\n make_kw = salt.utils.odict.OrderedDict(\n [\n (\"utsname\", utsname),\n (\"rootfs\", rootfs),\n (\"autostart\", autostart),\n (\"cpu\", cpu),\n (\"gateway\", gateway),\n (\"cpuset\", cpuset),\n (\"cpushare\", cpushare),\n (\"network_profile\", network_profile),\n (\"nic_opts\", nic_opts),\n (\"bridge\", bridge),\n ]\n )\n # match 0 and none as memory = 0 in lxc config is harmful\n if memory:\n make_kw[\"memory\"] = memory\n kw = salt.utils.odict.OrderedDict()\n for key, val in make_kw.items():\n if val is not None:\n kw[key] = val\n new_cfg = _config_list(conf_tuples=old_chunks, **kw)\n if new_cfg:\n edit_conf(path, out_format=\"commented\", lxc_config=new_cfg)\n chunks = read_conf(path, out_format=\"commented\")\n if old_chunks != chunks:\n ret[\"comment\"] = f\"{name} lxc config updated\"\n if state(name, path=path) == \"running\":\n cret = reboot(name, path=path)\n ret[\"result\"] = cret[\"result\"]\n return ret", "def deploy_component(profile, image, instance_name, docker_config, should_wait=False,\n logins=[]):\n ports = docker_config.get(\"ports\", None)\n hcp = doc.add_host_config_params_ports(ports=ports)\n volumes = docker_config.get(\"volumes\", None)\n hcp = doc.add_host_config_params_volumes(volumes=volumes, host_config_params=hcp)\n # Thankfully passing in an IP will return back an IP\n dh = profile.docker_host.split(\":\")[0]\n _, _, dhips = socket.gethostbyname_ex(dh)\n\n if dhips:\n hcp = doc.add_host_config_params_dns(dhips[0], hcp)\n else:\n raise DockerConstructionError(\"Could not resolve the docker hostname:{0}\".format(dh))\n\n envs = build_envs(profile, docker_config, instance_name)\n client = get_docker_client(profile, logins=logins)\n\n config = doc.create_container_config(client, image, envs, hcp)\n\n return _run_container(client, config, name=instance_name, wait=should_wait)", "def create(ctx, **kwargs):\n # creates and activates pf9-express config file\n\n pf9_exp_conf_dir = ctx.obj['pf9_exp_conf_dir']\n \n # Backup existing config if one exist\n if 
os.path.exists(pf9_exp_conf_dir + 'express.conf'):\n with open(pf9_exp_conf_dir + 'express.conf', 'r') as current:\n lines = current.readlines()\n current.close()\n for line in lines:\n if 'config_name|' in line:\n line = line.strip()\n name = line.replace('config_name|','')\n\n filename = name + '.conf'\n shutil.copyfile(pf9_exp_conf_dir + 'express.conf', pf9_exp_conf_dir + filename)\n\n if not os.path.exists(pf9_exp_conf_dir):\n try:\n access_rights = 0o700\n os.makedirs(pf9_exp_conf_dir, access_rights)\n except Exception:\n click.echo(\"Creation of the directory %s failed\" % pf9_exp_conf_dir)\n else:\n click.echo(\"Successfully created the directory %s \" % pf9_exp_conf_dir)\n\n with open(pf9_exp_conf_dir + 'express.conf', 'w') as file:\n for k,v in ctx.params.items():\n file.write(k + '|' + str(v) + '\\n')\n click.echo('Successfully wrote Platform9 management plane configuration')", "def create(\n name: str,\n from_name: str = typer.Option(None, \"--from\", help=\"Copy an existing profile.\"),\n):\n\n profiles = prefect.settings.load_profiles()\n if name in profiles:\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n [red]Profile {name!r} already exists.[/red]\n To create a new profile, remove the existing profile first:\n\n prefect profile delete {name!r}\n \"\"\"\n ).strip()\n )\n raise typer.Exit(1)\n\n if from_name:\n if from_name not in profiles:\n exit_with_error(f\"Profile {from_name!r} not found.\")\n\n # Create a copy of the profile with a new name and add to the collection\n profiles.add_profile(profiles[from_name].copy(update={\"name\": name}))\n else:\n profiles.add_profile(prefect.settings.Profile(name=name, settings={}))\n\n prefect.settings.save_profiles(profiles)\n\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n Created profile with properties:\n name - {name!r}\n from name - {from_name or None}\n\n Use created profile for future, subsequent commands:\n prefect profile use {name!r}\n\n Use created profile temporarily for a single command:\n prefect -p {name!r} config view\n \"\"\"\n )\n )", "def create_lxd_container(public_key=None, name=\"test_name\"):\n container = None\n\n # Format name so it's valid\n name = name.replace(\"_\", \"-\").replace(\".\", \"\")\n\n client = get_lxd_client()\n if not client:\n raise Exception(\"Unable to connect to LXD\")\n\n test_machine = \"test-{}-{}\".format(\n uuid.uuid4().hex[-4:],\n name,\n )\n\n private_key_path, public_key_path = find_n2vc_ssh_keys()\n\n try:\n # create profile w/cloud-init and juju ssh key\n if not public_key:\n public_key = \"\"\n with open(public_key_path, \"r\") as f:\n public_key = f.readline()\n\n client.profiles.create(\n test_machine,\n config={\n 'user.user-data': '#cloud-config\\nssh_authorized_keys:\\n- {}'.format(public_key)},\n devices={\n 'root': {'path': '/', 'pool': 'default', 'type': 'disk'},\n 'eth0': {\n 'nictype': 'bridged',\n 'parent': 'lxdbr0',\n 'type': 'nic'\n }\n }\n )\n except Exception as ex:\n debug(\"Error creating lxd profile {}: {}\".format(test_machine, ex))\n raise ex\n\n try:\n # create lxc machine\n config = {\n 'name': test_machine,\n 'source': {\n 'type': 'image',\n 'alias': 'xenial',\n 'mode': 'pull',\n 'protocol': 'simplestreams',\n 'server': 'https://cloud-images.ubuntu.com/releases',\n },\n 'profiles': [test_machine],\n }\n container = client.containers.create(config, wait=True)\n container.start(wait=True)\n except Exception as ex:\n debug(\"Error creating lxd container {}: {}\".format(test_machine, ex))\n # This is a test-ending failure.\n raise ex\n\n def 
wait_for_network(container, timeout=30):\n \"\"\"Wait for eth0 to have an ipv4 address.\"\"\"\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if 'eth0' in container.state().network:\n addresses = container.state().network['eth0']['addresses']\n if len(addresses) > 0:\n if addresses[0]['family'] == 'inet':\n return addresses[0]\n return None\n\n try:\n wait_for_network(container)\n except Exception as ex:\n debug(\n \"Error waiting for container {} network: {}\".format(\n test_machine,\n ex,\n )\n )\n\n try:\n waitcount = 0\n while waitcount <= 5:\n if is_sshd_running(container):\n break\n waitcount += 1\n time.sleep(1)\n if waitcount >= 5:\n debug(\"couldn't detect sshd running\")\n raise Exception(\"Unable to verify container sshd\")\n\n except Exception as ex:\n debug(\n \"Error checking sshd status on {}: {}\".format(\n test_machine,\n ex,\n )\n )\n\n # HACK: We need to give sshd a chance to bind to the interface,\n # and pylxd's container.execute seems to be broken and fails and/or\n # hangs trying to properly check if the service is up.\n (exit_code, stdout, stderr) = container.execute([\n 'ping',\n '-c', '5', # Wait for 5 ECHO_REPLY\n '8.8.8.8', # Ping Google's public DNS\n '-W', '15', # Set a 15 second deadline\n ])\n if exit_code > 0:\n # The network failed\n raise Exception(\"Unable to verify container network\")\n\n return container", "def _create_container(self, container_name):\n try:\n container = self.swift.head_container(container_name)\n except client.ClientException:\n self.swift.put_container(container_name)\n else:\n return container", "def qemu_img_create(config, size_mb):\n\n opts = [\n \"key-secret=sec0\",\n \"iter-time=10\",\n \"cipher-alg=%s-%d\" % (config.cipher, config.keylen),\n \"cipher-mode=%s\" % config.mode,\n \"ivgen-alg=%s\" % config.ivgen,\n \"hash-alg=%s\" % config.hash,\n ]\n if config.ivgen_hash is not None:\n opts.append(\"ivgen-hash-alg=%s\" % config.ivgen_hash)\n\n args = [\"create\", \"-f\", \"luks\",\n \"--object\",\n (\"secret,id=sec0,data=%s,format=base64\" %\n config.first_password_base64()),\n \"-o\", \",\".join(opts),\n config.image_path(),\n \"%dM\" % size_mb]\n\n iotests.log(\"qemu-img \" + \" \".join(args), filters=[iotests.filter_test_dir])\n iotests.log(iotests.qemu_img_pipe(*args), filters=[iotests.filter_test_dir])", "def make_config(self, cfg_t, cfgname):\n\n if cfg_t == 'pool':\n prop_d = MBRAT_DEF_POOL_D\n prop_d[cfg_t].update( {'name': cfgname,} )\n args = self._mkcfg_args( cfgname, MBRAT_POOLSD, ['data',], prop_d ) \n\n elif cfg_t == 'poolkey':\n targetd = self.get_cfg_parentd(cfg_t)\n prop_d = MBRAT_DEF_POOLKEY_D\n prop_d[cfg_t].update( {'name': cfgname,} )\n args = self._mkcfg_args( cfgname, targetd, [], prop_d )\n\n elif cfg_t == 'profile':\n prop_d = { cfg_t: {'info': \"\", 'name': cfgname,}, }\n args = self._mkcfg_args( cfgname, MBRAT_PROFILESD, \n ['data', 'public',], prop_d )\n\n elif cfg_t == 'privkey':\n targetd = self.get_cfg_parentd(cfg_t)\n prop_d = MBRAT_DEF_PRIVKEY_D\n prop_d[cfg_t].update( {'name': cfgname,} )\n prop_d['pool'].update( {'name': \"{}_pool\".format(cfgname),} )\n args = self._mkcfg_args( cfgname, targetd, ['public',], prop_d )\n\n elif cfg_t == 'pubkey':\n return self._mkcfg_pubkey(cfgname)\n\n # now make the new config dir...\n return self._mkcfg(cfg_t, args)", "def ddtest_create_generic_container_w_name(self, name=None):\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = 
self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=name, type='generic')", "def create_config_file(name):\n config = {}\n config['name'] = name\n to_dir = os.getcwd() + '/' + name\n with open(os.path.join(to_dir, 'configuration.json'), 'w') as config_file:\n json.dump(config, config_file)" ]
[ "0.6075936", "0.5841215", "0.5806689", "0.5630651", "0.55597794", "0.55063236", "0.5456116", "0.5427631", "0.5425852", "0.5322353", "0.5300968", "0.5300968", "0.52974516", "0.5293702", "0.5282008", "0.5237458", "0.5212504", "0.51788056", "0.5174497", "0.5166467", "0.51633656", "0.51390636", "0.51390177", "0.51263237", "0.5119371", "0.51185006", "0.5116842", "0.5116248", "0.5105587", "0.510148" ]
0.7305001
0
Create a new container as a clone of another container

name
    Name of the container

orig
    Name of the original container to be cloned

profile
    Profile to use in container cloning (see
def clone(name, orig, profile=None, network_profile=None, nic_opts=None, **kwargs): profile = get_container_profile(copy.deepcopy(profile)) kw_overrides = copy.deepcopy(kwargs) def select(key, default=None): kw_overrides_match = kw_overrides.pop(key, None) profile_match = profile.pop(key, default) # let kwarg overrides be the preferred choice if kw_overrides_match is None: return profile_match return kw_overrides_match path = select("path") if exists(name, path=path): raise CommandExecutionError(f"Container '{name}' already exists") _ensure_exists(orig, path=path) if state(orig, path=path) != "stopped": raise CommandExecutionError(f"Container '{orig}' must be stopped to be cloned") backing = select("backing") snapshot = select("snapshot") if backing in ("dir",): snapshot = False if not snapshot: snapshot = "" else: snapshot = "-s" size = select("size", "1G") if backing in ("dir", "overlayfs", "btrfs"): size = None # LXC commands and options changed in 2.0 - CF issue #34086 for details if Version(version()) >= Version("2.0"): # https://linuxcontainers.org/lxc/manpages//man1/lxc-copy.1.html cmd = "lxc-copy" cmd += f" {snapshot} -n {orig} -N {name}" else: # https://linuxcontainers.org/lxc/manpages//man1/lxc-clone.1.html cmd = "lxc-clone" cmd += f" {snapshot} -o {orig} -n {name}" if path: cmd += f" -P {shlex.quote(path)}" if not os.path.exists(path): os.makedirs(path) if backing: backing = backing.lower() cmd += f" -B {backing}" if backing not in ("dir", "overlayfs"): if size: cmd += f" -L {size}" ret = __salt__["cmd.run_all"](cmd, python_shell=False) # please do not merge extra conflicting stuff # inside those two line (ret =, return) return _after_ignition_network_profile( cmd, ret, name, network_profile, path, nic_opts )
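The notable detail in clone() is the version switch: LXC 2.0 replaced lxc-clone with lxc-copy and changed its flags, so the command string is built differently depending on version(). A minimal sketch of only that command construction follows; the container names, version string, and simplified backing logic are hypothetical and nothing is executed:

def build_clone_cmd(name, orig, lxc_version, snapshot=True, backing=None, size="1G"):
    # LXC >= 2.0 uses lxc-copy (-N for the new name); older releases use lxc-clone.
    if backing == "dir":
        snapshot = False  # a plain dir backing cannot be snapshotted, as in clone()
    snap_flag = "-s" if snapshot else ""

    if int(lxc_version.split(".")[0]) >= 2:
        cmd = f"lxc-copy {snap_flag} -n {orig} -N {name}"
    else:
        cmd = f"lxc-clone {snap_flag} -o {orig} -n {name}"

    if backing:
        backing = backing.lower()
        cmd += f" -B {backing}"
        if backing not in ("dir", "overlayfs", "btrfs"):
            cmd += f" -L {size}"
    return " ".join(cmd.split())  # drop the doubled space left by an empty snapshot flag


# Hypothetical values: snapshot-clone 'base' into 'web01' on LXC 3.0 with LVM backing
print(build_clone_cmd("web01", "base", "3.0.4", backing="lvm"))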
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):", "def clone( m, orig):\r\n if m.ObjType not in (1, 6): return\r\n if not orig: return\r\n \r\n if m.ObjType == 6: # Target is a Folder\r\n if orig.ObjType == 6: cloned = m.CopyFolderDisp( orig) # Orig is Folder too\r\n else: cloned = m.CopyFCODisp( orig) # Orig is FCO\r\n elif m.ObjType == 1:\r\n cloned = m.CopyFCODisp( orig, metaRole( orig)) # Target is Model, Orig is FCO\r\n \r\n if cloned:\r\n \tcloned.Name = \"Cloned\" + orig.Name\r\n return cloned", "def catalog_clone(self, args):\n try:\n catalog = self.server.connect_ermrest(args.id)\n print(\"Attempting to clone catalog %s into new catalog. Please wait...\" % args.id)\n dest_cat = catalog.clone_catalog(copy_data=args.no_copy_data,\n copy_annotations=args.no_copy_annotations,\n copy_policy=args.no_copy_policy,\n truncate_after=args.no_truncate_after,\n exclude_schemas=args.exclude_schemas)\n print(\"Catalog successfully cloned into new catalog: %s\" % dest_cat.catalog_id)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e", "def _create_container(self, container_name):\n try:\n container = self.swift.head_container(container_name)\n except client.ClientException:\n self.swift.put_container(container_name)\n else:\n return container", "def create_cloned_volume(self, volume, src_vref):\n clone_name = self.get_volume_name(volume.id)\n src_name = self.get_volume_name(src_vref.id)\n src_vol = self.client.search(\"volumes\", name=src_name)\n src_map = self.client.search(\"mappings\", volume=src_vol)\n src_attach_info = dest_attach_info = None\n if src_map.total != 0:\n msg = _(\"K2 driver does not support clone of an attached volume. \"\n \"To get this done, create a snapshot from the attached \"\n \"volume and then create a volume from the snapshot.\")\n LOG.error(msg)\n raise KaminarioCinderDriverException(reason=msg)\n try:\n properties = volume_utils.brick_get_connector_properties(\n self.configuration.use_multipath_for_image_xfer,\n self.configuration.enforce_multipath_for_image_xfer)\n conn = self.initialize_connection(src_vref, properties)\n src_attach_info = self._connect_device(conn)\n self.create_volume(volume)\n conn = self.initialize_connection(volume, properties)\n dest_attach_info = self._connect_device(conn)\n volume_utils.copy_volume(src_attach_info['device']['path'],\n dest_attach_info['device']['path'],\n src_vref.size * units.Ki,\n self.configuration.volume_dd_blocksize,\n sparse=True)\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(volume, properties)\n self.terminate_connection(src_vref, properties)\n except Exception as ex:\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(src_vref, properties)\n self.terminate_connection(volume, properties)\n self.delete_volume(volume)\n LOG.exception(\"Create a clone: %s failed.\", clone_name)\n raise KaminarioCinderDriverException(reason=ex)", "def duplicate_in_memory(self, new_name='', add_to_registry=False):\n if new_name is '':\n new_name = self.layer.name() + ' TMP'\n\n if self.layer.type() == QgsMapLayer.VectorLayer:\n v_type = self.layer.wkbType()\n if v_type == QGis.WKBPoint:\n type_str = \"point\"\n elif v_type == QGis.WKBLineString:\n type_str = \"linestring\"\n elif v_type == QGis.WKBPolygon:\n type_str = \"polygon\"\n elif v_type == QGis.WKBMultiPoint:\n type_str = \"multipoint\"\n elif v_type == QGis.WKBMultiLineString:\n type_str = 
\"multilinestring\"\n elif v_type == QGis.WKBMultiPolygon:\n type_str = \"multipolygon\"\n else:\n raise TypeError('Layer type %s can not be accepted' % v_type)\n else:\n raise RuntimeError('Layer is not a VectorLayer')\n\n crs = self.layer.crs().authid().lower()\n my_uuid = str(uuid.uuid4())\n uri = '%s?crs=%s&index=yes&uuid=%s' % (type_str, crs, my_uuid)\n mem_layer = QgsVectorLayer(uri, new_name, 'memory')\n with LayerEditingManager(mem_layer, 'Duplicating layer', DEBUG):\n mem_provider = mem_layer.dataProvider()\n\n provider = self.layer.dataProvider()\n v_fields = provider.fields()\n\n fields = []\n for i in v_fields:\n fields.append(i)\n\n mem_provider.addAttributes(fields)\n\n for ft in provider.getFeatures():\n mem_provider.addFeatures([ft])\n\n if add_to_registry:\n if mem_layer.isValid():\n QgsMapLayerRegistry.instance().addMapLayer(mem_layer)\n else:\n raise RuntimeError('Layer invalid')\n\n return mem_layer", "def test_clone_scenario(self):\n pass", "def copy_prov(self, orig_name):\n api_page = \"/configuration/object/copy_provisioning_params\"\n url = \"{}{}?{}&UIDARUBA={}\".format(\n self.base_url,\n api_page,\n self.config_path,\n self.uidaruba)\n\n obj = {\"_action\": \"modify\",\n \"ap-name\": orig_name,\n \"copy_provisioning_options\": \"ap-name\"\n }\n\n json_obj = json.loads(json.dumps(obj))\n resp = self.post(url, json_obj)\n print(\"copy_prov_resp: {}\".format(resp.status_code))\n # print(resp.text)", "def create_cloned_volume(self, vol, src_vref):\n self.authenticate_user()\n name = self._get_volume_name(vol)\n srcname = self._get_vipr_volume_name(src_vref)\n number_of_volumes = 1\n\n try:\n if(src_vref['consistencygroup_id']):\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Clone can't be taken individually on a volume\" + \\\n \" that is part of a Consistency Group\")\n except AttributeError as e:\n LOG.info(\"No Consistency Group associated with the volume\")\n\n try:\n (storageresType, storageresTypename) = self.volume_obj.get_storageAttributes(\n srcname, None, None)\n\n resource_id = self.volume_obj.storageResource_query(storageresType,\n srcname,\n None,\n None,\n self.configuration.vipr_project,\n self.configuration.vipr_tenant)\n\n self.volume_obj.clone(\n name,\n number_of_volumes,\n resource_id,\n sync=True)\n\n clone_vol_path = self.configuration.vipr_tenant + \"/\" + self.configuration.vipr_project + \"/\" + name\n detachable = self.volume_obj.is_volume_detachable(clone_vol_path)\n LOG.info(\"Is volume detachable : \" + str(detachable))\n \n #detach it from the source volume immediately after creation\n if(detachable):\n self.volume_obj.volume_clone_detach(\"\",clone_vol_path, True)\n\n except IndexError as e:\n LOG.exception(\"Volume clone detach returned empty task list\")\n\n except vipr_utils.SOSError as e:\n if(e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR):\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + name + \": clone failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : {%s} clone failed\") % name)", "def clone(context, request):\n if request.has_permission('create'):\n return {\n 'name': 'clone',\n 'title': 'Clone',\n 'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),\n 'href': '{item_uri}#!clone'.format(item_uri=request.resource_path(context)),\n }", "def copy(self):\n attrs = {k: self.__dict__[k].copy() for k in self.containers}\n attrs.update({k: cp.deepcopy(self.__dict__[k]) for k in self.shared})\n return 
self.__class__(**attrs)", "def clone(self):\n return self.__class__(self.name, *self)", "def clone_execution_system(self, host_system_id, new_system_id, alloc):\n\n clone_body = {\n 'action': 'CLONE',\n 'id': new_system_id\n }\n\n cloned_sys = self.client.systems.manage(body=clone_body, systemId=host_system_id)\n\n sys = self.validate_exec_system(cloned_sys['id'], alloc)\n\n return sys", "def clone(self):\n return _SALOMERuntime.SalomeContainer_clone(self)", "def create_cloned_volume(self, volume, src_vref):\n LOG.info(_LI('new cloned volume: %s'), volume['name'])\n LOG.info(_LI('source volume for cloning: %s'), src_vref['name'])\n\n snapshot = {'volume_name': src_vref['name'],\n 'volume_id': src_vref['id'],\n 'volume_size': src_vref['size'],\n 'name': self._create_snapshot_name()}\n\n self.create_snapshot(snapshot)\n return self.create_volume_from_snapshot(volume, snapshot,\n method='MOVE')", "def clone(self) -> 'ContainerConfig':\n return deepcopy(self)", "def update_container_name(self, backup, container):\n return container", "def get_container_by_name(self, container_name, is_source):\n if container_name not in self.containers:\n self.containers[container_name] = self.create_container(container_name, is_source)\n return self.containers[container_name]", "def clone_lc(client, lc, name, image_id):\n PARAMS_TO_CLONE = [\n 'KeyName',\n 'SecurityGroups',\n 'ClassicLinkVPCId',\n 'ClassicLinkVPCSecurityGroups',\n 'UserData',\n 'InstanceType',\n 'BlockDeviceMappings',\n 'InstanceMonitoring',\n 'SpotPrice',\n 'IamInstanceProfile',\n 'EbsOptimized',\n 'AssociatePublicIpAddress',\n 'PlacementTenancy',\n ]\n try:\n params = {\n key: lc[key] for key in PARAMS_TO_CLONE if key in lc\n }\n except KeyError:\n print(list(lc.keys()))\n raise\n # We need special handling for kernel ID and ramdisk ID.\n if lc['KernelId']:\n params['KernelId'] = lc['KernelId']\n if lc['RamdiskId']:\n params['RamdiskId'] = lc['RamdiskId']\n client.create_launch_configuration(\n LaunchConfigurationName=name,\n ImageId=image_id,\n **params\n )\n return client.describe_launch_configurations(\n LaunchConfigurationNames=[name],\n )['LaunchConfigurations'][0]", "def clone_pipeline(ctx,\n pipeline_name,\n new_pipeline_name,\n username,\n password,\n ip_address,\n interactive):\n slab_logger.info('Cloning %s to %s' % (pipeline_name, new_pipeline_name))\n if not username:\n username = ctx.get_username()\n if not password:\n password = ctx.get_password(interactive)\n if not password or not username:\n slab_logger.error(\"Username is %s and password is %s. 
\"\n \"Please, set the correct value for both and retry.\" %\n (username, password))\n sys.exit(1)\n config_xmlurl = \"http://{0}/go/api/admin/config/current.xml\".format(\n ip_address)\n post_config_xmlurl = \"http://{0}/go/api/admin/config.xml\".format(\n ip_address)\n requests.post(config_xmlurl,\n auth=HTTPBasicAuth(username, password))\n\n # Retrieve xml config from server\n (md5, root) = gocd_utils.get_config(config_xmlurl, (username, password))\n new_xml = copy.deepcopy(root)\n\n new_xml = gocd_utils.create_pipeline(new_xml,\n pipeline_name,\n new_pipeline_name)\n\n # Upload results\n new_xmls = ET.tostring(new_xml, encoding='utf-8', method='xml')\n gocd_utils.push_config(post_config_xmlurl, md5,\n new_xmls, (username, password))", "def copy(self):\n return Costume(self.name, self.image, self.rotation_center)", "def test_clone_deployment(self):\n pass", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def test_create_cloned_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n orig = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n clone = {'id': '2', 'name': 'clone1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_cloned_volume(clone, orig)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'clone1', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'clone1'}\n self.assertDictMatch(expected_pid, pid)", "def ddtest_create_rsa_container_w_name(self, name=None):\n secret_urls = self.secret_behaviors.create_n_secrets(3)\n container_resp = self.behaviors.create_rsa_container(\n name, secret_urls[0], secret_urls[1], secret_urls[2])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=name, type='rsa')", "def clone_collection(self, src_mongodb_uri, src_database, src_collection):\n # drop \"mongodb://\" suffix from uri\n src_conn = src_mongodb_uri[10:]\n if src_conn[-1] == \"/\":\n src_conn = src_conn[:-1]\n self.client.admin.command(\n {\"cloneCollection\": src_database + \".\" + src_collection, \"from\": src_conn}\n )" ]
[ "0.5772289", "0.5650864", "0.56195503", "0.5611972", "0.5600566", "0.5599026", "0.5561393", "0.55396396", "0.55313313", "0.5530797", "0.5530685", "0.552028", "0.55201644", "0.55154884", "0.5503272", "0.54946333", "0.5460324", "0.54556835", "0.54381055", "0.5433605", "0.54106903", "0.54060805", "0.540564", "0.540564", "0.540564", "0.540564", "0.540564", "0.53941405", "0.53932023", "0.5382238" ]
0.7597853
0
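
The select() closure in clone() above is what arbitrates between an explicit keyword argument and a value coming from the container profile. A minimal standalone sketch of that override pattern, with made-up profile and override values and no Salt loader involved:

import copy

def select(profile, overrides, key, default=None):
    # keyword overrides are preferred over profile values, mirroring clone()
    override_match = overrides.pop(key, None)
    profile_match = profile.pop(key, default)
    if override_match is None:
        return profile_match
    return override_match

profile = copy.deepcopy({"backing": "overlayfs", "size": "2G"})
overrides = copy.deepcopy({"size": "5G"})
print(select(profile, overrides, "backing"))          # overlayfs (from profile)
print(select(profile, overrides, "size"))             # 5G (override wins)
print(select(profile, overrides, "snapshot", False))  # False (default)
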
Raise an exception if the container does not exist
def _ensure_exists(name, path=None):
    if not exists(name, path=path):
        raise CommandExecutionError(f"Container '{name}' does not exist")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_nonexistant_container(self):\n ref = self.container_client._get_base_url() + '/invalid_uuid'\n get_resp = self.container_client.get_container(ref)\n self.assertEqual(get_resp.status_code, 404)", "def create_container_if_missing(container, swift_conn, options):\n try:\n swift_conn.head_container(container)\n except swift_client.ClientException, e:\n if e.http_status == httplib.NOT_FOUND:\n add_container = config.get_option(options,\n 'swift_store_create_container_on_put',\n type='bool', default=False)\n if add_container:\n try:\n swift_conn.put_container(container)\n except ClientException, e:\n msg = _(\"Failed to add container to Swift.\\n\"\n \"Got error from Swift: %(e)s\") % locals()\n raise glance.store.BackendException(msg)\n else:\n msg = (_(\"The container %(container)s does not exist in \"\n \"Swift. Please set the \"\n \"swift_store_create_container_on_put option\"\n \"to add container to Swift automatically.\")\n % locals())\n raise glance.store.BackendException(msg)\n else:\n raise", "def test_get_image_exists_not(self):\n with self.assertRaises(errors.NotFound):\n self.docker.images.get(\"image_does_not_exists\")", "def test_get_container(self):\n pass", "def test_delete_nonexistant_container(self):\n ref = self.container_client._get_base_url() + '/invalid_uuid'\n del_resp = self.behaviors.delete_container(ref)\n self.assertEqual(del_resp.status_code, 404)", "def test_container_exists():\n return exec_fn(_test_container_exists)", "def _create_container(self, container_name):\n try:\n container = self.swift.head_container(container_name)\n except client.ClientException:\n self.swift.put_container(container_name)\n else:\n return container", "def test_create_container(self):\n pass", "def test_create_container_w_invalid_type(self):\n container_resp = self.behaviors.create_container(\n 'name', 'bad_type', [])\n self.assertEqual(container_resp.status_code, 400)", "def test_construct_compose_dict_nonexisting_scraper(self):\n with self.assertRaises(ModuleNotFoundError):\n docker_compose.construct_compose_dict(\"nonexisting\")", "def ensure_container():\n return exec_fn(_init_container)", "def check_existing(self):\n if self.btcd_container != None:\n self.btcd_container.reload()\n if self.btcd_container.status == \"running\":\n rpcconn, container = self.detect_bitcoind_container(\n self.rpcconn.rpcport\n )\n if container == self.btcd_container:\n return rpcconn\n raise Exception(\"Ambigious Container running\")\n return None", "def raise_exists(*args_unused, **kwargs_unused):\n raise kazoo.client.NodeExistsError()", "def raise_exists(*args_unused, **kwargs_unused):\n raise kazoo.client.NodeExistsError()", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def put_container(self, container):\n if self.onest.create_bucket(container):\n LOG.debug('put_container, create success. 
'\n 'Container: %s.', container)\n else:\n # If return false, means exist\n LOG.info(_LI('put_container, '\n 'container(%s) exist, just use it.'), container)", "def test_containers(self):\n\n message = {\"method\": \"containers\"}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"containers\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertNotEqual(len(response[\"result\"]), 0)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")", "def get_container(self, account, container):\n \n pass", "def test_show_container(self):\n pass", "def test_no_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(description=\"container\", software_system=system1)", "def test_get_not_existing_item_raise_exception(config):\n p = MsSqlProvider(config)\n with pytest.raises(ProviderItemNotFoundError):\n p.get(-1)", "def _register_container(self, container):\n found = False\n try:\n for host, location, container in Container.Container.host_generator(container,\n known_networks=self.networks.keys()):\n websocket = \"ws\" in host.scheme or \"wss\" in host.scheme\n secured = 'https' in host.scheme or 'wss' in host.scheme\n http = 'http' in host.scheme or 'https' in host.scheme\n # it might return string if there's a error in processing\n if type(host) is not str:\n if (host.hostname, host.port) in self.hosts:\n existing_host: Host = self.hosts[(host.hostname, host.port)]\n existing_host.add_container(location, container, websocket=websocket, http=http)\n ## if any of the containers in for the virtualHost require https, the all others will be redirected to https.\n if secured:\n existing_host.secured = True\n host = existing_host\n else:\n host.secured = secured\n host.add_container(location, container, websocket=websocket, http=http)\n self.hosts[(host.hostname, host.port)] = host\n\n if host.secured:\n if host.hostname not in self.ssl_certificates:\n host.ssl_expiry = self.ssl.expiry_time(host.hostname)\n else:\n host.ssl_expiry = self.ssl_certificates[host.hostname]\n if (host.ssl_expiry - datetime.datetime.now()).days > 2:\n self.ssl_certificates[host.hostname] = host.ssl_expiry\n\n found = True\n self.containers.add(container.id)\n\n except Container.NoHostConiguration:\n print(\"Skip Container:\", \"No VIRTUAL_HOST configuration\", \"Id:\" + container.id,\n \"Name:\" + container.attrs[\"Name\"].replace(\"/\", \"\"), sep=\"\\t\")\n except Container.UnreachableNetwork:\n print(\"Skip Container:\", \"UNREACHABLE Network \", \"Id:\" + container.id,\n \"Name:\" + container.attrs[\"Name\"].replace(\"/\", \"\"), sep=\"\\t\")\n return found", "def test_find_config_nonexist(self):\n with pytest.raises(scuba.config.ConfigError):\n scuba.config.find_config()", "def image_exists(container: str) -> str:\n response = session.get(f\"{api_endpoint}/images/{container}/exists\")\n if response.status_code == 204:\n return \"{'response': 204}\"\n else:\n return response.json()", "def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)", "def raise_not_found(*_, **__):\n e = boto.exception.S3ResponseError(400, 'missing')\n e.error_code = 'NoSuchBucket'\n raise e", "def 
get_metadata_container(container_id, expected_container_type: ContainerType):\n for exp in registered_containers:\n if container_id in (str(exp.id), get_qualified_name(exp)):\n return exp\n\n raise MetadataContainerNotFound.of_container_type(\n container_type=expected_container_type, container_id=container_id\n )", "def test_exist(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n MetadataTemplate.exists(self.study)", "def test_resource_not_existing(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"connections.non_existing_connection.name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Resource connections/non_existing_connection does not exist.\"\n )", "def image_not_exists(self):\n res = subprocess.run(\n \"{} inspect {}\".format(self.binary, self.vars['image']),\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n return res.returncode" ]
[ "0.7576816", "0.6658006", "0.6570468", "0.6503031", "0.6378414", "0.6369641", "0.62627816", "0.6254908", "0.6067033", "0.59998596", "0.5997909", "0.5931003", "0.5895688", "0.5895688", "0.5887455", "0.58845174", "0.5854507", "0.57873976", "0.57844317", "0.57744735", "0.5717459", "0.5704382", "0.56860226", "0.56574875", "0.56547457", "0.56538534", "0.5637481", "0.5631506", "0.56057996", "0.5600719" ]
0.68097353
1
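
_ensure_exists() is the guard the other module functions call before doing any work. A hedged sketch of the same check driven from Python on a Salt minion via salt.client.Caller; Salt and LXC are assumed to be installed, and the container name and path are placeholders:

import salt.client

caller = salt.client.Caller()
if not caller.cmd("lxc.exists", "web01", path="/srv/lxc"):
    raise RuntimeError("Container 'web01' does not exist")
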
If the container is not currently running, start it. This function returns the state that the container was in before changing. path: path to the container parent directory
def _ensure_running(name, no_start=False, path=None):
    _ensure_exists(name, path=path)
    pre = state(name, path=path)
    if pre == "running":
        # This will be a no-op but running the function will give us a pretty
        # return dict.
        return start(name, path=path)
    elif pre == "stopped":
        if no_start:
            raise CommandExecutionError(f"Container '{name}' is not running")
        return start(name, path=path)
    elif pre == "frozen":
        if no_start:
            raise CommandExecutionError(f"Container '{name}' is not running")
        return unfreeze(name, path=path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n\n return start", "def start(name, **kwargs):\n path = kwargs.get(\"path\", None)\n cpath = get_root_path(path)\n lxc_config = kwargs.get(\"lxc_config\", None)\n cmd = \"lxc-start\"\n if not lxc_config:\n lxc_config = os.path.join(cpath, name, \"config\")\n # we try to start, even without config, if global opts are there\n if os.path.exists(lxc_config):\n cmd += f\" -f {shlex.quote(lxc_config)}\"\n cmd += \" -d\"\n _ensure_exists(name, path=path)\n if state(name, path=path) == \"frozen\":\n raise CommandExecutionError(f\"Container '{name}' is frozen, use lxc.unfreeze\")\n # lxc-start daemonize itself violently, we must not communicate with it\n use_vt = kwargs.get(\"use_vt\", None)\n with_communicate = kwargs.get(\"with_communicate\", False)\n return _change_state(\n cmd,\n name,\n \"running\",\n stdout=None,\n stderr=None,\n stdin=None,\n with_communicate=with_communicate,\n path=path,\n use_vt=use_vt,\n )", "def start(self):\n print \"start: starting container on {}\".format(self.daemon.host_name)\n\n if self.details is None:\n return None\n else:\n if not self.details['State']['Running'] is True:\n result = self.daemon.connection.start(self.config['release_name'],\n port_bindings=self.config['s_ports'],\n binds=self.config['binds'],\n links=self.config['links'])\n return result\n else:\n return None", "def start(self, with_delta=False):\n if self.running:\n raise ContainerError(\"Container '{}' already running.\".format(self.name))\n\n self._mountiso(os.path.join(self.containerpath, self.config.image))\n # check that the container is coherent with our deltas\n (isoid, release, arch) = utils.extract_cd_info(self.config.isomount)\n if self.config.command != \"upgrade\" and self.config.iso is not None:\n logger.debug(\"Checking that the container is compatible with the iso.\")\n if not (self.config.isoid == isoid and\n self.config.release == release and\n self.config.arch == arch):\n raise ContainerError(\"Can't reuse a previous run delta: the previous run was used with \"\n \"{deltaisoid}, {deltarelease}, {deltaarch} and {imagepath} is for \"\n \"{isoid}, {release}, {arch}. config use a compatible container.\"\n \"\".format(deltaisoid=self.config.isoid,\n deltarelease=self.config.release,\n deltaarch=self.config.arch,\n isoid=isoid, release=release, arch=arch,\n imagepath=self.config.image))\n if self.config.basedeltadir:\n logger.debug(\"Check that the delta has a compatible base delta in the container\")\n if not os.path.isdir(os.path.join(self.containerpath, self.config.basedeltadir)):\n raise ContainerError(\"No base delta found as {}. This means that we can't reuse \"\n \"this previous run with it. 
Please use a compatible container \"\n \"or restore this base delta.\".format(self.config.basedeltadir))\n self.config.isoid = isoid\n self.config.release = release\n self.config.arch = arch\n\n # regenerate a new runid, even if restarting an old run\n self.config.runid = int(time.time())\n\n self.config.archivedir = const.ARCHIVEDIR\n\n # tools and default config from otto\n self._copy_otto_files()\n\n logger.info(\"Starting container '{}'\".format(self.name))\n if not self.container.start():\n raise ContainerError(\"Can't start lxc container\")\n\n # Wait for the container to start\n self.container.wait('RUNNING', const.START_TIMEOUT)\n logger.info(\"Container '{}' started\".format(self.name))\n if not self.running:\n raise ContainerError(\"The container didn't start successfully\")", "def container_workingdir(self):\n return self.environment['HOME']", "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "def wait_started(name, path=None, timeout=300):\n if not exists(name, path=path):\n raise CommandExecutionError(f\"Container {name} does does exists\")\n if not state(name, path=path) == \"running\":\n raise CommandExecutionError(f\"Container {name} is not running\")\n ret = False\n if running_systemd(name, path=path):\n test_started = test_sd_started_state\n logger = log.error\n else:\n test_started = test_bare_started_state\n logger = log.debug\n now = time.time()\n expire = now + timeout\n now = time.time()\n started = test_started(name, path=path)\n while time.time() < expire and not started:\n time.sleep(0.3)\n started = test_started(name, path=path)\n if started is None:\n logger(\n \"Assuming %s is started, although we failed to detect that\"\n \" is fully started correctly\",\n name,\n )\n ret = True\n else:\n ret = started\n return ret", "def state(name, path=None):\n # Don't use _ensure_exists() here, it will mess with _change_state()\n\n cachekey = f\"lxc.state.{name}{path}\"\n try:\n return __context__[cachekey]\n except KeyError:\n if not exists(name, path=path):\n __context__[cachekey] = None\n else:\n cmd = \"lxc-info\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n cmd += f\" -n {name}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n if ret[\"retcode\"] != 0:\n _clear_context()\n raise CommandExecutionError(\n f\"Unable to get state of container '{name}'\"\n )\n c_infos = ret[\"stdout\"].splitlines()\n c_state = None\n for c_info in c_infos:\n stat = c_info.split(\":\")\n if stat[0].lower() == \"state\":\n c_state = stat[1].strip().lower()\n break\n __context__[cachekey] = c_state\n return __context__[cachekey]", "def _restart(self, docker_image_name):\n if self.docker_repo:\n docker_image_name = '%s/%s' % (self.docker_repo,\n docker_image_name)\n\n docker_container_name = '%s_%s' % (self.docker_image_name_prefix,\n self.scope)\n\n mounts = [\n DockerMount('/dev/log', '/dev/log', type='bind'),\n DockerMount(self.paths.sandbox_pipe_dir,\n self.paths.host_pipe_dir,\n type='bind'),\n DockerMount(self.paths.sandbox_storlet_base_dir,\n self.paths.host_storlet_base_dir,\n type='bind'),\n 
DockerMount(self.paths.sandbox_storlet_native_lib_dir,\n self.paths.host_storlet_native_lib_dir,\n type='bind', read_only=True),\n DockerMount(self.paths.sandbox_storlet_native_bin_dir,\n self.paths.host_storlet_native_bin_dir,\n type='bind', read_only=True)\n ]\n\n try:\n client = docker.from_env()\n # Stop the existing storlet container\n try:\n scontainer = client.containers.get(docker_container_name)\n except docker.errors.NotFound:\n # The container is not yet created\n pass\n else:\n scontainer.stop(timeout=self.sandbox_stop_timeout)\n\n # Check whether a new container can be started\n if self.max_containers_per_node > 0:\n all_scontainers = client.containers.list(\n filters={'label': 'managed_by=storlets'})\n if len(all_scontainers) >= self.max_containers_per_node:\n raise StorletRuntimeException(\n \"Cannot start a container because of limit\")\n\n # Start the new one\n client.containers.run(\n docker_image_name, detach=True, name=docker_container_name,\n network_disabled=True, mounts=mounts, user='swift',\n auto_remove=True, stop_signal='SIGHUP',\n labels={'managed_by': 'storlets'})\n except docker.errors.ImageNotFound:\n msg = \"Image %s is not found\" % docker_image_name\n raise StorletRuntimeException(msg)\n except docker.errors.APIError:\n self.logger.exception(\"Failed to manage docker containers\")\n raise StorletRuntimeException(\"Docker runtime error\")", "def is_container_running(self):\n return self._is_container_running", "def start(self):\n if self.session.status == Session.SessionStatus.RUNNING:\n return\n self._make_fifo()\n client = docker.from_env()\n\n volumes = {}\n if self.session.notebook:\n volumes.update(get_accessible_datasets_mount_dict(self.session.notebook))\n volumes.update({self.fifo_path: {\"bind\": f\"/{self.fifo_name}\"}})\n\n self.container = client.containers.run(\n \"m.docker-registry.ir/python:3.8-slim-buster\",\n [\"sh\", \"-c\", f\"python -i -u <>/{self.fifo_name}\"],\n stdin_open=True,\n detach=True,\n volumes=volumes\n )\n self.session.container_id = str(self.container.id)\n self.session.status = Session.SessionStatus.RUNNING\n self.session.run_counter = 1\n self.session.save()", "def path(self):\n return self._container_dir", "def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")", "def start_container(self):\n logger = logging.getLogger(self.dkr_name)\n logger.info(\"Starting up container\")\n\n try:\n svc = self.docker_client().containers.get(self.dkr_name)\n except Exception:\n svc = self.docker_client().containers.run(image=self.dkr_image,\n name=self.dkr_name,\n command=self.dkr_command,\n ports=self.dkr_ports,\n links=self.dkr_links,\n environment=self.dkr_env,\n detach=True,\n remove=self.dkr_remove)\n\n while svc.status != \"running\":\n svc.reload()\n sleep(1)\n\n self.dkr_container = svc", "def restart(self):\n self.paths.create_host_pipe_dir()\n\n docker_image_name = self.scope\n try:\n self._restart(docker_image_name)\n 
self.wait()\n except StorletTimeout:\n raise\n except StorletRuntimeException:\n # We were unable to start docker container from the tenant image.\n # Let us try to start docker container from default image.\n self.logger.exception(\"Failed to start docker container from \"\n \"tenant image %s\" % docker_image_name)\n\n self.logger.info(\"Trying to start docker container from default \"\n \"image: %s\" % self.default_docker_image_name)\n self._restart(self.default_docker_image_name)\n self.wait()", "def working_directory(path):\n prev_cwd = os.getcwd()\n os.chdir(path)\n yield\n os.chdir(prev_cwd)", "def with_workdir(self, path: str) -> \"Container\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"withWorkdir\", _args)\n return Container(_ctx)", "def start(self):\n return StateRunning(created_at=self.created_at)", "def working_directory(path):\n prev_cwd = Path.cwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(prev_cwd)", "def container_running(self, id=None, name=None):\n running = False\n if id:\n running = self.inspect_container(id)['State']['Running']\n elif name:\n running = self.inspect_container(name)['State']['Running']\n return running", "def docker_run(self) -> bool:\n containers = self.client.containers.list(filters={\"name\": self.cname})\n if containers:\n self.container = containers[0]\n return False\n\n info(\"Starting container {}...\".format(self.cname), nl=False)\n self.container = self.client.containers.run(\n image=self.image,\n detach=True,\n auto_remove=False,\n environment=self.env,\n hostname=self.dist,\n init=True,\n name=self.cname,\n remove=False,\n stdin_open=sys.stdin.isatty(),\n tty=True,\n volumes=self.volumes,\n entrypoint=\"bash\",\n command=[],\n )\n info(\"Done!\")\n\n return True", "def working_dir(path):\n starting_path = os.getcwd()\n os.chdir(path)\n yield\n os.chdir(starting_path)", "def in_docker():\n rc, out, _ = j.sals.process.execute(\"cat /proc/1/cgroup\", die=False, showout=False)\n if rc == 0 and \"/docker/\" in out:\n return True\n return False", "def test_bare_started_state(name, path=None):\n try:\n ret = run_all(name, \"ls\", path=path, ignore_retcode=True)[\"retcode\"] == 0\n except (CommandExecutionError,):\n ret = None\n return ret", "def move_to_parent(self, path):\n if path == self.dir_to_check:\n print (' Parent directory out of scope!')\n return path\n else:\n dir_name = os.path.dirname(path)\n return dir_name", "def test_start(self):\n\n message = {\"method\": \"start\",\n \"params\": {\"elem\": self.container_to_run}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"start\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_up_status = containers[container_name].lower().find(\"up\")\n\n self.assertEqual(find_up_status, 0, \"Container is not running\")", "def start(self) -> None:\r\n # --ulimit nofile=<soft limit>:<hard limit> set the limit for open files\r\n docker_run_command = ('docker run --ulimit nofile=65535:65535 -td -p %d:8545 -p %d:30303 --rm --name %s %s' %\r\n (self.rpc_port, self.ethereum_network_port, self.name, IMAGE))\r\n sleep(0.6)\r\n result = self.ip.exec_command(docker_run_command)\r\n if result:\r\n if result.startswith('docker: Error'):\r\n print(result)\r\n print(self.ip)\r\n raise RuntimeError('An error 
occurs while starting docker container. Container maybe already exists')\r\n print('container of node %s of blockchain %s at %s:%s started' % (self.node_index, self.blockchain_id,\r\n self.ip.address, self.rpc_port))\r\n new_account_command = 'docker exec -t %s geth --datadir abc account new --password passfile' % self.name\r\n sleep(0.1)\r\n account = self.ip.exec_command(new_account_command).split()[-1][1:-1]\r\n sleep(0.3)\r\n if len(account) == 40: # check if the account is valid\r\n self.accounts.append(account)\r\n else:\r\n print('invalid account')", "def startLXCContainer(self,node,vmid):\n post_data = None\n data = self.connect('post','nodes/%s/lxc/%s/status/start' % (node,vmid), post_data)\n return data", "def start_path(self) -> str:\n return self.get_from_redis(\"start_path\")", "def cwd_in_path():\n ..." ]
[ "0.6525386", "0.5892428", "0.58381957", "0.5799751", "0.5768178", "0.55822957", "0.5555275", "0.5545344", "0.55098873", "0.5495534", "0.5403357", "0.5391542", "0.5390164", "0.5354356", "0.53539497", "0.5352874", "0.5324362", "0.53116184", "0.5291704", "0.5286928", "0.52610266", "0.5207346", "0.5192817", "0.5171044", "0.51632947", "0.5161177", "0.5154228", "0.5140747", "0.51099557", "0.51087594" ]
0.6442831
1
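
_ensure_running() is essentially a three-way dispatcher on the current container state: running re-issues a no-op start, while stopped and frozen either fail (no_start=True) or recover via start()/unfreeze(). A standalone sketch of that decision table with the state passed in as a plain string (hypothetical action names, no Salt or LXC required):

def decide_action(pre, no_start=False):
    # mirror _ensure_running(): map the current state to the recovery step
    if pre == "running":
        return "start"   # no-op, but yields a pretty return dict
    if pre in ("stopped", "frozen"):
        if no_start:
            raise RuntimeError("Container is not running")
        return "start" if pre == "stopped" else "unfreeze"
    raise ValueError(f"unknown container state: {pre}")

for s in ("running", "stopped", "frozen"):
    print(s, "->", decide_action(s))
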
Freeze the named container. path: path to the container parent directory
def freeze(name, **kwargs):
    use_vt = kwargs.get("use_vt", None)
    path = kwargs.get("path", None)
    _ensure_exists(name, path=path)
    orig_state = state(name, path=path)
    start_ = kwargs.get("start", False)
    if orig_state == "stopped":
        if not start_:
            raise CommandExecutionError(f"Container '{name}' is stopped")
        start(name, path=path)
    cmd = "lxc-freeze"
    if path:
        cmd += f" -P {shlex.quote(path)}"
    ret = _change_state(cmd, name, "frozen", use_vt=use_vt, path=path)
    if orig_state == "stopped" and start_:
        ret["state"]["old"] = orig_state
        ret["started"] = True
    ret["state"]["new"] = state(name, path=path)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cwd (self, path):\r\n pass", "def component_docker_path(self, name: str) -> str:\n return str(self.component_path(name).parent.absolute())", "def cwd_in_path():\n ...", "def unfreeze(name, path=None, use_vt=None):\n _ensure_exists(name, path=path)\n if state(name, path=path) == \"stopped\":\n raise CommandExecutionError(f\"Container '{name}' is stopped\")\n cmd = \"lxc-unfreeze\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n return _change_state(cmd, name, \"running\", path=path, use_vt=use_vt)", "def test_redeploy_container_asset(self):\n pass", "def shadow_container(kls):\n if os.name == 'posix' and os.path.isdir('/dev/shm'):\n return '/dev/shm/'\n else:\n return gettempdir()", "def path(self):\n return self._container_dir", "def get_container_mount(path):\n host_dir = host_directory(path)\n cont_dir = get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC)\n return {\"host_dir\" : host_dir,\n \"container_dir\" : get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC),\n \"biobox_target\" : os.path.join(cont_dir, os.path.basename(path)) }", "def setupContainerFile(\n userNotif: UserNotif,\n ctx: ExecContext,\n rebuild: bool,\n inPlace: bool,\n cacheDir: Path) -> None:\n if not ctx.containerFile:\n raise RuntimeError(f\"{ctx.name}: container-file required\")\n\n localName, localFile = getLocalName(cacheDir, ctx.imageName, update=False)\n containerFileCopy: Optional[str] = None\n\n buildReasons: List[str] = []\n if rebuild:\n buildReasons.append(\"--rebuild set\")\n if not localFile.exists():\n buildReasons.append(f\"{localFile} doesn't exists\")\n elif localFile.read_text().strip() != ctx.containerFile.strip():\n if inPlace:\n # TODO: generalize this\n containerFileCopy = ctx.containerFile\n ctx.containerFile = \"\\n\".join([\n f\"FROM {localName}\",\n ctx.containerFile.split(\"\\n\")[-1]\n ])\n else:\n # TODO: show diff?\n ...\n buildReasons.append(f\"{localFile} content differ\")\n if not buildReasons and not podmanExists(\"image\", ctx.imageName):\n buildReasons.append(f\"{ctx.imageName} doesn't exist in the store\")\n\n if buildReasons:\n tmpFile = Path(str(localFile) + \".tmp\")\n tmpFile.parent.mkdir(parents=True, exist_ok=True)\n tmpFile.write_text(ctx.containerFile)\n userNotif(f\"Building {ctx.imageName} with {tmpFile} because: \" +\n \", \".join(buildReasons))\n try:\n build(tmpFile, localName, ctx.imageBuildCtx)\n except RuntimeError as e:\n raise RuntimeError(f\"Build of {tmpFile} failed: \" + str(e))\n if containerFileCopy:\n localFile.write_text(containerFileCopy)\n else:\n tmpFile.rename(localFile)", "def __update_container(self, path, obj_stat):\n try:\n self.logger.debug('Update container interface called')\n return self.asyn_helper.call \\\n (\"update_container\", path, obj_stat)\n except Exception as err:\n self.logger.error(('update_container for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def with_workdir(self, path: str) -> \"Container\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"withWorkdir\", _args)\n return Container(_ctx)", "def dependency_dir(self) -> Path:", "def _run_isolated_path(self):\n return self.m.swarming_client.path.join('run_isolated.py')", "def run_workdir(self, path):\n pass", "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = 
docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "def without_mount(self, path: str) -> \"Container\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"withoutMount\", _args)\n return Container(_ctx)", "def temporary(self, path):\r\n if path is None:\r\n raise ValueError('Can only temporarily establish a build root given a path.')\r\n prior = self._root_dir\r\n self._root_dir = path\r\n try:\r\n yield\r\n finally:\r\n self._root_dir = prior", "def overlay(tmp_path: pathlib.Path) -> pathlib.Path:\n shutil.copytree(\n _overlay.PATH,\n tmp_path,\n ignore=shutil.ignore_patterns(\"__init__.py\", \"__pycache__\"),\n dirs_exist_ok=True,\n )\n return tmp_path", "def cwd(self):", "def get_container_path(host_directory, container_prefix_path = \"/tmp\"):\n import hashlib\n from hashids import Hashids\n digest = funcy.rcompose(\n lambda x: hashlib.md5(x.encode('utf-8')).hexdigest(),\n lambda x: int(x, base=16),\n Hashids(min_length=6).encode)\n return os.path.join(container_prefix_path, digest(host_directory))", "def proc_docker_file(directory):\n print \"TASK-RUNNING\"\n os.rename(directory, directory + '_working')\n directory += '_working'\n try:\n dockerstack_agent.builder.do_build(directory)\n rmtree(directory)\n except Exception as e:\n traceback.print_exc()\n print \"TASK-ERROR\"\n raise e\n #finally:\n #Remove the directory\n\n print \"TASK-COMPLETE\"", "def _sibling_path(name):\n here = os.path.dirname(os.path.join(os.getcwd(), __file__))\n return os.path.normpath(os.path.join(here, name))", "def chdir(self, path):\n if not path:\n path = \"/\"\n elif not path.endswith(\"/\"):\n path = \"{}/\".format(path)\n res = self.get_cdmi(path)\n if res.ok():\n cdmi_info = res.json()\n # Check that object is a container\n if not cdmi_info[\"objectType\"] == CDMI_CONTAINER:\n return Response(406, u\"{0} isn't a container\".format(path))\n if cdmi_info[\"parentURI\"] == \"/\" and cdmi_info[\"objectName\"] == \"Home\":\n # root\n self._pwd = \"/\"\n else:\n self._pwd = \"{}{}\".format(\n cdmi_info[\"parentURI\"], cdmi_info[\"objectName\"]\n )\n return Response(0, \"ok\")\n else:\n return res", "def cwd_for_path(self, path):\n os_path = to_os_path(path, self.root_dir)\n # in the case of notebooks and kernels not being on the same filesystem,\n # walk up to root_dir if the paths don't exist\n while not os.path.isdir(os_path) and os_path != self.root_dir:\n os_path = os.path.dirname(os_path)\n return os_path", "def create_updater_path(self, filesystem, acc_dir, cont_dir, account, container):\n return '%s/%s/%s/%s/%s/%s' % (self.__fs_base, \\\n filesystem, acc_dir, account, cont_dir, container)", "def set_basedir(self, host, path):", "def local_finder_artifacts() -> Path:\n return Path()", "def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if 
os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")", "def path_extern_mounts(self) -> PurePath:\n return self.path_extern_supervisor / MOUNTS_FOLDER", "def mount(self, name, path):\n if not os.path.exists(path):\n raise OSError('no mount path: '+ path)\n if name.startswith('/'):\n name = name[1:]\n self._top_paths[name] = path" ]
[ "0.5830676", "0.5546616", "0.55015755", "0.54711205", "0.5459175", "0.5365298", "0.53491515", "0.5209383", "0.5198676", "0.5176276", "0.51726145", "0.51573443", "0.5126788", "0.5097986", "0.5097181", "0.5077351", "0.5048799", "0.50351375", "0.50339293", "0.5004525", "0.50012857", "0.4998183", "0.49947175", "0.49810946", "0.4967449", "0.49550042", "0.49445415", "0.4941915", "0.49401712", "0.49255478" ]
0.5833475
0
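
Because freeze() may first start a stopped container (start=True), the dictionary it returns records both the old and the new state. A hedged example of invoking it from a Salt master with salt.client.LocalClient; the minion target, container name, and path are placeholders, and a running master/minion pair is assumed:

import salt.client

local = salt.client.LocalClient()
ret = local.cmd(
    "lxc-host*",
    "lxc.freeze",
    ["web01"],
    kwarg={"start": True, "path": "/srv/lxc"},
)
for minion, result in ret.items():
    # result["state"] holds the {"old": ..., "new": "frozen"} pair built above
    print(minion, result.get("state"))
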
Unfreeze the named container. path: path to the container parent directory
def unfreeze(name, path=None, use_vt=None):
    _ensure_exists(name, path=path)
    if state(name, path=path) == "stopped":
        raise CommandExecutionError(f"Container '{name}' is stopped")
    cmd = "lxc-unfreeze"
    if path:
        cmd += f" -P {shlex.quote(path)}"
    return _change_state(cmd, name, "running", path=path, use_vt=use_vt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def without_mount(self, path: str) -> \"Container\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"withoutMount\", _args)\n return Container(_ctx)", "def unfrozen(self):\n archive_name = self.get_archive_name()\n\n # Decompilation for all .pyc files (inside of archive or binary)\n for pyc_file in self.unpack_archive(archive_name):\n self.decompilation(pyc_file)\n\n os.chdir(self.current_path)\n\n print(\"\\nWork is done.\")", "def clean(self, util_mod):\n super(ConanContainer, self).clean(util_mod)\n build = container.named_cache_dir(\"cmake-build\", ephemeral=True)\n util_mod.force_remove_tree(os.path.join(build, \"bin\"))\n util_mod.force_remove_tree(os.path.join(build, \"lib\"))", "def docker_module_shutdown(generator_start_dir: str, project_name: str) -> None:\n yield\n cwd = os.getcwd()\n project_dir = Path(generator_start_dir) / project_name\n if not project_dir.exists():\n return\n os.chdir(project_dir)\n Path(\"poetry.lock\").unlink(missing_ok=True)\n run_docker_compose_command(\"down -v\")\n os.chdir(cwd)", "def unseal(self, path: Union[bytes, str]) -> bytes:\n path = _to_bytes_or_null(path)\n data = ffi.new(\"uint8_t **\")\n data_size = ffi.new(\"size_t *\")\n ret = lib.Fapi_Unseal(self._ctx, path, data, data_size)\n _chkrc(ret)\n return bytes(ffi.unpack(_get_dptr(data, lib.Fapi_Free), data_size[0]))", "def unbootstrap(self, deployment_name):\n try:\n self._check_paths(self.deployments[deployment_name])\n except KeyError as e:\n logger.error(f\"Invalid deployment specified: {deployment_name}\\n Supported values: \" + (\", \").join(self.deployments.keys()) + f\"\\n\\tException: {e}\")\n raise SystemExit(1)\n\n self.deployments[deployment_name].unbootstrap()", "def remove(path):", "def unmount(\n path,\n ):\n try:\n LOG.debug('Unmounting %s', path)\n command_check_call(\n [\n '/bin/umount',\n '--',\n path,\n ],\n )\n except subprocess.CalledProcessError as e:\n raise UnmountError(e)\n\n os.rmdir(path)", "def drop_package ( self, name ):\n p = self._subdirs [name]\n del self._subdirs [name]\n p.fs_destroy()", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def freeze(name, **kwargs):\n use_vt = kwargs.get(\"use_vt\", None)\n path = kwargs.get(\"path\", None)\n _ensure_exists(name, path=path)\n orig_state = state(name, path=path)\n start_ = kwargs.get(\"start\", False)\n if orig_state == \"stopped\":\n if not start_:\n raise CommandExecutionError(f\"Container '{name}' is stopped\")\n start(name, path=path)\n cmd = \"lxc-freeze\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n ret = _change_state(cmd, name, \"frozen\", use_vt=use_vt, path=path)\n if orig_state == \"stopped\" and start_:\n ret[\"state\"][\"old\"] = orig_state\n ret[\"started\"] = True\n ret[\"state\"][\"new\"] = state(name, path=path)\n return ret", "def delete_container(ContainerName=None):\n pass", "def container_rm(self):\n cmd = self.lib.docker_cmd + [\"rm\", self.container_name]\n out, err, ret = justcall(cmd)\n if ret != 0:\n if \"unable to find\" in err:\n pass\n elif \"no such file\" in err:\n pass\n elif \"container has already been removed\" in err:\n pass\n elif \"has dependent containers which must be removed\" in 
err:\n pass\n elif \"no container with name\" in err:\n pass\n elif \"removal\" in err and \"already in progress\" in err:\n self.wait_for_removed()\n else:\n self.log.info(\" \".join(cmd))\n raise ex.excError(err)\n else:\n self.log.info(\" \".join(cmd))\n self.is_up_clear_cache()", "def unlink(self, path: PathLike):", "def remove_indiv_files(path):\n if isinstance(path, FSMap):\n path.fs.delete(path.root, recursive=True)\n else:\n fname, ext = os.path.splitext(path)\n if ext == '.zarr':\n shutil.rmtree(path)\n else:\n os.remove(path)", "def self_destruct(self, force_file_removal=False):\n self._unshare_linked_tree(directory=self.directory, force_file_removal=force_file_removal)", "def _umount_with_detach(entry_path):\n try:\n fs_linux.umount_filesystem(entry_path)\n except OSError as err:\n _LOGGER.warning('Failed to umount %s: %s',\n entry_path, err)\n # 16 means device busy\n if err.errno == 16:\n try:\n fs_linux.umount_filesystem(entry_path, lazy=True)\n except OSError as err:\n _LOGGER.warning('Failed to lazy umount %s: %s',\n entry_path, err)", "def unstage(self, path):\n self._git.index.remove(path)", "def unmount(self, client):\n log(\"Unmounting %s backend from %s\" %\n (self.backend, self.path), self.opt)\n getattr(client, self.unmount_fun)(mount_point=self.path)", "def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")", "def remove_file(path):\n pyCMD('hdfs', ['dfs', '-rm', '-skipTrash', path]).execute()", "def delete_container(self, container: Container):", "def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")", "def test_redeploy_container_asset(self):\n pass", "def remove_dir(path):\n pyCMD('hdfs', ['dfs', '-rm', '-r', '-f', '-skipTrash', path]).execute()", "def delete_container(self, account, container):\n \n pass", "def remove_vizant(tree):\n for childpath in [\".//target[@name='graph.init']\", \".//target[@name='graph.all']\", \".//target[@name='graph.sabbus']\"]:\n child = tree.find(childpath)\n parent = tree.find(\"%s/..\" % childpath)\n parent.remove(child)", "def __update_container(self, path, obj_stat):\n try:\n self.logger.debug('Update container interface called')\n return self.asyn_helper.call \\\n (\"update_container\", path, obj_stat)\n except Exception as err:\n self.logger.error(('update_container for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def clear(name):\n\n get_component(CachingPackage.COMPONENT_NAME).clear(name)", "def remove_docker_compose_file():\n os.remove(DOCKER_COMPOSE_FILE)" ]
[ "0.5753504", "0.55691415", "0.5461836", "0.5432628", "0.5406749", "0.53986204", "0.5376133", "0.5342398", "0.53159845", "0.5308569", "0.5266518", "0.5231988", "0.52299297", "0.5228473", "0.5225309", "0.5223065", "0.52178305", "0.51595366", "0.5118637", "0.51029444", "0.5100133", "0.508749", "0.50758994", "0.5061041", "0.50592256", "0.5045842", "0.5043947", "0.5036342", "0.49995816", "0.4972038" ]
0.7536665
0
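
unfreeze() ultimately shells out to lxc-unfreeze, adding -P only when a non-default parent directory is given. A minimal sketch of the same command construction with subprocess (assumes the LXC userspace tools are installed; names and paths are placeholders):

import shlex
import subprocess

def unfreeze_cmd(name, path=None):
    # build the same argument list the module assembles as a string
    cmd = ["lxc-unfreeze"]
    if path:
        cmd += ["-P", path]
    cmd += ["-n", name]
    return cmd

print(shlex.join(unfreeze_cmd("web01", "/srv/lxc")))
# subprocess.run(unfreeze_cmd("web01", "/srv/lxc"), check=True)  # on a host with LXC
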
Returns whether the named container exists. path
def exists(name, path=None):
    _exists = name in ls_(path=path)
    # container may be just created but we did cached earlier the
    # lxc-ls results
    if not _exists:
        _exists = name in ls_(cache=False, path=path)
    return _exists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def container_exists(self, id=None, name=None):\n exists = False\n if id and self.container_by_id(id):\n exists = True\n elif name and self.container_by_name(name):\n exists = True\n\n return exists", "def Exists(self, path: str) -> bool:\n ...", "def test_container_exists():\n return exec_fn(_test_container_exists)", "def exists(self, name):\n try:\n self.container.get_object(name)\n return True\n except NoSuchObject:\n return False", "def exist(name: str) -> bool:\n return bool(os.path.exists(name))", "def exists(self, path: str) -> bool:\n pass", "def check_for_docker_compose_file():\n return os.path.isfile(DOCKER_COMPOSE_FILE)", "def exists(path: str) -> bool:\n pass", "def exists(self, path):", "def is_image_exists(c, name):\n res = c.run('sudo docker images', hide='stdout')\n for image in res.stdout.split('\\n'):\n if name == image.split(' ')[0]:\n print('Image {name} exists'.format(name=name))\n return True\n\n print('Image {name} doesn\\'t exist'.format(name=name))\n return False", "def exists(path):\n return get_instance(path).exists(path)", "def in_docker():\n rc, out, _ = j.sals.process.execute(\"cat /proc/1/cgroup\", die=False, showout=False)\n if rc == 0 and \"/docker/\" in out:\n return True\n return False", "def exists(self):\r\n return os.path.exists(self.full_path)", "def path_exists(path):\n if path.startswith('gs://'):\n command = 'gsutil ls {path}'.format(path=path)\n elif path.startswith('s3://'):\n command = 'awscli s3 ls {path}'.format(path=path)\n else:\n return os.path.exists(path)\n\n return run_quick(command, echo=False).returncode == 0", "def path_exists(path):\r\n return os.path.exists(path)", "def exists(path):\n return os.path.exists(path)", "def exists(redis_client: Redis, root_path) -> bool:\n return bool(redis_client.exists(root_path))", "def path_exists(path):\n return os.path.exists(path)", "def exists(path):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n try:\r\n return samba.file_exists(os.path.basename(path), os.path.dirname(path)) or \\\r\n samba.folder_exists(os.path.basename(path), os.path.dirname(path))\r\n except gaierror:\r\n logger.info(\"deportesalacarta.core.filetools exists: No es posible conectar con la ruta\")\r\n platformtools.dialog_notification(\"No es posible conectar con la ruta\", path)\r\n return True\r\n else:\r\n return os.path.exists(path)", "def exists(self):\n return self.path.exists()", "def exists(path: str) -> bool:\n return _fs().exists(path)", "def _ensure_exists(name, path=None):\n if not exists(name, path=path):\n raise CommandExecutionError(f\"Container '{name}' does not exist\")", "def exists(self):\n return _os.path.exists(self.__str__())", "def exists(path):\n try:\n os.stat(path)\n except OSError:\n return False\n else:\n return True", "def exists(self) -> bool:\n p = pathlib.Path(self.summary_path)\n return p.exists()", "def exists(self, path: str) -> bool:\n return self.fs.exists(self._full_path(path))", "def exists(self):\n return bool(get_zone_by_name(self.get_name(refresh=False)))", "def exists(name):\n\n return get_component(CachingPackage.COMPONENT_NAME).exists(name)", "def exists(self, path: PathLike):", "def exists(self, name):\n full_path = self.path(name)\n return self.__volume.exists(full_path)" ]
[ "0.73341024", "0.679069", "0.67812216", "0.6701584", "0.6686739", "0.66246325", "0.6556786", "0.65370065", "0.652017", "0.64798504", "0.64441574", "0.6364728", "0.6331161", "0.632475", "0.63070565", "0.62872106", "0.6267875", "0.62601227", "0.6252627", "0.62207246", "0.6199016", "0.61872697", "0.61641115", "0.61540574", "0.61299664", "0.6123628", "0.6122077", "0.61150503", "0.6114177", "0.6094707" ]
0.77212536
0
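
The second ls_() call with cache=False protects against a stale cached listing: a container created moments ago may not be in the cached lxc-ls result yet, so the cache is bypassed once before reporting the container as absent. A generic sketch of that cache-then-refresh lookup (the listing function is faked, no LXC required):

_cached = ["web01", "db01"]            # pretend this was cached earlier

def list_containers(cache=True):
    if cache:
        return _cached
    # cache=False would re-run lxc-ls; fake a fresher listing here
    return _cached + ["just-created"]

def exists(name):
    found = name in list_containers()
    if not found:                      # maybe created after the cache was filled
        found = name in list_containers(cache=False)
    return found

print(exists("web01"), exists("just-created"), exists("missing"))
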
Returns the state of a container. path
def state(name, path=None):
    # Don't use _ensure_exists() here, it will mess with _change_state()

    cachekey = f"lxc.state.{name}{path}"
    try:
        return __context__[cachekey]
    except KeyError:
        if not exists(name, path=path):
            __context__[cachekey] = None
        else:
            cmd = "lxc-info"
            if path:
                cmd += f" -P {shlex.quote(path)}"
            cmd += f" -n {name}"
            ret = __salt__["cmd.run_all"](cmd, python_shell=False)
            if ret["retcode"] != 0:
                _clear_context()
                raise CommandExecutionError(
                    f"Unable to get state of container '{name}'"
                )
            c_infos = ret["stdout"].splitlines()
            c_state = None
            for c_info in c_infos:
                stat = c_info.split(":")
                if stat[0].lower() == "state":
                    c_state = stat[1].strip().lower()
                    break
            __context__[cachekey] = c_state
        return __context__[cachekey]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path(self):\n return self._container_dir", "def _state_data_path(self) -> Path:\n return Path(self._state_dir, 'state')", "def solution_path(self) -> list[State]:", "def lookup_container(path):\n for container in containers():\n rpath = probe_path_in_container(container, path)\n if rpath:\n return container, rpath\n return None, None", "def probe_path_in_container(container, path):\n cmd = [ 'docker', 'inspect', '--format', '{{json .Mounts}}', container ]\n with popen_text(cmd) as docker:\n for volume in json.load(docker.stdout):\n # TODO: do we need to skip some of the types? D'uh\n if path.startswith(volume['Source']):\n # TODO: may the path have a trailing slash?\n return volume['Destination'] + path[len(volume['Source']):]\n return None", "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def _state_dir(self) -> Path:\n assert self._project_root is not None\n return Path(self._project_root, '.cache/bacloud')", "def get_process_state(self, path, params):\n reply = self._local_collector.get_process_state()\n self._augment_state_reply(reply, path)\n return reply", "def get_state(self, uid):\n with self._directory_lock:\n return self._directory[uid]", "def get_state_output(self, state: str) -> Dict[str, Any]:\n return read_yaml(self._dir_path / f'{state}.yaml')", "def __get_container_stat(self, path, container_stat_obj, request_from_updater = False):\n try:\n self.logger.debug('Get container interface called')\n self.asyn_helper.call(\"get_container_stat\", path, container_stat_obj, request_from_updater)\n except Exception as err:\n self.logger.error(('get_container_stat for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def get_container_mount(path):\n host_dir = host_directory(path)\n cont_dir = get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC)\n return {\"host_dir\" : host_dir,\n \"container_dir\" : get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC),\n \"biobox_target\" : os.path.join(cont_dir, os.path.basename(path)) }", "def get_path(self):\n return self.path", "def disk_state(self) -> str:\n return pulumi.get(self, \"disk_state\")", "def get_path(self, node, state, request, **kwargs):\n unitStatus = state.get_data_attr('unitStatus')\n return unitStatus.unit.get_study_url(request.path)", "def replanning_path(self):\n start_state = self.extract_start_state()\n goal_state = self.extract_goal_state()", "def path(self):\n return self.path", "def getContainerStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/status/current' % (node,vmid),None)\n return data", "def getPath(self):\n return self.path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path" ]
[ "0.637723", "0.58938307", "0.585438", "0.5835431", "0.5798212", "0.5772527", "0.5686279", "0.5683623", "0.56019366", "0.5528277", "0.5528032", "0.54865354", "0.5476503", "0.54574996", "0.5451577", "0.5444445", "0.5443946", "0.5442292", "0.54314566", "0.5414911", "0.5414911", "0.5414911", "0.5414911", "0.5414911", "0.5414911", "0.5414911", "0.5414911", "0.5414911", "0.5414911", "0.5414911" ]
0.6676328
0
Returns the value of a cgroup parameter for a container path path to the container parent directory
def get_parameter(name, parameter, path=None):
    _ensure_exists(name, path=path)
    cmd = "lxc-cgroup"
    if path:
        cmd += f" -P {shlex.quote(path)}"
    cmd += f" -n {name} {parameter}"
    ret = __salt__["cmd.run_all"](cmd, python_shell=False)
    if ret["retcode"] != 0:
        raise CommandExecutionError(f"Unable to retrieve value for '{parameter}'")
    return ret["stdout"].strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_cgroup(self):\n if os.environ.get(\"NO_CGROUP\"):\n return None\n pid = os.getpid()\n cfile = \"/proc/{}/cgroup\".format(pid)\n # TODO REMOVE THIS OR FIGURE OUT FOR TESTING WHAT TO DO ABOUT THIS\n if not os.path.exists(cfile):\n raise Exception(f\"Couldn't find cgroup {cfile}\")\n else:\n with open(cfile) as f:\n for line in f:\n if line.find(\"htcondor\") > 0:\n items = line.split(\":\")\n if len(items) == 3:\n return items[2].strip()\n\n raise Exception(f\"Couldn't parse out cgroup from {cfile}\")", "def get_current_container_id():\n with open('/proc/self/cgroup', 'rt') as cgroup_file:\n for line in cgroup_file.readlines():\n return re.sub(r'^docker-', '', re.sub(r'\\.scope$', '', re.sub(r'^.*\\/', '', line.strip())))", "def component_docker_path(self, name: str) -> str:\n return str(self.component_path(name).parent.absolute())", "def get_parentURI(self):\n # A container in CDMI has a '/' at the end but we don't (except for the\n # root)\n parent_path = self.resource.container\n if parent_path != \"/\":\n parent_path = \"{}\".format(parent_path)\n return \"{}\".format(parent_path)", "def probe_path_in_container(container, path):\n cmd = [ 'docker', 'inspect', '--format', '{{json .Mounts}}', container ]\n with popen_text(cmd) as docker:\n for volume in json.load(docker.stdout):\n # TODO: do we need to skip some of the types? D'uh\n if path.startswith(volume['Source']):\n # TODO: may the path have a trailing slash?\n return volume['Destination'] + path[len(volume['Source']):]\n return None", "def path(self):\n return self._container_dir", "def get_docker_container_id(): # type: () -> t.Optional[str]\n path = '/proc/self/cpuset'\n container_id = None\n\n if os.path.exists(path):\n # File content varies based on the environment:\n # No Container: /\n # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507\n # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891\n # Podman: /../../../../../..\n contents = read_text_file(path)\n\n cgroup_path, cgroup_name = os.path.split(contents.strip())\n\n if cgroup_path in ('/docker', '/azpl_job'):\n container_id = cgroup_name\n\n if container_id:\n display.info('Detected execution in Docker container: %s' % container_id, verbosity=1)\n\n return container_id", "def get_parentURI(self):\n # A container in CDMI has a '/' at the end but we don't (except for the\n # root)\n parent_path = self.collection.container\n if parent_path not in ('/', 'null'):\n parent_path = \"{}\".format(parent_path)\n return \"{}\".format(parent_path)", "def cwd_in_path():\n ...", "def _get_container_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_container_name = f\"{dirname}_{self.config_name}\"\n container_name = self.config_options.get(\"container_name\", default_container_name)\n return container_name", "def _parent_path(cls,path):\n # os.path.dirname(), but strip directories like files (like unix basename)\n # Treat directories like files...\n if path[-1]=='/':\n path=path[:-1]\n ret = os.path.dirname(path)\n return ret", "def get_path(path, parent=None, prj=None):\n if prj is None:\n prj = QgsProject.instance()\n\n if parent is None:\n parent = prj.layerTreeRoot()\n\n if path is None:\n return parent\n if not isinstance(path, (list, tuple)):\n path = path.split(\"/\")\n\n for part in path:\n if len(path) > 0:\n parent = get_group(part, parent)\n\n return parent", "def lookup_container(path):\n for container in containers():\n rpath = probe_path_in_container(container, 
path)\n if rpath:\n return container, rpath\n return None, None", "def path_child(path):\n return path_to_str(parse_path(path)[1:])", "def get_pid(name, path=None):\n if name not in list_(limit=\"running\", path=path):\n raise CommandExecutionError(\n f\"Container {name} is not running, can't determine PID\"\n )\n info = __salt__[\"cmd.run\"](f\"lxc-info -n {name}\").split(\"\\n\")\n pid = [\n line.split(\":\")[1].strip()\n for line in info\n if re.match(r\"\\s*PID\", line) is not None\n ][0]\n return pid", "def get_parent_name_and_child(self, path):\n if path == '/':\n return \n dirname, name = os.path.split(path)\n parent = self.get_path(dirname)\n if parent:\n try:\n child = parent.get_child(name)\n return parent, name, child\n except KeyError:\n return parent, name, None\n else:\n return None, name, None", "def parent_dir_path(path):\n return absolute_path(os.path.dirname(path))", "def get_kernel_meta_parent_dir(attrs):\n attrs_dict = {}\n if isinstance(attrs, str):\n attrs_dict = json.loads(attrs)\n elif isinstance(attrs, dict):\n attrs_dict = attrs\n return os.path.realpath(attrs_dict.get(\"compile_cache\"))", "def get_container_mount(path):\n host_dir = host_directory(path)\n cont_dir = get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC)\n return {\"host_dir\" : host_dir,\n \"container_dir\" : get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC),\n \"biobox_target\" : os.path.join(cont_dir, os.path.basename(path)) }", "def __get_docker_file_path(path):\n if os.path.isfile(path):\n return path\n for dc_filename in DEFAULT_DC_FILENAMES:\n file_path = os.path.join(path, dc_filename)\n if os.path.isfile(file_path):\n return file_path\n # implicitly return None", "def get_root_path(path):\n if not path:\n path = __opts__.get(\"lxc.root_path\", DEFAULT_PATH)\n return path", "def GetParentNameForAnalyzeIamPolicy(organization,\n folder,\n attribute='root cloud asset'):\n VerifyParentForAnalyzeIamPolicy(organization, folder, attribute)\n if organization:\n return 'organizations/{0}'.format(organization)\n return 'folders/{0}'.format(folder)", "def _get_path(self, volume_path):\n return os.path.join(\n self.volume_prefix,\n volume_path.group_id if volume_path.group_id is not None else NO_GROUP_NAME,\n volume_path.volume_id)", "def GcsDirname(path):\n return os.path.dirname(path)", "def get_parent(path):\n\n # security check\n parent = os.path.dirname(path)\n\n try:\n get_abspath(parent)\n except:\n parent = ''\n\n return parent", "def get_parent_dir(path):\n return os.path.dirname(path)", "def path(self):\r\n return self.chroot", "def path(self):\r\n return self.chroot", "def path(self):\n return self.chroot", "def get_component_config_value(self) -> dict:\n package_type, package_name, *path = self.PATH.split(\".\")\n file_path = Path(f\"{package_type}\") / package_name / f\"{package_type[:-1]}.yaml\"\n\n with open(file_path, \"r\") as fp:\n data = yaml_load(fp)\n\n value = data\n for i in path:\n value = value[i]\n return value" ]
[ "0.65769273", "0.62457204", "0.5867603", "0.5534241", "0.5521419", "0.5441542", "0.5439973", "0.5414378", "0.53632414", "0.5348421", "0.52795565", "0.5239584", "0.5190792", "0.51211596", "0.5106422", "0.5100882", "0.5077889", "0.50685614", "0.5065297", "0.50638187", "0.50388163", "0.5025937", "0.50251096", "0.5007036", "0.5003834", "0.499202", "0.49899232", "0.49899232", "0.49578428", "0.49440125" ]
0.6499566
1
Set the value of a cgroup parameter for a container. path path to the container parent directory
def set_parameter(name, parameter, value, path=None):
    if not exists(name, path=path):
        return None

    cmd = "lxc-cgroup"
    if path:
        cmd += f" -P {shlex.quote(path)}"
    cmd += f" -n {name} {parameter} {value}"
    ret = __salt__["cmd.run_all"](cmd, python_shell=False)
    if ret["retcode"] != 0:
        return False
    else:
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set(self, name, path):\n self.yaml[IDK_YAML_GROUP][name] = path\n self.write()", "def setPath(*args):", "def set_zonepath(self, path):\n self.set_attr(ZONE_ENTRY['ZROOT'], path)", "def get_parameter(name, parameter, path=None):\n _ensure_exists(name, path=path)\n cmd = \"lxc-cgroup\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n cmd += f\" -n {name} {parameter}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n if ret[\"retcode\"] != 0:\n raise CommandExecutionError(f\"Unable to retrieve value for '{parameter}'\")\n return ret[\"stdout\"].strip()", "def write_parameter(self, path, value, attr=None):\n if path.startswith('sample'):\n entry = self.entry.nxroot['entry']\n else:\n entry = self.entry\n if value is not None:\n if attr and path in entry:\n entry[path].attrs[attr] = value\n elif path in entry:\n if isinstance(entry[path], NXgroup):\n del entry[path]\n entry[path] = value\n else:\n entry[path].replace(value)\n elif attr is None:\n entry[path] = value", "def setCurrent(userip, foldername):\n userspace[session[userip]].folder = foldername", "def set_value(attr_name, value, gpu_id):\n place = fluid.CPUPlace() if gpu_id < 0 \\\n else fluid.CUDAPlace(gpu_id)\n var = _fetch_var(attr_name, return_numpy=False)\n var.set(value, place)", "def cwd (self, path):\r\n pass", "def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()", "def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output", "def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value", "def set_path(self, key, value):\n return set_path(self, key, self.from_obj(value))", "def __init__(__self__, *,\n cgroup_mode: Optional[pulumi.Input['LinuxNodeConfigCgroupMode']] = None,\n sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if cgroup_mode is not None:\n pulumi.set(__self__, \"cgroup_mode\", cgroup_mode)\n if sysctls is not None:\n pulumi.set(__self__, \"sysctls\", sysctls)", "def cd(self,path):\n self.cwd = path", "def chdir(self, path):\n if not path:\n path = \"/\"\n elif not path.endswith(\"/\"):\n path = \"{}/\".format(path)\n res = self.get_cdmi(path)\n if res.ok():\n cdmi_info = res.json()\n # Check that object is a container\n if not cdmi_info[\"objectType\"] == CDMI_CONTAINER:\n return Response(406, u\"{0} isn't a container\".format(path))\n if cdmi_info[\"parentURI\"] == \"/\" and cdmi_info[\"objectName\"] == \"Home\":\n # root\n self._pwd = \"/\"\n else:\n self._pwd = \"{}{}\".format(\n cdmi_info[\"parentURI\"], cdmi_info[\"objectName\"]\n )\n return Response(0, \"ok\")\n else:\n return res", "def setPath(self, name, value):\n response = self.extendPath(name, value, True, True)\n return response", "def set(self, path, data):\n try:\n self.param_tree.set(path, data)\n except ParameterTreeError as e:\n raise FileInterfaceError(e)", "def set_basedir(self, host, path):", "def 
setSubdir(self, path):\n self.subdir = path", "def setCacheRoot(self, path):\n self.cache_root = path", "def src_subpath(self, val: str):\n self[\"src_subpath\"] = val", "def _set_cache_dir(self, path):\n assert path, 'Must input a directory path'\n self.manager.cache_dir = path", "def setDataRoot(path):\n global dataRoot\n dataRoot = os.path.realpath(path)", "def setLXCContainerOptions(self,node,vmid,post_data):\n data = self.connect('put',\"nodes/%s/lxc/%s/config\" % (node,vmid), post_data)\n return data", "def set(self, name_group, key, value):\n self.psettings.beginGroup(name_group)\n self.psettings.setValue(key, value)\n self.closeGroup()", "def _setPath(self, path):\n self.path = os.path.abspath(path)\n\n print('path = ' + path)\n try:\n os.chdir(self.path)\n except OSError as exc:\n LOGGER.error('Path doesn''t exist: %s' % (path))\n LOGGER.exception(exc)\n raise (exc)\n\n # check for path in the new Radiance directory:\n def _checkPath(path): # create the file structure if it doesn't exist\n if not os.path.exists(path):\n os.makedirs(path)\n print('Making path: '+path)", "def set_config(variable, value):\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"config\", \"set\", variable, value)\n else:\n cmd = _traffic_line(\"-s\", variable, \"-v\", value)\n\n log.debug(\"Setting %s to %s\", variable, value)\n return _subprocess(cmd)", "def changeConfVar(varName, varValue, file=\"config/scipion.conf\", escapeSlash=False):\n if escapeSlash:\n varValue = varValue.replace('/', '\\/')\n command = ['bash', '-c', 'sed -i -e '\n '\"s/%s = .*/%s = %s/\" '\n '%s' % (varName, varName, varValue, file)]\n\n return command", "def set_by_path(data: Dict[str, T], path: Sequence[str], value: T):\n get_by_path(data, path[:-1])[path[-1]] = value", "def TeamCityParamSetter(keyName, value):\n print(\"##teamcity[setParameter name='{}' value='{}']\".format(keyName, value))" ]
[ "0.5628445", "0.5346549", "0.52035743", "0.52012026", "0.5184149", "0.5179433", "0.5116018", "0.5091567", "0.5006781", "0.49869108", "0.49837905", "0.49817753", "0.49750057", "0.4939701", "0.4900112", "0.4869992", "0.48482382", "0.4794292", "0.47680622", "0.47667417", "0.46847227", "0.4670777", "0.46685475", "0.46638814", "0.46556032", "0.4625012", "0.46173757", "0.46128002", "0.46026647", "0.46024284" ]
0.69942623
0
Get the operational state of a systemd based container path path to the container parent
def systemd_running_state(name, path=None):
    try:
        ret = run_all(
            name, "systemctl is-system-running", path=path, ignore_retcode=True
        )["stdout"]
    except CommandExecutionError:
        ret = ""
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state(name, path=None):\n # Don't use _ensure_exists() here, it will mess with _change_state()\n\n cachekey = f\"lxc.state.{name}{path}\"\n try:\n return __context__[cachekey]\n except KeyError:\n if not exists(name, path=path):\n __context__[cachekey] = None\n else:\n cmd = \"lxc-info\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n cmd += f\" -n {name}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n if ret[\"retcode\"] != 0:\n _clear_context()\n raise CommandExecutionError(\n f\"Unable to get state of container '{name}'\"\n )\n c_infos = ret[\"stdout\"].splitlines()\n c_state = None\n for c_info in c_infos:\n stat = c_info.split(\":\")\n if stat[0].lower() == \"state\":\n c_state = stat[1].strip().lower()\n break\n __context__[cachekey] = c_state\n return __context__[cachekey]", "def path(self):\n return self._container_dir", "def probe_path_in_container(container, path):\n cmd = [ 'docker', 'inspect', '--format', '{{json .Mounts}}', container ]\n with popen_text(cmd) as docker:\n for volume in json.load(docker.stdout):\n # TODO: do we need to skip some of the types? D'uh\n if path.startswith(volume['Source']):\n # TODO: may the path have a trailing slash?\n return volume['Destination'] + path[len(volume['Source']):]\n return None", "def container_workingdir(self):\n return self.environment['HOME']", "def component_docker_path(self, name: str) -> str:\n return str(self.component_path(name).parent.absolute())", "def in_docker():\n rc, out, _ = j.sals.process.execute(\"cat /proc/1/cgroup\", die=False, showout=False)\n if rc == 0 and \"/docker/\" in out:\n return True\n return False", "def get_current_container_id():\n with open('/proc/self/cgroup', 'rt') as cgroup_file:\n for line in cgroup_file.readlines():\n return re.sub(r'^docker-', '', re.sub(r'\\.scope$', '', re.sub(r'^.*\\/', '', line.strip())))", "def get_parentURI(self):\n # A container in CDMI has a '/' at the end but we don't (except for the\n # root)\n parent_path = self.resource.container\n if parent_path != \"/\":\n parent_path = \"{}\".format(parent_path)\n return \"{}\".format(parent_path)", "def get_container_mount(path):\n host_dir = host_directory(path)\n cont_dir = get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC)\n return {\"host_dir\" : host_dir,\n \"container_dir\" : get_container_path(host_dir, BIOBOX_INPUT_MOUNT_LOC),\n \"biobox_target\" : os.path.join(cont_dir, os.path.basename(path)) }", "def __get_container_stat(self, path, container_stat_obj, request_from_updater = False):\n try:\n self.logger.debug('Get container interface called')\n self.asyn_helper.call(\"get_container_stat\", path, container_stat_obj, request_from_updater)\n except Exception as err:\n self.logger.error(('get_container_stat for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def getContainerStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/status/current' % (node,vmid),None)\n return data", "def lookup_container(path):\n for container in containers():\n rpath = probe_path_in_container(container, path)\n if rpath:\n return container, rpath\n return None, None", "def work_root(session):\n return session[\"AVALON_WORKDIR\"]", "def path(self):\r\n return self.chroot", "def path(self):\r\n return self.chroot", "def path(self):\n return self.chroot", "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | 
grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "def path_mounts(self) -> Path:\n return self.path_supervisor / MOUNTS_FOLDER", "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def get_root_path(path):\n if not path:\n path = __opts__.get(\"lxc.root_path\", DEFAULT_PATH)\n return path", "def get_parentURI(self):\n # A container in CDMI has a '/' at the end but we don't (except for the\n # root)\n parent_path = self.collection.container\n if parent_path not in ('/', 'null'):\n parent_path = \"{}\".format(parent_path)\n return \"{}\".format(parent_path)", "def test_current_path():\n output = sh.current_path()\n assert isinstance(output, str) and len(output) > 0", "def get_nsa_container_string(context):\n command = \"cd /tmp/cme/local/GFD;ls -1tr | tail -1\"\n return context.cme_session.send_ssh_command(command=command)", "def get_cont_stat(self, path, request_from_updater = False):\n try:\n self.logger.debug('Called get container stat interface of library')\n container_stat_obj = ContainerStatWithStatus()\n self.__get_container_stat(path, container_stat_obj, request_from_updater)\n status = container_stat_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'),\n {'status' : status})\n if status == OsdExceptionCode.OSD_INTERNAL_ERROR:\n self.logger.debug('Internal error raised from library')\n return HTTPInternalServerError\n elif status == OsdExceptionCode.OSD_FILE_OPERATION_ERROR:\n self.logger.debug('File operatiopn error raised from library')\n return HTTPInternalServerError\n elif status == OsdExceptionCode.OSD_NOT_FOUND:\n self.logger.debug('File not found error raised from library')\n return HTTPNotFound\n else:\n pass\n cont_stat = container_stat_obj.container_stat\n return {'account' : cont_stat.account, \\\n 'container' : cont_stat.container, \\\n 'created_at' : cont_stat.created_at, \\\n 'put_timestamp' : cont_stat.put_timestamp , \\\n 'delete_timestamp' : cont_stat.delete_timestamp, \\\n 'object_count' : cont_stat.object_count, \\\n 'bytes_used' : cont_stat.bytes_used, \\\n 'hash' : cont_stat.hash, 'id' : cont_stat.id, \\\n 'status' : cont_stat.status, \\\n 'status_changed_at' : cont_stat.status_changed_at, \\\n 'metadata' : cont_stat.metadata}\n except Exception as err:\n self.logger.exception(err)\n raise err", "def get_mount_point(path):\n\n path = os.path.abspath(path)\n while path != os.path.sep:\n if os.path.ismount(path):\n return path\n path = os.path.abspath(os.path.join(path, os.pardir))\n return path", "def current(userip):\n return userspace[session[userip]].folder", "def _get_path(client):\n if client is None:\n client = docker.from_env()\n\n info = client.info()\n return os.path.join(info['DockerRootDir'], _CREDENTIAL_SPECS_PATH)", "def test_sd_started_state(name, path=None):\n qstate = systemd_running_state(name, path=path)\n if qstate in (\"initializing\", \"starting\"):\n return False\n elif qstate == \"\":\n return None\n else:\n return True", "def container_status(self):\n if self.status == 'complete':\n return 'complete'\n try:\n task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']\n return 
task_status\n except (IndexError, ClientError):\n return 'STOPPED'", "def info(name, path=None):\n cachekey = f\"lxc.info.{name}{path}\"\n try:\n return __context__[cachekey]\n except KeyError:\n _ensure_exists(name, path=path)\n cpath = get_root_path(path)\n try:\n conf_file = os.path.join(cpath, name, \"config\")\n except AttributeError:\n conf_file = os.path.join(cpath, str(name), \"config\")\n\n if not os.path.isfile(conf_file):\n raise CommandExecutionError(f\"LXC config file {conf_file} does not exist\")\n\n ret = {}\n config = []\n with salt.utils.files.fopen(conf_file) as fp_:\n for line in fp_:\n line = salt.utils.stringutils.to_unicode(line)\n comps = [x.strip() for x in line.split(\"#\", 1)[0].strip().split(\"=\", 1)]\n if len(comps) == 2:\n config.append(tuple(comps))\n\n ifaces = []\n current = {}\n\n for key, val in config:\n if key == \"lxc.network.type\":\n current = {\"type\": val}\n ifaces.append(current)\n elif not current:\n continue\n elif key.startswith(\"lxc.network.\"):\n current[key.replace(\"lxc.network.\", \"\", 1)] = val\n if ifaces:\n ret[\"nics\"] = ifaces\n\n ret[\"rootfs\"] = next((x[1] for x in config if x[0] == \"lxc.rootfs\"), None)\n ret[\"state\"] = state(name, path=path)\n ret[\"ips\"] = []\n ret[\"public_ips\"] = []\n ret[\"private_ips\"] = []\n ret[\"public_ipv4_ips\"] = []\n ret[\"public_ipv6_ips\"] = []\n ret[\"private_ipv4_ips\"] = []\n ret[\"private_ipv6_ips\"] = []\n ret[\"ipv4_ips\"] = []\n ret[\"ipv6_ips\"] = []\n ret[\"size\"] = None\n ret[\"config\"] = conf_file\n\n if ret[\"state\"] == \"running\":\n try:\n limit = int(get_parameter(name, \"memory.limit_in_bytes\"))\n except (CommandExecutionError, TypeError, ValueError):\n limit = 0\n try:\n usage = int(get_parameter(name, \"memory.usage_in_bytes\"))\n except (CommandExecutionError, TypeError, ValueError):\n usage = 0\n free = limit - usage\n ret[\"memory_limit\"] = limit\n ret[\"memory_free\"] = free\n size = run_stdout(name, \"df /\", path=path, python_shell=False)\n # The size is the 2nd column of the last line\n ret[\"size\"] = size.splitlines()[-1].split()[1]\n\n # First try iproute2\n ip_cmd = run_all(name, \"ip link show\", path=path, python_shell=False)\n if ip_cmd[\"retcode\"] == 0:\n ip_data = ip_cmd[\"stdout\"]\n ip_cmd = run_all(name, \"ip addr show\", path=path, python_shell=False)\n ip_data += \"\\n\" + ip_cmd[\"stdout\"]\n ip_data = salt.utils.network._interfaces_ip(ip_data)\n else:\n # That didn't work, try ifconfig\n ip_cmd = run_all(name, \"ifconfig\", path=path, python_shell=False)\n if ip_cmd[\"retcode\"] == 0:\n ip_data = salt.utils.network._interfaces_ifconfig(ip_cmd[\"stdout\"])\n else:\n # Neither was successful, give up\n log.warning(\"Unable to run ip or ifconfig in container '%s'\", name)\n ip_data = {}\n\n ret[\"ipv4_ips\"] = salt.utils.network.ip_addrs(\n include_loopback=True, interface_data=ip_data\n )\n ret[\"ipv6_ips\"] = salt.utils.network.ip_addrs6(\n include_loopback=True, interface_data=ip_data\n )\n ret[\"ips\"] = ret[\"ipv4_ips\"] + ret[\"ipv6_ips\"]\n for address in ret[\"ipv4_ips\"]:\n if address == \"127.0.0.1\":\n ret[\"private_ips\"].append(address)\n ret[\"private_ipv4_ips\"].append(address)\n elif salt.utils.cloud.is_public_ip(address):\n ret[\"public_ips\"].append(address)\n ret[\"public_ipv4_ips\"].append(address)\n else:\n ret[\"private_ips\"].append(address)\n ret[\"private_ipv4_ips\"].append(address)\n for address in ret[\"ipv6_ips\"]:\n if address == \"::1\" or address.startswith(\"fe80\"):\n ret[\"private_ips\"].append(address)\n 
ret[\"private_ipv6_ips\"].append(address)\n else:\n ret[\"public_ips\"].append(address)\n ret[\"public_ipv6_ips\"].append(address)\n\n for key in [x for x in ret if x == \"ips\" or x.endswith(\"ips\")]:\n ret[key].sort(key=_ip_sort)\n __context__[cachekey] = ret\n return __context__[cachekey]" ]
[ "0.59621274", "0.5824988", "0.5726111", "0.5703246", "0.56873155", "0.5641615", "0.5511324", "0.54945695", "0.54908264", "0.54837257", "0.5480335", "0.5418569", "0.5393705", "0.53738993", "0.53738993", "0.53121793", "0.5269728", "0.52653515", "0.52647144", "0.51990974", "0.51749593", "0.51187146", "0.5094586", "0.5079236", "0.5077768", "0.5070588", "0.50690526", "0.5065296", "0.5050681", "0.50117904" ]
0.59827733
0
Return True if the named container can be attached to via the lxcattach command path path to the container parent
def attachable(name, path=None):
    cachekey = f"lxc.attachable{name}{path}"
    try:
        return __context__[cachekey]
    except KeyError:
        _ensure_exists(name, path=path)
        # Can't use run() here because it uses attachable() and would
        # endlessly recurse, resulting in a traceback
        log.debug("Checking if LXC container %s is attachable", name)
        cmd = "lxc-attach"
        if path:
            cmd += f" -P {shlex.quote(path)}"
        cmd += f" --clear-env -n {name} -- /usr/bin/env"
        result = (
            __salt__["cmd.retcode"](
                cmd, python_shell=False, output_loglevel="quiet", ignore_retcode=True
            )
            == 0
        )
        __context__[cachekey] = result
        return __context__[cachekey]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_docker():\n rc, out, _ = j.sals.process.execute(\"cat /proc/1/cgroup\", die=False, showout=False)\n if rc == 0 and \"/docker/\" in out:\n return True\n return False", "def req_build(container):\n try:\n return 'dockerfile' in self.kard.env.get_container(container)\n except KeyError:\n return False", "def is_inside_im_container() -> bool:\n # TODO(*): Why not testing only STAGE?\n condition = (\n os.environ.get(\"STAGE\") == \"TEST\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_test\"\n ) or (\n os.environ.get(\"STAGE\") == \"LOCAL\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_local\"\n )\n return condition", "def booted_from_volume(volumes_list):\n if any('/dev/vda' in volume['attachments'] for volume in\n volumes_list):\n return True\n return False", "def in_host():\n return not in_docker()", "def _attach_external_volume_or_instance(instance_id):\n\n if not utils.use_external_resource(ctx.source.node.properties) \\\n or not utils.use_external_resource(\n ctx.target.node.properties):\n return False\n\n ctx.source.instance.runtime_properties['instance_id'] = \\\n instance_id\n ctx.logger.info(\n 'Either instance or volume is an external resource so not '\n 'performing attach operation.')\n return True", "def _is_docker(self) -> bool:\n from hubble.executor.helper import is_valid_docker_uri\n\n uses = getattr(self.args, 'uses', '')\n return is_valid_docker_uri(uses)", "def get_details_using_inspect_command(self, container_id):\n\n try:\n p = Popen(DOCKER_INSPECT_CMD.format(container_id), shell=True, stdout=PIPE, stderr=PIPE)\n data_dump, stderr_data = p.communicate()\n log.debug('{}[*]{} Inspect result:{}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, \n json.dumps(json.loads(data_dump)),indent=4))\n\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR, e))\n return False\n\n self.data = json.loads(data_dump.decode('utf-8'))\n\n if not self.data:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR,\n 'Please check if container id is valid'))\n return False\n\n self.storage_driver = self.data[0]['Driver']\n self.pid = self.data[0]['State']['Pid']\n self.container_id = self.data[0]['Id']\n\n log.debug('{}[*]{} Storage Driver: {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, self.storage_driver))\n if self.storage_driver == 'overlay2' or self.storage_driver == 'overlay':\n self.IS_OVERLAYFS = True\n self.overlay_upperdir_path = self.data[0]['GraphDriver']['Data']['UpperDir']\n self.overlay_merged_path = self.data[0]['GraphDriver']['Data']['MergedDir']\n elif self.storage_driver == 'aufs':\n self.IS_AUFSFS = True\n self.aufs_container_layerdb_path = AUFS_IMAGE_LAYERDB_PATH + self.data[0]['Id']\n else:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR,\n 'This storage driver does not support'))\n False\n\n log.debug('{}[*]{} Container id: {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, self.container_id))\n return True", "def test_attach(self, check_docker, containers, log_worker, time_):\n check_docker.return_value = True\n\n container1 = MagicMock()\n container1.name = f\"{APPNAME}-iknl-user\"\n containers.list.return_value = [container1]\n\n log_worker.return_value = \"\"\n time_.sleep.side_effect = KeyboardInterrupt()\n\n runner = CliRunner()\n result = runner.invoke(cli_node_attach, ['--name', 'iknl'])\n\n self.assertEqual(\n result.output,\n \"[info] - Closing log file. 
Keyboard Interrupt.\\n\"\n )\n self.assertEqual(result.exit_code, 0)", "def _check(self) -> bool:\n path, base_path = self.list_path[-1]\n if \"override\" in path:\n return True\n command = \"cd {} && docker-compose config\".format(\n os.path.dirname(get_path(path, base_path))\n )\n ret = console.run(command, get_stdout=False, silent=True)\n if not ret:\n console.run(command)\n return ret", "def docker_available(): # type: () -> bool\n return bool(get_docker_command())", "def _is_booted_from_volume(self, instance, disk_mapping=None):\n return not bool(instance.get('image_ref'))", "def check_for_docker_compose_file():\n return os.path.isfile(DOCKER_COMPOSE_FILE)", "def is_geth_running(self) -> bool:\r\n command = 'docker exec -t %s geth attach ipc://root/abc/geth.ipc --exec \"admin.nodeInfo\"' % self.name\r\n result = self.ip.exec_command(command)\r\n return False if result.split(':')[0] == 'Fatal' else True", "def can_add_child(self, child):\n if not self.is_valid_child(child):\n return False\n if child.isa == u'PBXGroup':\n return len(func.take(\\\n lambda c: c.pbx_name == child.pbx_name and c.realpath() == child.realpath(),\\\n self.pbx_children)) == 0\n else:\n return len(func.take(lambda c:c.realpath() == child.realpath(), self.pbx_children)) == 0", "def is_container(self, scopestr: str):\n return scopestr in self.con_scopestr_to_node", "def validateName(name):\n return name in [container.name for container in DOCKER_CLIENT.containers.list()]", "def docker_run(self) -> bool:\n containers = self.client.containers.list(filters={\"name\": self.cname})\n if containers:\n self.container = containers[0]\n return False\n\n info(\"Starting container {}...\".format(self.cname), nl=False)\n self.container = self.client.containers.run(\n image=self.image,\n detach=True,\n auto_remove=False,\n environment=self.env,\n hostname=self.dist,\n init=True,\n name=self.cname,\n remove=False,\n stdin_open=sys.stdin.isatty(),\n tty=True,\n volumes=self.volumes,\n entrypoint=\"bash\",\n command=[],\n )\n info(\"Done!\")\n\n return True", "def _do_attach(self, attach_device):\n try:\n if attach_device is not None:\n log.debug(\"Attaching volume '%s' to instance '%s' as device '%s'\" %\n (self.volume_id, self.app.cloud_interface.get_instance_id(),\n attach_device))\n self.volume.attach(\n self.app.cloud_interface.get_instance_id(), attach_device)\n else:\n log.error(\"Attaching volume '%s' to instance '%s' failed because \"\n \"could not determine device.\"\n % (self.volume_id, self.app.cloud_interface.get_instance_id()))\n return False\n except EC2ResponseError, e:\n if e.error_code == 'InvalidVolume.ZoneMismatch':\n msg = (\"Volume '{0}' is located in the wrong availability zone \"\n \"for this instance. You MUST terminate this instance \"\n \"and start a new one in zone '{1}' instead of '{2}' \"\n \"to be able to use this volume.\"\n .format(self.volume_id, self.volume.zone,\n self.app.cloud_interface.get_zone()))\n self.app.msgs.critical(msg)\n log.error(msg)\n self.fs.state = service_states.ERROR\n else:\n log.error(\"Attaching volume '%s' to instance '%s' as device '%s' failed. 
\"\n \"Exception: %s (%s)\" % (self.volume_id,\n self.app.cloud_interface.get_instance_id(),\n attach_device, e.message,\n e.error_code))\n return False\n return self.status", "def __check_db_container(self, mode='running'):\n if mode == 'running':\n cmd_docker = ['docker', 'ps']\n elif mode == 'exist':\n cmd_docker = ['docker', 'ps', '-a']\n else:\n raise DockerExecError('Invalid container check mode: {}.'.format(mode))\n\n\n proc_docker = subprocess.Popen(cmd_docker,\n stdout=subprocess.PIPE)\n proc_grep = subprocess.Popen(['grep', self.__db_cont_name],\n stdin=proc_docker.stdout,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc_grep.communicate()\n output = str(stdout).split()\n LOGGER.debug(output)\n try:\n container_image = output[1]\n container_name = output[-1]\n container_port = output[-2]\n # remove new line spacial character\n container_name = container_name.rstrip(\"\\\\n'\")\n container_port = find_xtport(container_port) \n except IndexError:\n container_name = None\n container_image = None\n container_port = None\n \n LOGGER.debug('Found that there is an existing container with the name: {}'.format(container_name))\n\n if container_name == self.__db_cont_name:\n if container_image == self.__db_image:\n if mode == 'running':\n self.__is_db_running = True\n elif mode == 'exist':\n self.__is_db_exist = True\n if container_port != self.__dbport:\n LOGGER.warning('Using as external container port: {}'.format(container_port))\n self.__dbport = container_port\n else:\n msg = ('The name \\\"{}\\\" is used by another container.'\n 'Could not create postgres database container.' \n 'Please use other db container name.').format(self.__db_cont_name)\n raise DockerExecError(msg)", "def attach_volume(self, instance_name, device_path, mountpoint):\n return True", "def attached(self) -> bool:\n return typing.cast(\n bool,\n self._properties.get(\"attached\"),\n )", "def exists(name, path=None):\n\n _exists = name in ls_(path=path)\n # container may be just created but we did cached earlier the\n # lxc-ls results\n if not _exists:\n _exists = name in ls_(cache=False, path=path)\n return _exists", "def is_container(soln_stk, container_config):\n\n return (is_preconfigured(soln_stk, container_config) or\n is_generic(soln_stk, container_config))", "def is_container_type_correct(self, container):\r\n return container in self.containers", "def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False", "def test_container_exists():\n return exec_fn(_test_container_exists)", "def contains(self, container: Container) -> bool:\n self.network.reload()\n return container in self.network.containers", "def attach_pwn(args):\n container_name = _read_container_name()\n\n # FIXME Is it better that we just exec it with given name?\n conts = container.list(filters={'name':container_name})\n if len(conts) != 1:\n raise InstallationError('Installation seems to be run. There are more than one image called ancypwn')\n _attach_interactive(conts[0].name)", "def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool\n try:\n docker_command(args, ['image', 'inspect', image], capture=True)\n except SubprocessError:\n return False\n\n return True" ]
[ "0.63328063", "0.6257298", "0.60977393", "0.5650763", "0.56292164", "0.55628216", "0.55491996", "0.55406547", "0.54978305", "0.54968077", "0.5455763", "0.54142636", "0.5397942", "0.53714055", "0.5338586", "0.5338043", "0.531101", "0.52852744", "0.52593184", "0.52390534", "0.52345896", "0.52303344", "0.52088815", "0.52071935", "0.51453775", "0.514067", "0.51380503", "0.5137038", "0.5118645", "0.5091276" ]
0.73680145
0
Reboot a container. path path to the container parent
def reboot(name, path=None):
    ret = {"result": True, "changes": {}, "comment": f"{name} rebooted"}
    does_exist = exists(name, path=path)
    if does_exist and (state(name, path=path) == "running"):
        try:
            stop(name, path=path)
        except (SaltInvocationError, CommandExecutionError) as exc:
            ret["comment"] = f"Unable to stop container: {exc}"
            ret["result"] = False
            return ret
    if does_exist and (state(name, path=path) != "running"):
        try:
            start(name, path=path)
        except (SaltInvocationError, CommandExecutionError) as exc:
            ret["comment"] = f"Unable to stop container: {exc}"
            ret["result"] = False
            return ret
    ret["changes"][name] = "rebooted"
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])", "def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")", "def reboot(self, node):", "def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def _restart(self, docker_image_name):\n if self.docker_repo:\n docker_image_name = '%s/%s' % (self.docker_repo,\n docker_image_name)\n\n docker_container_name = '%s_%s' % (self.docker_image_name_prefix,\n self.scope)\n\n mounts = [\n DockerMount('/dev/log', '/dev/log', type='bind'),\n DockerMount(self.paths.sandbox_pipe_dir,\n self.paths.host_pipe_dir,\n type='bind'),\n DockerMount(self.paths.sandbox_storlet_base_dir,\n self.paths.host_storlet_base_dir,\n type='bind'),\n DockerMount(self.paths.sandbox_storlet_native_lib_dir,\n self.paths.host_storlet_native_lib_dir,\n type='bind', read_only=True),\n DockerMount(self.paths.sandbox_storlet_native_bin_dir,\n self.paths.host_storlet_native_bin_dir,\n type='bind', read_only=True)\n ]\n\n try:\n client = docker.from_env()\n # Stop the existing storlet container\n try:\n scontainer = client.containers.get(docker_container_name)\n except docker.errors.NotFound:\n # The container is not yet created\n pass\n else:\n scontainer.stop(timeout=self.sandbox_stop_timeout)\n\n # Check whether a new container can be started\n if self.max_containers_per_node > 0:\n all_scontainers = client.containers.list(\n filters={'label': 'managed_by=storlets'})\n if len(all_scontainers) >= self.max_containers_per_node:\n raise StorletRuntimeException(\n \"Cannot start a container because of limit\")\n\n # Start the new one\n client.containers.run(\n docker_image_name, detach=True, name=docker_container_name,\n network_disabled=True, mounts=mounts, user='swift',\n auto_remove=True, stop_signal='SIGHUP',\n labels={'managed_by': 'storlets'})\n except docker.errors.ImageNotFound:\n msg = \"Image %s is not found\" % docker_image_name\n raise StorletRuntimeException(msg)\n except docker.errors.APIError:\n self.logger.exception(\"Failed to manage docker containers\")\n raise StorletRuntimeException(\"Docker runtime error\")", "def reboot(self):\n raise NotImplementedError", "def reboot(host=None):\r\n if host:\r\n host.reboot()", "def reboot():\n if not required():\n return \"Kernel reboot not required\"\n cmd_str = 'shutdown -r +1 \"Server is going down for kernel upgrade\"'\n Popen([cmd_str], shell=True, stdin=None,\n stdout=None, stderr=None, close_fds=True)\n return cmd_str", "def restart(self):\n self.paths.create_host_pipe_dir()\n\n docker_image_name = self.scope\n try:\n self._restart(docker_image_name)\n self.wait()\n except StorletTimeout:\n raise\n except StorletRuntimeException:\n # We were unable to start docker container from the tenant image.\n # Let us try to start docker container from default image.\n self.logger.exception(\"Failed to start docker container from \"\n \"tenant image %s\" % docker_image_name)\n\n self.logger.info(\"Trying to start docker container from default \"\n \"image: %s\" % self.default_docker_image_name)\n self._restart(self.default_docker_image_name)\n self.wait()", "def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = 
self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def reboot(self, name=None):\n raise NotImplementedError", "def restart_container(self, container_node):\n try:\n print(\"Restarting container: \", container_node.get_container_id())\n container = self.docker_client.containers.get(container_node.get_container_id())\n container.restart()\n return True\n except docker.errors.APIError as de:\n print(\"Error restarting the container\")\n traceback.print_exc()\n print de\n return False", "def reboot(*args, **kwargs):\n try:\n master.main_exit()\n except Exception:\n log.error(\"main_exit error\")\n with open('/tmp/reboot', 'w+') as f:\n f.write(\"REBOOT\")\n log.info(\"Reboot ...\")", "def run_image(self):\n LOG.info('Rebooting system')\n self._run_shutdown_command('reboot')", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def restart_kernel(self, kernel_id, now=False):", "def reboot(*args):\n args = list(sys.argv) + list(args)\n if args[0] == 'python' or not args[0]:\n args[0] = BIN_PYTHON\n elif os.path.basename(sys.argv[0]) in ['lore', 'lore.exe']:\n args[0] = BIN_LORE\n try:\n os.execv(args[0], args)\n except Exception as e:\n if args[0] == BIN_LORE and args[1] == 'console' and JUPYTER_KERNEL_PATH:\n print(ansi.error() + ' Your jupyter kernel may be corrupt. 
Please remove it so lore can reinstall:\\n $ rm ' + JUPYTER_KERNEL_PATH)\n raise e", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def reboot_instance(InstanceId=None):\n pass", "def reboot(self, client, sec):\r\n result = client.reboot(sec)\r\n return result", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def restart_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"restart\", service_name])", "def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)", "def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)", "def rollbackSnapshotLXCContainer(self,node,vmid,snapname):\n post_data = {}\n data = self.connect('post','nodes/%s/lxc/%s/snapshot/%s/rollback' % (node,vmid,snapname), post_data) \n return data", "def reboot(self):\n module = 'reboot'\n method = 'POST'\n print(self.device + ' Calling reboot command on the device')\n response = self.axapi_call(module, method,'')\n if '2' in str(response.status_code):\n print(self.device + ' Reboot command successfully received, device will reboot momentarily, please wait')\n else:\n print(self.device + ' There was an error in issuing the reboot command, device may not have rebooted, please verify manually')", "async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def reboot(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.reboot_server(server)\n\n return r", "def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"" ]
[ "0.64766204", "0.61042136", "0.60781366", "0.588684", "0.5737953", "0.569218", "0.5689162", "0.5683573", "0.56762296", "0.5630647", "0.56163853", "0.56092185", "0.55962807", "0.55416054", "0.5506293", "0.5498973", "0.54100186", "0.539479", "0.53912336", "0.5376954", "0.534544", "0.5317452", "0.5300186", "0.5300186", "0.52996737", "0.52988327", "0.5296919", "0.52934253", "0.52932566", "0.5282088" ]
0.6835589
0
Reconfigure a container. This only applies to a few property name Name of the container. utsname utsname of the container.
def reconfigure(
    name,
    cpu=None,
    cpuset=None,
    cpushare=None,
    memory=None,
    profile=None,
    network_profile=None,
    nic_opts=None,
    bridge=None,
    gateway=None,
    autostart=None,
    utsname=None,
    rootfs=None,
    path=None,
    **kwargs,
):
    changes = {}
    cpath = get_root_path(path)
    path = os.path.join(cpath, name, "config")
    ret = {
        "name": name,
        "comment": f"config for {name} up to date",
        "result": True,
        "changes": changes,
    }
    profile = get_container_profile(copy.deepcopy(profile))
    kw_overrides = copy.deepcopy(kwargs)

    def select(key, default=None):
        kw_overrides_match = kw_overrides.pop(key, _marker)
        profile_match = profile.pop(key, default)
        # let kwarg overrides be the preferred choice
        if kw_overrides_match is _marker:
            return profile_match
        return kw_overrides_match

    if nic_opts is not None and not network_profile:
        network_profile = DEFAULT_NIC

    if autostart is not None:
        autostart = select("autostart", autostart)
    else:
        autostart = "keep"
    if not utsname:
        utsname = select("utsname", utsname)
    if os.path.exists(path):
        old_chunks = read_conf(path, out_format="commented")
        make_kw = salt.utils.odict.OrderedDict(
            [
                ("utsname", utsname),
                ("rootfs", rootfs),
                ("autostart", autostart),
                ("cpu", cpu),
                ("gateway", gateway),
                ("cpuset", cpuset),
                ("cpushare", cpushare),
                ("network_profile", network_profile),
                ("nic_opts", nic_opts),
                ("bridge", bridge),
            ]
        )
        # match 0 and none as memory = 0 in lxc config is harmful
        if memory:
            make_kw["memory"] = memory
        kw = salt.utils.odict.OrderedDict()
        for key, val in make_kw.items():
            if val is not None:
                kw[key] = val
        new_cfg = _config_list(conf_tuples=old_chunks, **kw)
        if new_cfg:
            edit_conf(path, out_format="commented", lxc_config=new_cfg)
        chunks = read_conf(path, out_format="commented")
        if old_chunks != chunks:
            ret["comment"] = f"{name} lxc config updated"
            if state(name, path=path) == "running":
                cret = reboot(name, path=path)
                ret["result"] = cret["result"]
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __set_container_info(self):\n self.container = \"{}_{}_1\".format(self.build, self.service.lower())\n self.mysql_container = \"{}_{}-mysql_1\".format(self.build, self.service.lower())", "def reconfigure(client, instance_name, command):\n\n # 'command' has 3 parts in a list (1 Command and 2 ARGs)\n exec_Id = client.exec_create(container=instance_name, cmd=command)\n\n exec_start_resp = client.exec_start(exec_Id, stream=True)\n\n # Using a 'single' generator response to solve issue of 'start_exec' returning control after 6 minutes\n for response in exec_start_resp:\n dlog.info(\"Reconfig Script execution response: {:}\".format(response))\n exec_start_resp.close()\n break", "def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)", "def container_name(self, container_name):\n\n self._container_name = container_name", "def reconfigure(self):\n log.debug('Reconfiguring and restarting the DHCP daemon...')\n\n # Don't set the daemon running status here, but let the status\n # check take care of that.\n\n p = Properties(self.storage, CONFIG_SECTION)\n p.addCallback(self.changed).\\\n addCallback(lambda trigger: p.load()).\\\n addCallback(self.emit_config, p).\\\n addCallback(self.restart_daemon).\\\n addErrback(self.restart_error)", "def _update_container(self):\n client = docker.from_env()\n self.container = client.containers.get(self.session.container_id)", "def _translate_docker_properties(self):\n self.spec.setdefault(\"name\", self.spec.pop(\"container_name\", self.name))\n self.spec.setdefault(\"command\", shlex.split(self.spec.pop(\"entrypoint\", \"\")))\n self.spec.setdefault(\"args\", shlex.split(self.spec.pop(\"cmd\", \"\")))\n \n self.spec.setdefault(\"env\", _make_env(self.spec.pop(\"environment\", {})))\n for env in self.spec.get(\"env\", []):\n if \"value\" in env:\n env[\"value\"] = str(env[\"value\"])\n\n self.spec.setdefault(\"stdin\", self.spec.pop(\"stdin_open\", None))\n self.spec.setdefault(\"workingDir\", self.spec.pop(\"working_dir\", None))\n\n privileged = self.spec.pop(\"privileged\", None)\n if privileged:\n self.spec.setdefault(\"securityContext\", {})\n self.spec[\"securityContext\"].setdefault(\"privileged\", privileged)\n\n # Clean-up any empty fields\n self.spec = {k: v for k, v in self.spec.items() if v}", "def reconfigure(self, behavior_params=None, async=True):\n self._op_api.run('api.service.reconfigure',\n func=self.do_reconfigure,\n func_kwds={'behavior_params': behavior_params},\n async=async,\n exclusive=True)", "def test_update_container(self):\n pass", "def restoreConfigName( ):\n subNo = s.getSubarrayNo()\n configMpName = \"Control.Subarray%d.configName\" %subNo\n retries = 24 \n arrayConfig = queryString( configMpName, retries )\n configName(arrayConfig)", "def configName(name):\n s.setConfigName(name)", "def update_container_name(self, backup, container):\n return container", "def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n 
self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def set_property(wellorcontainer, property_name, value):\n wells = convert_to_wellgroup(wellorcontainer)\n \n if not isinstance(value, str):\n value = str(value)\n \n for well in wells:\n assert isinstance(well, Well)\n well.properties[property_name] = value", "def set(name):\n set_config(name)", "def reconfigure(self, consensus=None):\r\n pass", "def prepare_container_change(instance, **kwargs):\n prepare_permission_change(instance)\n instance._old_containers = set(instance.containers)", "def name(self, name):\n\n self.container['name'] = name", "def name(self, name):\n\n self.container['name'] = name", "def set_name(self, sNewVmName):\n\t\tcall_sdk_function('PrlVmCfg_SetName', self.handle, sNewVmName)", "def __update_container(self, path, obj_stat):\n try:\n self.logger.debug('Update container interface called')\n return self.asyn_helper.call \\\n (\"update_container\", path, obj_stat)\n except Exception as err:\n self.logger.error(('update_container for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def set_properties(old, new, self_name=None):\n properties = {\n 'name': self_name if self_name else old.name,\n 'hostname': old.attrs['Config']['Hostname'],\n 'user': old.attrs['Config']['User'],\n 'detach': True,\n 'domainname': old.attrs['Config']['Domainname'],\n 'tty': old.attrs['Config']['Tty'],\n 'ports': None if not old.attrs['Config'].get('ExposedPorts') else [\n (p.split('/')[0], p.split('/')[1]) for p in old.attrs['Config']['ExposedPorts'].keys()\n ],\n 'volumes': None if not old.attrs['Config'].get('Volumes') else [\n v for v in old.attrs['Config']['Volumes'].keys()\n ],\n 'working_dir': old.attrs['Config']['WorkingDir'],\n 'image': new.tags[0],\n 'command': old.attrs['Config']['Cmd'],\n 'host_config': old.attrs['HostConfig'],\n 'labels': old.attrs['Config']['Labels'],\n 'entrypoint': old.attrs['Config']['Entrypoint'],\n 'environment': old.attrs['Config']['Env'],\n 'healthcheck': old.attrs['Config'].get('Healthcheck', None)\n }\n\n return properties", "def _restart(self, docker_image_name):\n if self.docker_repo:\n docker_image_name = '%s/%s' % (self.docker_repo,\n docker_image_name)\n\n docker_container_name = '%s_%s' % (self.docker_image_name_prefix,\n self.scope)\n\n mounts = [\n DockerMount('/dev/log', '/dev/log', type='bind'),\n DockerMount(self.paths.sandbox_pipe_dir,\n self.paths.host_pipe_dir,\n type='bind'),\n DockerMount(self.paths.sandbox_storlet_base_dir,\n self.paths.host_storlet_base_dir,\n type='bind'),\n DockerMount(self.paths.sandbox_storlet_native_lib_dir,\n self.paths.host_storlet_native_lib_dir,\n type='bind', read_only=True),\n DockerMount(self.paths.sandbox_storlet_native_bin_dir,\n self.paths.host_storlet_native_bin_dir,\n type='bind', read_only=True)\n ]\n\n try:\n client = docker.from_env()\n # Stop the existing storlet container\n try:\n scontainer = client.containers.get(docker_container_name)\n except docker.errors.NotFound:\n # The container is not yet created\n pass\n else:\n scontainer.stop(timeout=self.sandbox_stop_timeout)\n\n # Check whether a new container can 
be started\n if self.max_containers_per_node > 0:\n all_scontainers = client.containers.list(\n filters={'label': 'managed_by=storlets'})\n if len(all_scontainers) >= self.max_containers_per_node:\n raise StorletRuntimeException(\n \"Cannot start a container because of limit\")\n\n # Start the new one\n client.containers.run(\n docker_image_name, detach=True, name=docker_container_name,\n network_disabled=True, mounts=mounts, user='swift',\n auto_remove=True, stop_signal='SIGHUP',\n labels={'managed_by': 'storlets'})\n except docker.errors.ImageNotFound:\n msg = \"Image %s is not found\" % docker_image_name\n raise StorletRuntimeException(msg)\n except docker.errors.APIError:\n self.logger.exception(\"Failed to manage docker containers\")\n raise StorletRuntimeException(\"Docker runtime error\")", "def container_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n self.client = docker.from_env()\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n try:\n containers = self.client.containers.list(all, **kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n Rest.delete('Container', filter)\n continue\n if len(containers) == 0:\n print(\"No containers exist \" + str(host['Ip']))\n Rest.delete('Container', filter)\n continue\n\n for containerm in containers:\n container = containerm.__dict__['attrs']\n container['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(container)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = container['Id']\n d['Name'] = container['Name']\n d['Image'] = container['Config']['Image']\n d['Status'] = container['State']['Status']\n d['StartedAt'] = container['State']['StartedAt']\n e[n] = d\n n = n + 1\n Rest.delete('Container', filter)\n Rest.post('Container', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Image', 'Status', 'StartedAt'])))", "def test_update_container_privilege(self):\n pass", "def set_hostname(dut, host_name):\n cmd = \"sudo hostname {}\".format(host_name)\n st.config(dut, cmd)\n return", "def reset_attribute(attr_config):\n obj = pm.PyNode(attr_config[\"ctl\"])\n attr = attr_config[\"longName\"]\n\n attribute.reset_selected_channels_value(objects=[obj], attributes=[attr])", "def unconfigure_username(device, username):\n try:\n device.configure('no username {username}'.format(username=username))\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Failed to unconfigure user {username}\".format(username=username)\n )", "def container_status_change(self, status=None, containerName=None, kwargs=None):\n if status is None:\n Console.info(\"No status specified\")\n return\n\n try:\n container = self.client.containers.get(containerName)\n # need to check this ..\n if status is \"start\":\n container.start(**kwargs)\n elif status is \"pause\":\n container.pause(**kwargs)\n elif status is \"unpause\":\n container.unpause(**kwargs)\n elif status is \"stop\":\n container.stop(**kwargs)\n else:\n Console.error('Invalid Commmand')\n return\n\n container = self.client.containers.get(containerName)\n filter = {}\n container_dict = container.__dict__['attrs']\n filter['Id'] = container_dict['Id']\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n container_dict['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n Rest.post('Container', container_dict, filter)\n Console.ok('Container ' + container.name + ' 
status changed to ' + status)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return", "def test_destroy_container(self):\n pass" ]
[ "0.5688802", "0.5612379", "0.5199279", "0.5175965", "0.5149504", "0.5137428", "0.5076634", "0.50598806", "0.5049449", "0.5042591", "0.5017216", "0.49024123", "0.48940125", "0.48901016", "0.488051", "0.48335877", "0.48168996", "0.48078325", "0.48078325", "0.47639975", "0.47560486", "0.47046858", "0.4698786", "0.46975294", "0.46563613", "0.46157727", "0.45882848", "0.45784166", "0.45722488", "0.45261696" ]
0.60598457
0
Returns a container pid. Throw an exception if the container isn't running.
def get_pid(name, path=None):
    if name not in list_(limit="running", path=path):
        raise CommandExecutionError(
            f"Container {name} is not running, can't determine PID"
        )
    info = __salt__["cmd.run"](f"lxc-info -n {name}").split("\n")
    pid = [
        line.split(":")[1].strip()
        for line in info
        if re.match(r"\s*PID", line) is not None
    ][0]
    return pid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pid_from_container(container_id):\n\n cmd = ['pgrep', '-f', container_id]\n try:\n return subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get pid, error=%s', error)\n return 1", "def container_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"container_id\")", "def container_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"container_id\")", "def _get_infrastructure_pid(self, container_id: str) -> str:\n docker_client = self._get_client()\n base_url = docker_client.api.base_url\n docker_client.close()\n return f\"{base_url}:{container_id}\"", "def pid(self):\n # type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def get_docker_container_id(): # type: () -> t.Optional[str]\n path = '/proc/self/cpuset'\n container_id = None\n\n if os.path.exists(path):\n # File content varies based on the environment:\n # No Container: /\n # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507\n # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891\n # Podman: /../../../../../..\n contents = read_text_file(path)\n\n cgroup_path, cgroup_name = os.path.split(contents.strip())\n\n if cgroup_path in ('/docker', '/azpl_job'):\n container_id = cgroup_name\n\n if container_id:\n display.info('Detected execution in Docker container: %s' % container_id, verbosity=1)\n\n return container_id", "def get_pid(self):\n try:\n pf = open(self.pidfile,'r')\n pid = int(pf.read().strip())\n pf.close()\n except (IOError, TypeError):\n pid = None\n return pid", "def get_daemon_pid():\n try:\n return _get_pid_from_pidfile()\n except (FileNotFoundError, ValueError):\n return None", "def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None", "def pid(self):\n return self._process.pid", "def pid(self):\n return self._get_process_id()", "def pid(self):\n return self._query_status()['pid']", "def get_current_container_id():\n with open('/proc/self/cgroup', 'rt') as cgroup_file:\n for line in cgroup_file.readlines():\n return re.sub(r'^docker-', '', re.sub(r'\\.scope$', '', re.sub(r'^.*\\/', '', line.strip())))", "def get_pid(pidfile):\n pid = None\n if os.path.exists(pidfile):\n with open(pidfile, 'r') as f:\n pid = f.read()\n return pid", "def containerID(self):\n return self._container", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return 
pid\n else:\n return None", "def get_pid_from_name(process_name:str) -> int:\r\n\tfor process in psutil.process_iter():\r\n\t\tif process_name in process.name():\r\n\t\t\treturn process.pid\r\n\traise ProcessLookupError(\"process '\" + process_name + \"' not found.\")", "def get_pidfile(self):\n pid = None\n \n # checking if the file exists on system\n if not os.path.exists(self._pidfile):\n return pid\n \n # read the pid\n with open(self._pidfile, 'r') as f:\n pid = int(f.read().strip())\n\n return pid", "def get_pid(self):\n\t\ttry:\n\t\t\tpid_file = open(self.pid_file_path, 'r');\n\t\t\tpid = int(pid_file.read().strip())\n\t\t\tpid_file.close()\n\t\texcept:\n\t\t\tpid = -1;\n\t\treturn pid", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def container_port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"container_port\")", "def getpid(command):\n try:\n _pidof = executeCommand(command)\n except Exception as er:\n print (\" not able to get pid\")\n return False\n return _pidof", "def read_pid(self):\n result = read_pid_from_pidfile(self.path)\n return result", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None" ]
[ "0.7870028", "0.7010304", "0.70092154", "0.687627", "0.68020564", "0.6793576", "0.67661756", "0.6732722", "0.6683302", "0.6612565", "0.65184677", "0.64688176", "0.6462593", "0.645608", "0.6442939", "0.6440602", "0.6401523", "0.63934046", "0.63591", "0.6311469", "0.6294774", "0.62540513", "0.624851", "0.62029016", "0.61609995", "0.6100227", "0.6100227", "0.6100227", "0.60610104", "0.6052722" ]
0.7870223
0
Add a veth to a container.
def add_veth(name, interface_name, bridge=None, path=None):
    # Get container init PID
    pid = get_pid(name, path=path)

    # Generate a ramdom string for veth and ensure that is isn't present on the system
    while True:
        random_veth = "veth" + "".join(
            random.choice(string.ascii_uppercase + string.digits) for _ in range(6)
        )
        if random_veth not in __salt__["network.interfaces"]().keys():
            break

    # Check prerequisites
    if not __salt__["file.directory_exists"]("/var/run/"):
        raise CommandExecutionError(
            "Directory /var/run required for lxc.add_veth doesn't exists"
        )
    if not __salt__["file.file_exists"](f"/proc/{pid}/ns/net"):
        raise CommandExecutionError(
            f"Proc file for container {name} network namespace doesn't exists"
        )

    if not __salt__["file.directory_exists"]("/var/run/netns"):
        __salt__["file.mkdir"]("/var/run/netns")

    # Ensure that the symlink is up to date (change on container restart)
    if __salt__["file.is_link"](f"/var/run/netns/{name}"):
        __salt__["file.remove"](f"/var/run/netns/{name}")

    __salt__["file.symlink"](f"/proc/{pid}/ns/net", f"/var/run/netns/{name}")

    # Ensure that interface doesn't exists
    interface_exists = 0 == __salt__["cmd.retcode"](
        "ip netns exec {netns} ip address list {interface}".format(
            netns=name, interface=interface_name
        )
    )
    if interface_exists:
        raise CommandExecutionError(
            "Interface {interface} already exists in {container}".format(
                interface=interface_name, container=name
            )
        )

    # Create veth and bring it up
    if (
        __salt__["cmd.retcode"](
            "ip link add name {veth} type veth peer name {veth}_c".format(
                veth=random_veth
            )
        )
        != 0
    ):
        raise CommandExecutionError(f"Error while creating the veth pair {random_veth}")
    if __salt__["cmd.retcode"](f"ip link set dev {random_veth} up") != 0:
        raise CommandExecutionError(
            f"Error while bringing up host-side veth {random_veth}"
        )

    # Attach it to the container
    attached = 0 == __salt__["cmd.retcode"](
        "ip link set dev {veth}_c netns {container} name {interface_name}".format(
            veth=random_veth, container=name, interface_name=interface_name
        )
    )
    if not attached:
        raise CommandExecutionError(
            "Error while attaching the veth {veth} to container {container}".format(
                veth=random_veth, container=name
            )
        )

    __salt__["file.remove"](f"/var/run/netns/{name}")

    if bridge is not None:
        __salt__["bridge.addif"](bridge, random_veth)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put_container(self, account, container):\n \n pass", "def test_sample_container_add(self):\n retval = self.container.add(\"key1\", [\"value1\"], 5)\n\n self.assertEqual(retval, None)\n self.assertEqual([\"value1\", ], self.container._data[\"key1\"])", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.container)", "def add_fruit(self):\n # print('fruit added to container')", "def add_cloudyvent_vm(self, runname, iaasid, nodeid, hostname, service_type, parent, runlogdir, vmlogdir):\n cyvm = self.get_by_iaasid(iaasid)\n if not cyvm:\n cyvm = _CYVM(runname, iaasid, nodeid, hostname, service_type, parent, runlogdir, vmlogdir)\n self.session.add(cyvm)\n return True\n else:\n cyvm.hostname = hostname\n cyvm.service_type = service_type\n cyvm.nodeid = nodeid\n cyvm.parent = parent\n return False", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.eS)\n nwbfile.add_acquisition(self.container)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def test_add_container(self):\n with DockerHost('host', dind=False) as host:\n # Create a container with --net=none, add a calico interface to\n # it then check felix programs a route.\n node = host.create_workload(\"node\", network=NET_NONE)\n host.calicoctl(\"container add %s 192.168.1.1\" % node)\n\n # Create the profile, get the endpoint IDs for the containers and\n # add the profile to the endpoint so felix will pick it up.\n host.calicoctl(\"profile add TEST_GROUP\")\n ep = host.calicoctl(\"container %s endpoint-id show\" % node)\n host.calicoctl(\"endpoint %s profile set TEST_GROUP\" % ep)\n\n # Wait for felix to program down the route.\n check_route = partial(host.execute,\n \"ip route | grep '192\\.168\\.1\\.1'\")\n retry_until_success(check_route, ex_class=CalledProcessError)", "def add_volume(self, volume: 'Volume'):\n self.volumes.append(volume)", "def add_containers(self, num_containers=1, virtual_cores=1, memory=128):\n self.master.addContainers(num_containers, virtual_cores, memory)", "def add_vdisk(client, resource_group_name, vm_name, controller=\"1000\",\n independence_mode=\"persistent\", size=16777216):\n from .vendored_sdks.models import VirtualDisk\n\n virtual_machine = client.get(resource_group_name, vm_name)\n disk = VirtualDisk(controller_id=controller,\n independence_mode=independence_mode,\n total_size=size)\n\n virtual_machine.disks.append(disk)\n return client.create_or_update(resource_group_name, vm_name, virtual_machine)", "def addContainer(self, nwbfile):\n raise NotImplementedError('Cannot run test unless addContainer is implemented')", "def add_virtualip(self, vip):\n return self.manager.add_virtualip(self, vip)", "def 
add_inventory(cd_instance, lst_Inventory):\r\n \r\n lst_Inventory.append(cd_instance) \r\n return lst_Inventory", "def add(self, name, container):\n\n if name not in self.keys:\n self.keys[name] = container\n else:\n raise KeyError('key already exists')", "def add_volume(self, size=100):\n tfvars_file = \"terraform.tfvars.json\"\n with open(os.path.join(self.cluster_path, tfvars_file)) as f:\n tfvars = json.load(f)\n\n cluster_id = tfvars['cluster_id']\n worker_pattern = f'{cluster_id}-worker*'\n logger.info(f'Worker pattern: {worker_pattern}')\n self.create_ebs_volumes(worker_pattern, size)", "def creating_container(self, service):\n self.sync_code(service=service)\n externals = [v.external for v in service.options['volumes']]\n for v in self.get_hotcode_volumes(service):\n if v.external not in externals:\n service.options['volumes'].append(v)", "def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None):\r\n return self.set(key, value, timeout, client=client, nx=True)", "def _add_intf_to_vlan(self, conn, vlan_id, interface):\n\n obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')\n\n resp = conn.get(obj)\n intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])\n\n crt_vlist = self._get_vlist(intf_info['vlans'])\n if vlan_id in crt_vlist:\n return\n\n new_vlist = crt_vlist[ : ]\n new_vlist.append(vlan_id)\n\n pvid = intf_info['pvid']\n mode = 'trunk'\n\n resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)\n self._check_process_resp(resp)", "def addContainer(self, nwbfile):\n nwbfile.add_acquisition(self.clustering)\n nwbfile.add_acquisition(self.container)", "def push(self, value):\n self.container.append(value)", "def add_virtual_network(self, hVirtNet, nFlags = 0):\n\t\treturn Job(SDK.PrlSrv_AddVirtualNetwork(self.handle, conv_handle_arg(hVirtNet), nFlags)[0])", "def add_network(self, name_of_vm, port_group):\n adapter_type = 'e1000'\n vds = \"yes\"\n try:\n # import sys,pdb;pdb.Pdb(stdout=sys.__stdout__).set_trace()\n vmachine = self.vcenter.get_dc_object([vim.VirtualMachine], name_of_vm)\n\n if vds == 'yes':\n network = self.vcenter.get_dc_object([vim.dvs.DistributedVirtualPortgroup], port_group)\n else:\n network = self.get_network(port_group)\n\n new_nic = vim.vm.ConfigSpec()\n nic_changes = []\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n if adapter_type == 'e1000':\n nic_spec.device = vim.vm.device.VirtualE1000()\n elif adapter_type == 'vmxnet2':\n nic_spec.device = vim.vm.device.VirtualVmxnet2()\n else:\n nic_spec.device = vim.vm.device.VirtualVmxnet3()\n nic_spec.device.deviceInfo = vim.Description()\n if vds == 'yes':\n vir_port = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()\n nic_spec.device.backing = vir_port\n dvs_port_connection = vim.dvs.PortConnection()\n dvs_port_connection.portgroupKey = network.key\n dvs_port_connection.switchUuid = network.config.distributedVirtualSwitch.uuid\n nic_spec.device.backing.port = dvs_port_connection\n else:\n nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()\n nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.backing.network = network\n nic_spec.device.backing.deviceName = port_group\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n 
nic_spec.device.connectable.status = 'untried'\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = 'assigned'\n nic_changes.append(nic_spec)\n new_nic.deviceChange = nic_changes\n add_nic = vmachine.ReconfigVM_Task(spec=new_nic)\n log.info('Adding Network adapter to the VM...')\n while add_nic.info.state not in ['success', 'error']:\n time.sleep(1)\n status = add_nic.info.state\n if status == 'success':\n log.info('Nic added successfully: {}'.format(name_of_vm))\n if status == 'error':\n log.info('Could not add Network adapter {}'.format(name_of_vm))\n return status\n\n except Exception as error:\n log.info(\"Caught exception: {} \\n {}\".format(error, error.message))", "def add(self, bento_name, bento_version):", "def addDocker( self, name, **params ):\n defaults={'dimage': self.dimage, 'hostExchangeFolder': self.hostExchangeFolder}\n defaults.update(params)\n return self.addHost( name, cls=Docker, **defaults )", "def add(self, host, **kwargs):\n self.configs_[0][1].add(host, **kwargs)", "def vm_diskadd(args):\n name = args.name\n size = args.size\n template = args.template\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if size is None:\n common.pprint(\"Missing size. Leaving...\", color='red')\n os._exit(1)\n if pool is None:\n common.pprint(\"Missing pool. Leaving...\", color='red')\n os._exit(1)\n if name is None:\n common.pprint(\"Missing name. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding disk to %s...\" % name)\n k.add_disk(name=name, size=size, pool=pool, template=template)" ]
[ "0.58298266", "0.58002985", "0.56908727", "0.56471306", "0.5491566", "0.54700404", "0.54462945", "0.54462945", "0.54462945", "0.54462945", "0.543176", "0.54148674", "0.53822345", "0.5328974", "0.53262883", "0.52914095", "0.52834374", "0.52764374", "0.52595776", "0.5217109", "0.51869327", "0.5163967", "0.51217175", "0.50900155", "0.5056049", "0.5054831", "0.5053933", "0.504815", "0.5044011", "0.5036465" ]
0.6933137
0
This function takes a spectrum and integrates the given axis. The function assumes that the incoming data is in the histogram form.
def integrate_axis_py(obj, **kwargs):
    # import the helper functions
    import hlr_utils

    # set up for working through data
    o_descr = hlr_utils.get_descr(obj)

    if o_descr == "number" or o_descr == "list":
        raise RuntimeError("Must provide a SOM of a SO to the function.")
    # Go on
    else:
        pass

    # Check for starting bin
    try:
        start = kwargs["start"]
    except KeyError:
        start = 0

    # Check for ending bin
    try:
        end = kwargs["end"]
        if end != -1:
            end += 1
        else:
            pass
    except KeyError:
        end = -1

    # Check for axis keyword argument
    try:
        axis = kwargs["axis"]
    except KeyError:
        axis = "y"

    # Check for axis_pos keyword argument
    try:
        axis_pos = kwargs["axis_pos"]
    except KeyError:
        axis_pos = 0

    # Check for avg keyword argument
    try:
        avg = kwargs["avg"]
    except KeyError:
        avg = False

    # Check for width keyword argument
    try:
        width = kwargs["width"]
    except KeyError:
        width = False

    # Check for width_pos keyword argument
    try:
        width_pos = kwargs["width_pos"]
    except KeyError:
        width_pos = 0

    integration = float(0)
    integration_error2 = float(0)

    import itertools
    if width:
        import utils

    bad_values = ["nan", "inf", "-inf"]

    for i in xrange(hlr_utils.get_length(obj)):
        counter = 0

        value = hlr_utils.get_value(obj, i, o_descr, axis, axis_pos)
        error = hlr_utils.get_err2(obj, i, o_descr, axis, axis_pos)

        if end == -1:
            value = value[start:]
            error = error[start:]
        else:
            value = value[start:end]
            error = error[start:end]

        if not width:
            for val, err2 in itertools.izip(value, error):
                if str(val) in bad_values or str(err2) in bad_values:
                    continue
                else:
                    integration += val
                    integration_error2 += err2
                    counter += 1
        else:
            if axis == "y":
                x_axis = hlr_utils.get_value(obj, i, o_descr, "x", width_pos)
                x_err2 = hlr_utils.get_err2(obj, i, o_descr, "x", width_pos)
            elif axis == "x":
                raise RuntimeError("Cannot use width flag with x-axis "\
                                   +"integration")

            bin_widths = utils.calc_bin_widths(x_axis, x_err2)

            for val, err2, delta in itertools.izip(value, error, bin_widths[0]):
                if str(val) in bad_values or str(err2) in bad_values:
                    continue
                else:
                    integration += (delta * val)
                    integration_error2 += (delta * delta * err2)
                    counter += 1

    if avg:
        return (integration / float(counter), integration_error2 / float(counter))
    else:
        return (integration, integration_error2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def integral(self, axis=None):\n if axis is None:\n return (self * self.bset.area).sum()\n\n try:\n measure = numpy.prod([self.bset.binwidths[ax] for ax in axis],\n axis=0)\n except TypeError:\n measure = self.bset.binwidths[axis]\n (self * measure).sum(axis=axis)", "def integrate_spectrum(func, xmin, xmax, ndecade=100, intervals=False):\n is_quantity = False\n if isinstance(xmin, Quantity):\n unit = xmin.unit\n xmin = xmin.value\n xmax = xmax.value\n is_quantity = True\n\n if np.isscalar(xmin):\n logmin = np.log10(xmin)\n logmax = np.log10(xmax)\n n = (logmax - logmin) * ndecade\n x = np.logspace(logmin, logmax, n)\n else:\n x = np.append(xmin, xmax[-1])\n\n if is_quantity:\n x = x * unit\n\n y = func(x)\n\n val = _trapz_loglog(y, x, intervals=intervals)\n\n return val", "def integrate(self, *args):\n wbin = Spectrum._groom_integration_ranges(*args)\n try:\n wbin.to(self.w.unit)\n except AttributeError as xxx_todo_changeme:\n _u.UnitConversionError = xxx_todo_changeme\n raise ValueError('Input must be astropy quantity with units '\n 'convertable to that of spectrum.')\n if _np.any(wbin < self.wbins[0]) or _np.any(wbin > self.wbins[-1]):\n raise ValueError('Integration range is beyond spectrum range.')\n dw = _np.diff(wbin, 1).squeeze()\n newspec = self.rebin(wbin.ravel())\n flux_dens = newspec.y[::2]\n fluxes = flux_dens*dw\n flux = _np.sum(fluxes)\n if self.e is None:\n error = None\n else:\n err_dens = newspec.e[::2]\n errs = err_dens*dw\n error = utils.quadsum(errs)\n return flux, error", "def integrate(x, y, xmin, xmax):\n indexes = get_interval(x, xmin, xmax)\n integral = np.trapz(y[indexes], x[indexes])\n\n return integral", "def integrate(self, *args, **kws):\n gap_handling = kws.get('gap_handling', 'error')\n wbin = Spectrum._groom_integration_ranges(*args)\n\n # check for gap overlap\n # sometimes small numerical errors cause trouble, so compare to size\n # of pixels\n if self.any_gap_overalp(wbin):\n if gap_handling == 'error':\n raise ValueError('Some of the integration ranges cover gaps '\n 'in the spectrum.')\n elif gap_handling == 'zero':\n pass\n else:\n raise ValueError('gap_handling parameter not recognized')\n\n wunit = wbin.unit\n Fs, Es = [], []\n for spec in self.spectra:\n wrange = spec.wbins[[0, -1]].to(wunit).value\n xbins = utils.rangeset_intersect(wbin.value, [wrange])\n if len(xbins) > 0:\n F, E = spec.integrate(xbins*wunit)\n else:\n Funit = spec.y.unit*wunit\n F = 0*Funit\n E = None if spec.e is None else 0*Funit\n Fs.append(F); Es.append(E)\n\n F = sum(Fs)\n if any(E is None for E in Es):\n E = None\n else:\n E = _np.sqrt(sum([E**2 for E in Es]))\n return F, E", "def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux", "def sp_integrate_1D ( func , xmin , xmax , *args , **kwargs ) : \n from scipy import integrate\n ##\n result = integrate.quad ( func , xmin , xmax , *args , **kwargs )\n return result[0]", "def integrate(self, data, index, integrate_type='integrate'):\n\n if integrate_type not in ['integrate', 'sum']:\n raise ValueError('Integration type is unknown.')\n if isinstance(index, int):\n index = [index]\n axis_shift = 0\n sorted_index = np.sort(index)\n for i in sorted_index:\n axis = i + axis_shift\n if integrate_type == 'integrate':\n dv = self.axes[i].volumes\n data = array_routines.multiply_along_axis(data, dv, axis)\n data = np.sum(data, axis)\n axis_shift -= 1\n return data", "def wavenumber_spectrum(var, x_range=None, axis=0):\n var_k = np.fft.fftn(var)\n E_k = np.mean(np.abs(var_k)**2, axis=axis)\n if x_range is None:\n k 
= np.arange(len(E_k))\n else:\n k = np.linspace(x_range[0], x_range[1], len(E_k))\n n_steps = len(k) // 2\n E_k = k**2 * E_k\n E_k_a = E_k[1:n_steps + 1]\n E_k_b = E_k[-n_steps:][::-1]\n E_k = E_k_a + E_k_b\n return k[:n_steps], E_k", "def amplitude_n_phase_spectrum(x, axis=1, unwrap_phase=True, normalize_amplitude=True, **kwargs):\n s = fourier_spectra(x, axis=axis, **kwargs)\n amplitude, phase = decompose_spectra(s, **kwargs)\n if unwrap_phase:\n phase = np.unwrap(phase, axis=axis)\n if normalize_amplitude:\n ns = x.shape[axis]\n amplitude /= ns\n return amplitude, phase", "def harmonic_fit(t,X,frq,mask=None,axis=0,phsbase=None):\r\n\r\n t = np.asarray(t)\r\n \r\n # Reshape the array sizes\r\n X = X.swapaxes(0, axis)\r\n sz = X.shape\r\n lenX = np.prod(sz[1:])\r\n \r\n if not len(t) == sz[0]:\r\n raise 'length of t (%d) must equal dimension of X (%s)'%(len(t),sz[0])\r\n \r\n X = np.reshape(X,(sz[0],lenX))\r\n \r\n if not mask == None:\r\n mask = np.reshape(mask,(lenX,))\r\n else:\r\n mask = np.ones((lenX,))\r\n \r\n frq = np.array(frq)\r\n Nfrq = frq.shape[0]\r\n \r\n\r\n \r\n def buildA(t,frq):\r\n \"\"\"\r\n Construct matrix A\r\n \"\"\"\r\n nt=t.shape[0]\r\n nf=frq.shape[0]\r\n nff=nf*2+1\r\n A=np.ones((nt,nff))\r\n for ff in range(0,nf):\r\n A[:,ff*2+1]=np.cos(frq[ff]*t)\r\n A[:,ff*2+2]=np.sin(frq[ff]*t)\r\n \r\n return A\r\n \r\n def lstsqnumpy(A,y): \r\n \"\"\" \r\n Solve the least square problem\r\n \r\n Return:\r\n the complex amplitude \r\n the mean\r\n \"\"\"\r\n N=A.shape[1]\r\n b = np.linalg.lstsq(A,y)\r\n A = b[0][1::2]\r\n B = b[0][2::2]\r\n \r\n return A+1j*B, b[0][0::N]\r\n \r\n def phsamp(C):\r\n return np.abs(C), np.angle(C)\r\n \r\n # Least-squares matrix approach\r\n A = buildA(t,frq)\r\n C, C0 = lstsqnumpy(A,X) # This works on all columns of X!!\r\n Amp, Phs= phsamp(C)\r\n\r\n # Reference the phase to some time\r\n if not phsbase == None:\r\n base = othertime.SecondsSince(phsbase)\r\n\tphsoff = phase_offset(frq,t[0],base)\r\n\tphsoff = np.repeat(phsoff.reshape((phsoff.shape[0],1)),lenX,axis=1)\r\n\tphs = np.mod(Phs+phsoff,2*np.pi)\r\n \r\n # Non-vectorized method (~20x slower)\r\n# Amp = np.zeros((Nfrq,lenX))\r\n# Phs = np.zeros((Nfrq,lenX))\r\n# for ii in range(0,lenX): \r\n# if mask[ii]==True: \r\n# C = lstsqnumpy(A,X[:,ii])\r\n# # Calculate the phase and amplitude\r\n# am, ph= phsamp(C)\r\n# Amp[:,ii] = am; Phs[:,ii] = ph\r\n \r\n \r\n # reshape the array\r\n Amp = np.reshape(Amp,(Nfrq,)+sz[1:])\r\n Phs = np.reshape(Phs,(Nfrq,)+sz[1:])\r\n C0 = np.reshape(C0,sz[1:])\r\n \r\n # Output back along the original axis\r\n return Amp.swapaxes(axis,0), Phs.swapaxes(axis,0), C0.swapaxes(axis,0)", "def discrete_integrate(trace, lnprob): \n \n (n_trace, n_dim) = trace.shape\n\n h, bins = np.histogramdd(trace, weights=np.exp(lnprob))\n\n deltas = [x[1]-x[0] for x in bins]\n\n val = np.sum(h)\n for dx in deltas:\n val *= dx\n\n return val", "def integrate(self, x, dx):\n raise NotImplementedError(\"Not implemented yet.\")", "def test_integrate_spectrum():\n e1 = Quantity(1, \"TeV\")\n e2 = Quantity(10, \"TeV\")\n einf = Quantity(1e10, \"TeV\")\n e = Quantity(1, \"TeV\")\n g = 2.3\n I = Quantity(1e-12, \"cm-2 s-1\")\n\n ref = power_law_energy_flux(I=I, g=g, e=e, e1=e1, e2=e2)\n norm = power_law_flux(I=I, g=g, e=e, e1=e1, e2=einf)\n f = lambda x: x * power_law_evaluate(x, norm, g, e)\n val = integrate_spectrum(f, e1, e2)\n assert_quantity_allclose(val, ref)\n\n # Test quantity handling\n e2_ = Quantity(1e4, \"GeV\")\n val_ = integrate_spectrum(f, e1, e2_)\n 
assert_quantity_allclose(val, val_)", "def integral (self):\n dx = self.xbins[1] - self.xbins[0]\n dy = self.ybins[1] - self.ybins[0]\n return self.sum * (dx * dy)", "def integrate(self, t):", "def integrand(order, theta, x_eval):\n return np.cos(order*theta - x_eval*np.sin(theta))/np.pi", "def circular_integration(xpoints, ypoints, endpoint_included=False, axis=0):\n if endpoint_included is False:\n integrateme = np.zeros(len(ypoints)+1)\n integrateme[:-1] = ypoints\n integrateme[-1] = ypoints[0] # wrap domain\n \n integratex = np.zeros(len(xpoints) + 1)\n integratex[:-1] = xpoints\n integratex[-1] = xpoints[-1] + (xpoints[1]-xpoints[0])\n elif endpoint_included is True:\n integrateme = ypoints\n integratex = xpoints\n \n intdata = np.trapz(integrateme, integratex, axis=axis)\n \n return intdata", "def integrate_trapezoid_col(fxdx_col, dx_col, init_val):\n assert len(fxdx_col) == len(dx_col)\n fxdx_l = fxdx_col.tolist()\n dx_l = dx_col.tolist()\n prev_val = init_val\n y = [init_val]*len(dx_col)\n prev_fx = 0\n for i in xrange(len(dx_col)):\n y_val = prev_val + float(dx_l[i]) * (fxdx_l[i] + prev_fx) / 2\n y[i] = y_val\n prev_val = y_val\n prev_fx = fxdx_l[i]\n \n return np.array(y)", "def _appendAxisDefinition(self, axis):\n length = len(axis)\n\n self.na_dict[\"NX\"].append(length)\n self.na_dict[\"XNAME\"].append(xarray_utils.getBestName(axis))\n\n # If only one item in axis values\n if length < 2:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist()) \n return\n\n incr = xarray_utils.get_interval(axis, 0, 1)\n\n for i in range(1, length):\n if (axis[i] - axis[i - 1]) != incr:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist())\n break\n\n else: # If did not break out of the loop\n max_length = length\n if length > 3: \n max_length = 3\n\n self.na_dict[\"DX\"].append(incr)\n self.na_dict[\"NXDEF\"].append(max_length)\n self.na_dict[\"X\"].append(axis[:max_length])", "def sp_integrate_2D ( func ,\n xmin , xmax ,\n ymin , ymax , *args , **kwargs ) :\n from scipy import integrate\n ##\n result = integrate.dblquad ( func ,\n ymin ,\n ymax ,\n lambda x : xmin ,\n lambda x : xmax , \n *args , **kwargs )\n return result[0]", "def integrate_slice(self, x_array: np.array, y_array: np.array) -> np.array:\n try:\n area = sp(y_array, x_array)\n if np.isnan(area):\n logger.warning(f\"\"\"Integration failed in spectrum {self.name} using Simpson's rule. 
\n Falling back to trapezoidal rule.\"\"\")\n area = tp(y_array, x_array)\n return area\n except:\n raise SpectrumIntegrationError(f'Integration not possible for {self.name}')", "def get_axis(header, axis):\n \n logger = logging.getLogger(__name__)\n \n logger.debug(\"Will extract axis: {}.\".format(axis))\n \n wcs = WCS(header)\n\n wcs_arr_shape = wcs.array_shape\n logger.debug(\"WCS array shape: {}\".format(wcs_arr_shape))\n n_axis = wcs.array_shape[-axis]\n logger.debug(\"Axis should have {} elements.\".format(n_axis))\n if len(wcs_arr_shape) > 3:\n axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis), np.zeros(n_axis)])\n else:\n axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis)])\n\n axis_vals = np.asarray(axis_vals)\n axis_vals = axis_vals[:,axis-1]\n \n return axis_vals", "def integrate(self, lmin, lmax, z, band=1):\n\n if not hasattr(self, 'lummean'):\n self.magmean = np.array([(self.magbins[i]+self.magbins[i+1])/2\n for i in range(len(self.magbins)-1)])\n if not hasattr(self, 'zmean'):\n self.zmean = np.array([(self.zbins[i]+self.zbins[i+1])/2\n for i in range(len(self.zbins)-1)])\n\n if not hasattr(self, 'integ_spline'):\n self.mv_spline = RectBivariateSpline(self.magmean, self.zmean,\n self.luminosity_function[:,band,:])\n\n uvspl = lambda l : self.mv_spline(l, z)\n\n n = quad(uvspl, lmin, lmax)\n\n return n[0]", "def AngleHistogram(X,rotation_axis,x_angles,x_angle_location,y_angles,y_angle_location,allAngles):\n axis_ind =[]\n ind=np.argmin(X,axis=0)\n if rotation_axis == 'x':\n for i in range(len(ind)):\n axis_ind.append(x_angle_location[ind[i]])\n plt.hist(axis_ind, bins=np.array(range(-1,len(x_angles)))+0.5)\n plt.xticks(np.array(range(0,len(x_angles))),x_angles)\n pylab.xlim(-1,len(x_angles))\n plt.xlabel(\"x-angles\")\n plt.ylabel(\"frequency\")\n elif rotation_axis == 'y':\n for i in range(len(ind)):\n axis_ind.append(y_angle_location[ind[i]])\n plt.hist(axis_ind, bins=np.array(range(-1,len(y_angles)))+0.5)\n plt.xticks(np.array(range(0,len(y_angles))),y_angles)\n pylab.xlim(-1,len(y_angles))\n plt.xlabel(\"y-angles\")\n plt.ylabel(\"frequency\")\n elif rotation_axis == 'xy':\n axis_ind=ind\n plt.hist(axis_ind, bins=np.array(range(-1,len(allAngles[0])))+0.5)\n plt.xticks(np.array(range(0,len(allAngles[0]))),allAngles[0])\n pylab.xlim(-1,len(allAngles[0]))\n plt.xlabel(\"angles\")\n plt.ylabel(\"frequency\")\n else:\n print 'axis not available'\n #return axis_ind\n \n locs, labels = plt.xticks()\n plt.setp(labels, rotation=90)", "def normalize_axis(axis, ndim):\n if axis is None:\n return None\n\n if isinstance(axis, Integral):\n axis = int(axis)\n if axis < 0:\n axis += ndim\n\n if axis >= ndim or axis < 0:\n raise ValueError('Invalid axis index %d for ndim=%d' % (axis, ndim))\n\n return axis\n\n if isinstance(axis, Iterable):\n if not all(isinstance(a, Integral) for a in axis):\n raise ValueError(\"axis %s not understood\" % axis)\n\n return tuple(normalize_axis(a, ndim) for a in axis)\n\n raise ValueError(\"axis %s not understood\" % axis)", "def integrate(equ):\n if \"x\" in equ:\n return polynomial_equation(equ)\n else:\n return constant_equation(equ)", "def acceleration_sensor(axis):\n\n\tsensor_name = \"baseBoard\"\n\treg_addr = 24\n\tdata_len = 56\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\t#get sensor data\n\tdata = rospy.wait_for_message('MediumSize/SensorHub/Imu', Imu, 2)\n\tacceleration = data.linear_acceleration\n\tif axis == \"x\":\n\t\tresult = 
acceleration.x\n\telif axis == \"y\":\n\t\tresult = acceleration.y\n\telse:\n\t\tresult = acceleration.z\n\n\tdelete_sensor(sensor_name)\n\treturn result", "def getAxis(self,axis):\n\n\t\tif axis == \"u\":\n\t\t\tif len(self.usr) != 0:\n\t\t\t\treturn np.append([0], self.usr)\n\n\t\tif axis == \"s\":\n\t\t\tif len(self.seg) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.seg\n\t\t\t\telse:\n\t\t\t\t\tfirst = self.seg[0] - 1.\n\t\t\t\t\treturn np.append([first], self.seg)\n\n\t\tif axis == \"c\":\n\t\t\tif len(self.cos) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.cos\n\t\t\t\telse:\n\t\t\t\t\tfirst = -1.\n\t\t\t\t\treturn np.append([first], self.cos)\n\n\t\tif axis == \"e\":\n\t\t\tif len(self.erg) != 0:\n\t\t\t\tfirst = self.erg[0] - 1.\n\t\t\t\treturn np.append([first], self.erg)\n\n\t\tif axis == \"t\":\n\t\t\tif len(self.tim) != 0:\n\t\t\t\tfirst = self.tim[0] - 1.\n\t\t\t\treturn np.append([first], self.tim)\n\n\t\tif axis == \"i\":\n\t\t\treturn self.cora\n\n\t\tif axis == \"j\":\n\t\t\treturn self.corb\n\n\t\tif axis == \"k\":\n\t\t\treturn self.corc\n\n\t\treturn []", "def sine2d(xx,yy,amp,wavelength,a,phase):\n z = amp*sin(((cos(a)*xx+sin(a)*yy)-phase*wavelength)*2.*pi/wavelength)\n return z" ]
[ "0.5965463", "0.5722021", "0.55001545", "0.54563475", "0.5267387", "0.51354736", "0.5102577", "0.50662524", "0.49761945", "0.49703512", "0.4968417", "0.4947284", "0.49409816", "0.49374717", "0.49055633", "0.48579165", "0.48439932", "0.47934014", "0.4778929", "0.4778446", "0.47499588", "0.4729546", "0.4703109", "0.46892956", "0.46831098", "0.46827915", "0.46764606", "0.46749753", "0.46581516", "0.4649142" ]
0.65771925
0
Declarative request hook for TPU Start command.
def StartRequestHook(ref, args, request):
  del ref
  del args
  start_request = GetMessagesModule().StartNodeRequest()
  request.startNodeRequest = start_request
  return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start():\n log(\"=========== hook: start ===========\")", "def start( *args, **kwargs ):", "def start():\n request.start = time.time()", "def start_procedure(self):\n pass", "def start(self):\n self.log.setLevel(logging.INFO)\n super().start()\n \n self._dts = rift.tasklets.DTS(self.tasklet_info,\n UtCompositeYang.get_schema(),\n self._loop,\n self.on_dts_state_change) \n\n # Set the instance id\n self.instance_name = self.tasklet_info.instance_name\n self.instance_id = int(self.instance_name.rsplit('-', 1)[1])\n self.log.debug(\"Starting TestDriverTasklet Name: {}, Id: {}\".format(\n self.instance_name,\n self.instance_id))\n\n self.state = TaskletState.STARTING", "def start(self):\n ...", "def _start(self, unit):\n raise NotImplementedError", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def start_processing(self):", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def _start(self):", "def startFluidinfo():\n sudo('start fluidinfo-api')\n sudo('/etc/init.d/haproxy start')\n sudo('/etc/init.d/nginx start')", "def start_request(self):\n self.session_manager.start_request()", "def start(self, device, *args, **kwargs):\n raise NotImplementedError", "def start(self, container: Container):", "def setup(self, args={}):\n\n return Status.RUN", "def start():", "def start():", "def start():", "def start():", "def startup():\n\n # Earlier versions of traffic_ctl do not support\n # \"server start\", so we prefer traffic_line here.\n if _TRAFFICLINE:\n cmd = _traffic_line(\"-U\")\n else:\n cmd = _traffic_ctl(\"server\", \"start\")\n\n _subprocess(cmd)\n return _statuscmd()", "def start():\n logging.info(\"Execution Started\")", "def set_program_start(op):\n replace_platform_variable(\"start\", op)", "def start():\n\tdata = bottle.request.json\n\t(\"START:\", json.dumps(data))\n\n\tresponse = {\"color\": \"#4F1851\", \"headType\": \"evil\", \"tailType\": \"hook\"}\n\treturn HTTPResponse(\n\t\tstatus=200,\n\t\theaders={\"Content-Type\": \"application/json\"},\n\t\tbody=json.dumps(response),\n\t)", "def TerminalClientStart(self):\n pass", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def setupStarted(self, *args, **kwargs): # real signature unknown\n pass", "def StartupNext(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.62123007", "0.6009447", "0.5895262", "0.58739084", "0.58359814", "0.568772", "0.5687707", "0.56750745", "0.56750745", "0.56496805", "0.5641725", "0.56403697", "0.56364733", "0.5618818", "0.56165755", "0.560966", "0.5591064", "0.5584766", "0.5584766", "0.5584766", "0.5584766", "0.5573866", "0.55732954", "0.5571909", "0.5549008", "0.5533163", "0.55033416", "0.55033416", "0.54959595", "0.549486" ]
0.63984233
0
Declarative request hook for TPU Stop command.
def StopRequestHook(ref, args, request):
  del ref
  del args
  stop_request = GetMessagesModule().StopNodeRequest()
  request.stopNodeRequest = stop_request
  return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_stop(self):\n self._messaged.emit((\"stop\",None,0,None))", "def StopRecordEnv(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def Stop(self, request, context):\r\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\r\n context.set_details('Method not implemented!')\r\n raise NotImplementedError('Method not implemented!')", "def Stop(self, *_):\n self.Log('Stopping...')\n self._stop = True", "def stop_procedure(self):\n pass", "def _stop(self):", "def vm_stop(self, params: dict) -> Tuple[\"Status\", dict]:", "def stop(self) -> str:\n return self.rpc_call(\"stop\")", "def stopVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/stop\" % (node,vmid), post_data)\n return data", "def request_stop(self, req):\n log.info(\"Received stop request\")\n if not self._configured:\n msg = \"FITS interface server is not configured\"\n log.error(msg)\n return (\"fail\", msg)\n log.info(\"Stopping FITS interface capture\")\n self._stop_capture()\n self._fw_connection_manager.drop_connection()\n return (\"ok\",)", "def StopControlService(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def Stop(self, request, global_params=None):\n config = self.GetMethodConfig('Stop')\n return self._RunMethod(\n config, request, global_params=global_params)", "def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)", "def stopRPC(time): #Status: WIP\r\n pass", "def stopwasp():\n\n\trespond = send_command('stopwasp')", "def Stop(self):\n raise NotImplementedError", "def stop(self):\n self.scion_sh('stop')", "def _stop(self, host):\n pass", "def teardown(request, exec_env):\n\n def fin():\n if exec_env.get_script_state() == \"RUNNING\":\n resp = exec_env.run_oet_command(\"stop\", \"--run_abort=False\")\n assert \"Successfully stopped\" in resp\n\n request.addfinalizer(fin)", "def stop_device(self):\n\n self.state = 'stopped'", "def stop(self):\n\n print(\"Status sent: stop\")\n\n offset = self.app_id * 10\n\n status_dict = {}\n # Test run led\n status_dict[offset + self.PIN_LED] = 0\n\n self.post_dict(status_dict)", "def stop(self):\n self._context.state = STOPPED", "def stop_run(arn=None):\n pass", "def stop(self) -> None:\n ...", "def stop(self):\n return self._send_command(\"stop\")", "def on_stop(self):\n self.write_log(\"策略停止\")\n self.cta_engine.event_engine.unregister(EVENT_TIMER, self.process_timer_event)", "def stop():", "def stop():", "def stop():", "def stop():" ]
[ "0.6810477", "0.66077775", "0.65568364", "0.6482014", "0.6473083", "0.6434593", "0.6422207", "0.63642", "0.6363843", "0.62955624", "0.62584615", "0.62437326", "0.6208099", "0.6195634", "0.6164148", "0.6162522", "0.6145841", "0.61360335", "0.6122276", "0.60925007", "0.6089813", "0.6087588", "0.6077723", "0.6075994", "0.606617", "0.6063789", "0.606075", "0.606075", "0.606075", "0.606075" ]
0.7161485
0
Add TPU resource args to parser for reimage command.
def AddReimageResourcesToParser(parser):
  custom_help = {
      'tpu': 'The Cloud TPU to reimage.'
  }

  resource_specs = LoadTPUResourceSpecs(custom_help)
  presentation_specs = []
  for arg in (spec for spec in resource_specs if spec.name in custom_help):
    presentation_specs.append(presentation_specs_lib.ResourcePresentationSpec(
        TPU_YAML_SPEC_TEMPLATE[arg.name]['flag_name'],
        arg.GenerateResourceSpec(),
        arg.group_help,
        flag_name_overrides={
            n: '' for n in TPU_YAML_SPEC_TEMPLATE[arg.name]['removed_flags']
        },
        required=True))
  concept_parsers.ConceptParser(presentation_specs).AddToParser(parser)
  # Not using Tensorflow resource arg here due to parsing conflict with zone
  # attribute and its ultimately passed only as string to API
  base.Argument(
      '--version',
      required=True,
      help='The Tensorflow version to Reimage Cloud TPU with.').AddToParser(
          parser)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_args(parser):\n # fmt: off\n parser.add_argument('--pixel-loss-type', type=str, default='l1')\n # fmt: on", "def add_args(parser):\n parser.add_argument(\"data\", metavar=\"FILE\", help=\"file prefix for data\")\n parser.add_argument(\n \"--num-classes0\",\n type=int,\n default=-1,\n help=\"number of classes0\",\n )\n parser.add_argument(\"--no-shuffle\", action=\"store_true\", default=False)", "def add_args(parser):\n # fmt: off\n parser.add_argument('data', help='colon separated path to data directories list, \\\n will be iterated upon during epochs in round-robin manner')\n parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',\n help='source language')\n parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',\n help='target language')\n parser.add_argument('--lazy-load', action='store_true',\n help='load the dataset lazily')\n parser.add_argument('--raw-text', action='store_true',\n help='load raw text dataset')\n parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',\n help='pad the source on the left')\n parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',\n help='pad the target on the left')\n parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the source sequence')\n parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the target sequence')\n parser.add_argument('--upsample-primary', default=1, type=int,\n help='amount to upsample primary dataset')\n # fmt: on\n parser.add_argument('--main-src-wordfreq', default=None, type=str,\n help='word frequency file of the main train source')\n parser.add_argument('--dialect-src-wordfreq', default=None, type=str,\n help='word frequency file of the dialect train source')\n parser.add_argument('--dialect-tau', default=1., type=float)\n parser.add_argument('--src-gradnorm-tau', default=1., type=float)\n parser.add_argument('--lm-path', default=None, type=str)\n parser.add_argument('--lm-dict-path', default=None, type=str)\n parser.add_argument('--lm-topk', default=0, type=int)\n parser.add_argument('--src-gradnorm-path', default=None, type=str)\n parser.add_argument('--src-gradnorm-nosoftmax', action='store_true')\n parser.add_argument('--exclude-self', action='store_true')", "def TpuTrainStep(self, *args):\n with tf.name_scope('tpu_train'):\n with py_utils.OpportunisticVariableReuseScope(True):\n with contextlib.ExitStack() as stack:\n if py_utils.IsEagerMode():\n stack.enter_context(py_utils.GradientTape(persistent=True))\n self._model.ConstructFPropBPropGraph()\n per_step_eval_metrics = self._eval_metrics.PackStepMetricsForAccumulation(\n self.task.eval_metrics, args)\n outfeed_op = self._OutfeedEnqueue(self.task.per_example_tensors)\n summed_metrics = []\n assert len(per_step_eval_metrics) == len(args)\n with tf.control_dependencies([outfeed_op]):\n for x, y in zip(per_step_eval_metrics, args):\n summed_metrics.append(x + y)\n return summed_metrics + [self.task.train_op]", "def add_args(parser):\n rescore_add_args(parser)\n parser.add_argument(\n \"--rl-weight\",\n type=float,\n default=0.1,\n help=\"trade-off coefficient of rl loss\",\n )\n parser.add_argument(\n \"--rl-num-trajectory\",\n type=int,\n default=3,\n help=\"num trajectory in rl training\",\n )", "def add_args(self, parser):", "def TpuEvalStep(self, *args):\n with tf.name_scope('tpu_eval'):\n self._model.ConstructFPropGraph()\n per_step_eval_metrics = 
self._eval_metrics.PackStepMetricsForAccumulation(\n self._task.eval_metrics, args)\n return [x + y for x, y in zip(per_step_eval_metrics, args)]", "def handle_args():\n parser = argparse.ArgumentParser(description=\"Faster-RCNN Implementation\")\n parser.add_argument(\"-handle-gpu\", action=\"store_true\", help=\"Tensorflow 2 GPU compatibility flag\")\n args = parser.parse_args()\n return args", "def TRT_DigitizationPUToolCfg(flags, name=\"TRT_DigitizationToolPU\", **kwargs):\n kwargs.setdefault(\"OutputObjectName\", \"TRT_PU_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", \"TRT_PU_SDO_Map\")\n kwargs.setdefault(\"HardScatterSplittingMode\", 2)\n return TRT_DigitizationBasicToolCfg(flags, name, **kwargs)", "def add_args(parser):\n # fmt: off\n parser.add_argument(\"--hidden-size\", type=int, default=512)\n parser.add_argument(\"--max-epochs\", type=int, default=1000)\n parser.add_argument(\"--sample-size\", type=int, default=500)\n parser.add_argument(\"--batch-size\", type=int, default=4)\n # fmt: on", "def modify_train_args(args: Namespace):\n if args.message.startswith('tetra'):\n setattr(args, 'tetra', True)\n else:\n setattr(args, 'tetra', False)\n\n # shuffle=False for custom sampler\n if args.shuffle_pairs:\n setattr(args, 'no_shuffle', True)\n\n setattr(args, 'device', torch.device('cuda' if torch.cuda.is_available() else 'cpu'))", "def add_train_args(parser):\n\n # Runtime environment\n runtime = parser.add_argument_group('Environment')\n runtime.add_argument('--dataset', type=str, default=\"searchqa\",\n help='Dataset: searchqa, quasart or unftriviaqa')\n runtime.add_argument('--base_dir', type=str, default=\".\",\n help='base_dir of the pre-processing')", "def Args(parser):\n parser.add_argument('data_asset',\n type=arg_parsers.ArgList(min_length=1),\n default=[],\n metavar='DATA_ASSET_PATH',\n help='Comma-separated paths to the data assets.')\n parser.add_argument('annotation',\n default='',\n metavar='TAXONOMY_ANNOTATION',\n help='Annotation to tag the data asset(s) with.')\n parser.add_argument('--load',\n required=False,\n metavar='FILE_PATH',\n help='Tag data assets with annotations specified in a '\n 'text file.')\n parser.add_argument('--remove',\n required=False,\n action='store_true',\n default=False,\n help='If set, remove the specified annotations on data '\n 'assets.')", "def add_args(parser):\n # fmt: off\n TranslationTask.add_args(parser)\n parser.add_argument('--langs', required=True, metavar='LANG',\n help='comma-separated list of monolingual language, for example, \"en,de,fr\"'\n 'be careful these langs are what you used for pretraining (the same order),'\n 'not for finetuning.'\n 'you should always add all pretraining language idx during finetuning.')\n parser.add_argument('--multilang-sampling-alpha', type=float, default=0.7,\n help='sub sampling factor')\n parser.add_argument('--common_eos', type=str,\n help='common eos symbol for all languages')\n parser.add_argument('--placeholder', type=int, default=0,\n help='number of placeholder in dictionaries')\n parser.add_argument('--gt-langs', type=str,\n help=\"languages used in generation finetuning, separated wiht -, for example, 'en-fr-de'\")\n\n # fmt: on", "def cmd_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--image\",\n help=\"Full image path can be optionally supplied.\")\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir',\n type = str,\n default = 
'gs://flowers_resnet/tpu/resnet/data',\n help = 'The data directory file generated by the preprocess component.')\n parser.add_argument('--output',\n type = str,\n help = 'Path to GCS location to store output.')\n parser.add_argument('--region',\n type = str,\n default = 'us-central1',\n help = 'Region to use.')\n parser.add_argument('--depth',\n type = int,\n default = 50,\n help = 'Depth of ResNet model.')\n parser.add_argument('--train_batch_size',\n type = int,\n default = 128,\n help = 'Batch size for training.')\n parser.add_argument('--eval_batch_size',\n type = int,\n default = 32,\n help = 'Batch size for validation.')\n parser.add_argument('--steps_per_eval',\n type = int,\n default = 250,\n help = 'Steps per evaluation.')\n parser.add_argument('--train_steps',\n type = int,\n default = 10000,\n help = 'Number of training steps.')\n parser.add_argument('--num_train_images',\n type = int,\n default = 3300,\n help = 'Number of training set images.')\n parser.add_argument('--num_eval_images',\n type = int,\n default = 370,\n help = 'Number of validation set images.')\n parser.add_argument('--num_label_classes',\n type = int,\n default = 5,\n help = 'Number of classes.')\n parser.add_argument('--TFVERSION',\n type = str,\n default = '1.9',\n help = 'Version of TensorFlow to use.')\n args = parser.parse_args()\n return args", "def simplerun(args, options):\n try:\n cutoff = args.index('--')\n cmdline = ' '.join(args[cutoff + 1:])\n except ValueError:\n cmdline = ' '.join(args)\n\n print(\"Running command: '%s'\" % cmdline)\n\n thermos_task = ThermosTaskWrapper(Task(\n name=options.name,\n resources=Resources(cpu=1.0, ram=256 * 1024 * 1024, disk=0),\n processes=[Process(name=options.name, cmdline=cmdline)]))\n\n really_run(thermos_task,\n options.root,\n tempfile.mkdtemp(),\n task_id=options.task_id,\n user=options.user,\n prebound_ports=options.prebound_ports,\n chroot=False,\n daemon=options.daemon)", "def LoadTPUResourceSpecs(custom_help=None):\n resource_file_contents = pkg_resources.GetResource(TPU_YAML_RESOURCE_PATH,\n 'resources.yaml')\n if not resource_file_contents:\n raise calliope_exceptions.BadFileException(\n 'Resources not found in path [{}]'.format(TPU_YAML_RESOURCE_PATH))\n\n resource_dict = yaml.load(resource_file_contents)\n resource_specs = []\n for resource_name in TPU_YAML_SPEC_TEMPLATE:\n spec = resource_dict.get(resource_name, None)\n if not spec:\n raise ValueError(\n 'Resource spec [{}] not found in resource spec {}.yaml'.format(\n resource_name, TPU_YAML_RESOURCE_PATH))\n\n # Don't modify template\n temp_spec = copy.deepcopy(TPU_YAML_SPEC_TEMPLATE[resource_name])\n\n temp_spec['spec'] = spec\n if custom_help and custom_help.get(resource_name):\n temp_spec['help_text'] = custom_help[resource_name]\n resource_specs.append(resource_arg_schema.YAMLResourceArgument.FromData(\n temp_spec))\n return resource_specs", "def __init__(self, *args, **kwargs):\n super(MadryEtAlMultiGPU, self).__init__(*args, **kwargs)\n self.structural_kwargs += ['ngpu']", "def Args(cls, parser):\n ssh_utils.BaseSSHCLIHelper.Args(parser)\n AddSSHArgs(parser)\n tpu_ssh_utils.AddTPUSSHArgs(\n parser, enable_iap=cls._ENABLE_IAP, enable_batching=cls._ENABLE_BATCHING\n )\n AddCommandArgGroup(parser)\n flags.AddZoneFlag(parser, resource_type='tpu', operation_type='ssh')", "def parse_params(self, ngpu=1, **kwargs):\n\n return_status = super(MadryEtAlMultiGPU, self).parse_params(**kwargs)\n self.ngpu = ngpu\n\n return return_status", "def __init__(__self__,\n resource_name: str,\n args: 
Optional[TargetPoolArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args", "def parseargs(p):\n p.set_defaults(func=func)\n p.description = \"print a universally unique identifier (UUID)\"\n p.add_argument(\n \"-r\",\n \"--random\",\n action=\"store_const\",\n dest=\"uuidtype\",\n const='RANDOM',\n help=\"Generate a random UUID\",\n )\n p.add_argument(\n \"-t\",\n \"--time\",\n action=\"store_const\",\n dest=\"uuidtype\",\n const='TIME',\n help=\"Generate a UUID from a host ID, \"\n + \"sequence number, and the current time.\",\n )\n return p", "def do_command(self, args):\n imageops = dbops.Images()\n imageops.add(args)", "def resource_parser(parsed_args):\n # set some defaults\n parsed_args[\"resources\"] = {\n **{\"parallel_downloads\": 3, \"genomepy_downloads\": 1, \"deeptools_limit\": 16, \"R_scripts\": 1},\n **parsed_args.get(\"resources\", {}),\n }\n\n if \"mem_mb\" in parsed_args[\"resources\"]:\n # convert memory to gigabytes\n parsed_args[\"resources\"][\"mem_gb\"] = round(parsed_args[\"resources\"][\"mem_mb\"] / 1024.0)\n del parsed_args[\"resources\"][\"mem_mb\"]\n\n # no need to get system limit when specified\n if \"mem_gb\" in parsed_args[\"resources\"]:\n parsed_args[\"resources\"][\"mem_gb\"] = int(parsed_args[\"resources\"][\"mem_gb\"])\n return\n\n if \"cluster\" in parsed_args:\n # if running on a cluster assume no limit on memory (unless specified)\n parsed_args[\"resources\"][\"mem_gb\"] = 999999\n else:\n # otherwise, assume system memory\n mem = psutil.virtual_memory().total / 1024 ** 3\n parsed_args[\"resources\"][\"mem_gb\"] = round(mem)", "def add_args(parser):\n add_encoder_args(parser)\n add_decoder_args(parser)", "def TRT_DigitizationPUCfg(flags, name=\"TRT_DigitizationPU\", **kwargs):\n acc = TRT_DigitizationPUToolCfg(flags)\n kwargs[\"PileUpTools\"] = acc.popPrivateTools()\n acc = TRT_DigitizationBasicCfg(flags, name=name, **kwargs)\n acc.merge(TRT_OutputCfg(flags))\n return acc", "def add_args(parser):\r\n parser.add_argument(\"data\", help=\"path to data directory\")\r\n parser.add_argument(\r\n \"--silence-token\", default=\"\\u2581\", help=\"token for silence (used by w2l)\"\r\n )\r\n parser.add_argument(\r\n \"--max-source-positions\",\r\n default=sys.maxsize,\r\n type=int,\r\n metavar=\"N\",\r\n help=\"max number of frames in the source sequence\",\r\n )\r\n parser.add_argument(\r\n \"--max-target-positions\",\r\n default=1024,\r\n type=int,\r\n metavar=\"N\",\r\n help=\"max number of tokens in the target sequence\",\r\n )", "def add_extra_args(self):\n self.parser.add_argument('--device', dest='device', type=str, help='Device ID, e.g. d--0001')" ]
[ "0.54705244", "0.51580477", "0.5047737", "0.50449234", "0.50383246", "0.5009963", "0.49864167", "0.49745366", "0.49586836", "0.49418563", "0.49120963", "0.4862225", "0.48517346", "0.48434925", "0.48415738", "0.48293468", "0.48072395", "0.47596607", "0.47467905", "0.4669572", "0.46657103", "0.46583542", "0.46579573", "0.46516183", "0.46441886", "0.4629568", "0.46030214", "0.45930332", "0.45926094", "0.45895073" ]
0.7974637
0
Retrieves the project field from the provided network value.
def _ParseProjectNumberFromNetwork(network, user_project): try: registry = resources.REGISTRY.Clone() network_ref = registry.Parse(network, collection='compute.networks') project_identifier = network_ref.project except resources.Error: # If not a parseable resource string, then use user_project project_identifier = user_project return projects_command_util.GetProjectNumber(project_identifier)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_project(self, i):\r\n return self.__projects[i]", "def __project(uri):\n uri = uri.lower().split('/')[-1].split('_')[0]\n project = {\n 'as': \"ICOS\",\n 'es': \"ICOS\",\n 'os': \"ICOS\",\n 'neon': 'NEON',\n 'ingos': 'INGOS',\n 'fluxnet': 'FLUXNET'\n }\n\n if uri in project:\n return project.get(uri)\n else:\n return 'other'", "def get_project_specific(self, project_format='id'):\n if self.api_version == 2:\n return self.creds.get('tenant_%s' % project_format)\n else:\n return self.creds.get('project_%s' % project_format)", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def get_project(name):\n tx = cypher_transaction()\n query = \"\"\"MATCH (n:project) WHERE n.name={project_name} RETURN n\"\"\"\n tx.append(query, parameters={'project_name': name})\n result = tx.commit()\n\n # Returns a result of the form [[\n # Record(\n # columns=('n',),\n # values=(Node('http://localhost:7474/db/data/node/233'),)\n # )\n # ]]\n return _first(result)[0].values[0]", "def GetProject(args):\n return args.project or properties.VALUES.core.project.GetOrFail()", "def project(self) -> str:\n return self.proto.project", "def _get_project(self):\n project_id = self._node.parm('project').eval()\n projects = data_block.for_houdini().projects()\n project_names = [project[\"name\"]\n for project in projects if project['id'] == project_id]\n if not project_names:\n raise hou.InvalidInput(\n \"%s %s is an invalid project.\" %\n self._node.name(), project_id)\n return {\n \"id\": project_id,\n \"name\": project_names[0]\n }", "def project(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project\")", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def get_project(self, 
project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError" ]
[ "0.63224125", "0.62142074", "0.6150654", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.6148387", "0.61081284", "0.6102496", "0.60995054", "0.6040108", "0.60334295", "0.6006636", "0.5988902" ]
0.71506685
0
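A note on the record above: the document resolves which project owns the network by parsing the network resource string and falling back to the user's project when parsing fails. A minimal, dependency-free sketch of that fallback pattern (the path handling here is illustrative and does not use the real gcloud resources registry):

    def parse_project_from_network(network, user_project):
        # A full network resource path looks like
        #   projects/<project>/global/networks/<name>;
        # a bare network name carries no project, so fall back to the
        # project the caller is already operating in.
        parts = network.strip("/").split("/")
        if len(parts) >= 2 and parts[0] == "projects":
            return parts[1]
        return user_project

    # A bare name falls back; a full path yields the owning project.
    assert parse_project_from_network("default", "my-project") == "my-project"
    assert parse_project_from_network(
        "projects/other-proj/global/networks/default", "my-project") == "other-proj"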
Validates that the supplied network has been peered to a Google Organization. Uses the Service Networking API to check whether the network specified via the network flag has been peered to the Google Organization; if it has, the TPU create operation proceeds, otherwise a ServiceNetworking exception is raised. The check only applies when use_service_networking has been specified; otherwise it returns immediately.
def CreateValidateVPCHook(ref, args, request): del ref service_networking_enabled = args.use_service_networking if service_networking_enabled: project = args.project or properties.VALUES.core.project.Get(required=True) try: network_project_number = _ParseProjectNumberFromNetwork(args.network, project) lookup_result = peering.ListConnections( network_project_number, 'servicenetworking.googleapis.com', args.network) except (exceptions.ListConnectionsPermissionDeniedException, apitools_exceptions.HttpError) as e: raise ServiceNetworkingException( _PROJECT_LOOKUP_ERROR.format(args.network, project, e)) if not lookup_result: raise ServiceNetworkingException( _PEERING_VALIDATION_ERROR.format(args.network)) return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed, issue error\n self._display_semantic_error(\"network\")", "def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n 
networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return", "def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass", "def _validate_create_network(self, context, net_data):\n external = net_data.get(extnet_apidef.EXTERNAL)\n is_external_net = validators.is_attr_set(external) and external\n with_qos = validators.is_attr_set(\n net_data.get(qos_consts.QOS_POLICY_ID))\n\n if with_qos:\n self._validate_qos_policy_id(\n context, net_data.get(qos_consts.QOS_POLICY_ID))\n if is_external_net:\n raise nsx_exc.QoSOnExternalNet()", "def create_external_network(self, extnet_info, ignore_privious=False):\n LOG_OBJ.debug(\"Creating External Network : \")\n _tenant_name = config.cloud_admin_project\n _net_name = extnet_info['extnet_name']\n _gateway = extnet_info['gateway']\n _cidr = extnet_info['cidr']\n _start_ip = extnet_info['start_ip']\n _end_ip = extnet_info['end_ip']\n\n if not ignore_privious:\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n output = json.loads(response.data)\n if output is None:\n LOG_OBJ.error(\"No response from server while getting\"\n \" networks.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Getting networks list Failed with status %s \" %\n response.status)\n return response.status\n\n for nets in output['networks']:\n if nets['router:external']:\n LOG_OBJ.info(\"External Network already created\")\n return\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _extnet_info = {\"network\": {\n \"tenant_id\": self.cloud_admin_info[\"project_id\"],\n \"name\": _net_name,\n \"router:external\": \"True\",\n \"admin_state_up\": True}}\n _body = json.dumps(_extnet_info)\n\n response = self.request(\"POST\", _url, _headers, _body)\n output = json.loads(response.data)\n if output is None:\n LOG_OBJ.error(\"No response from server while creating ext net.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create ext network Failed with status %s \" %\n response.status)\n return response.status\n\n _ext_net_id = output['network']['id']\n LOG_OBJ.debug(\"External Network created successfully. 
ID:%s\" %\n _ext_net_id)\n\n # Creating External Subnet\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _ext_subnet_info = {\"subnet\": {\n \"ip_version\": 4,\n \"allocation_pools\": [{\"start\": _start_ip,\n \"end\": _end_ip}],\n \"gateway_ip\": _gateway,\n \"enable_dhcp\": \"False\",\n \"network_id\": _ext_net_id,\n \"tenant_id\": self.cloud_admin_info[\"project_id\"],\n \"cidr\": _cidr,\n \"name\": _net_name + \"-sub\"}}\n _body = json.dumps(_ext_subnet_info)\n output = self.request(\"POST\", _url, _headers, _body)\n if output is None:\n LOG_OBJ.error(\"No response from server while creating ext-subet\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create subnet Failed with status %s \" %\n response.status)\n return response.status\n\n return _ext_net_id", "def _validate_external_net_create(self, net_data, default_tier0_router,\n tier0_validator=None):\n if not validators.is_attr_set(net_data.get(pnet.PHYSICAL_NETWORK)):\n tier0_uuid = default_tier0_router\n else:\n tier0_uuid = net_data[pnet.PHYSICAL_NETWORK]\n if ((validators.is_attr_set(net_data.get(pnet.NETWORK_TYPE)) and\n net_data.get(pnet.NETWORK_TYPE) != utils.NetworkTypes.L3_EXT and\n net_data.get(pnet.NETWORK_TYPE) != utils.NetworkTypes.LOCAL) or\n validators.is_attr_set(net_data.get(pnet.SEGMENTATION_ID))):\n msg = (_(\"External network cannot be created with %s provider \"\n \"network or segmentation id\") %\n net_data.get(pnet.NETWORK_TYPE))\n raise n_exc.InvalidInput(error_message=msg)\n if tier0_validator:\n tier0_validator(tier0_uuid)\n return (True, utils.NetworkTypes.L3_EXT, tier0_uuid, 0)", "def check_has_network_code_checkbox(self):\n self.click_element(self.has_network_code_checkbox_locator)", "def valid_ip_network(network):\n try:\n ipaddr.IPNetwork(network)\n except ValueError:\n return False\n\n return True", "def is_docker_user_defined_network(network): # type: (str) -> bool\n return bool(network) and network != 'bridge'", "def validate_network(options):\n\n # Start marker for time measure\n start = time.time()\n\n #------------#\n # INPUTS #\n #------------#\n\n # Our input network 1\n input_network_file = options.network_file\n type_id = options.type_id\n network_format = options.network_format\n name_input_network = 'INPUT'\n\n # HIPPIE network\n hippie_file = '/home/quim/Databases/hippie/HIPPIE-current.mitab.txt'\n hippie_type_id = 'geneID' # It can be geneID or UniprotEntry\n output_hippie_file = '/home/quim/data/networks/HIPPIE/HIPPIE.{}.multifields'.format(hippie_type_id)\n output_hippie_newID_file = '/home/quim/data/networks/HIPPIE/HIPPIE.{}.multifields'.format(type_id)\n hippie_network_format = 'multi-fields'\n\n # ConsensusPathDB network\n ConsensusPathDB_file = '/home/quim/data/networks/ConsensusPathDB/ConsensusPathDB_human_PPI'\n output_ConsensusPath_file = '/home/quim/data/networks/ConsensusPathDB/ConsensusPathDB_human_PPI.multifields'\n output_ConsensusPath_newID_file = '/home/quim/data/networks/ConsensusPathDB/ConsensusPathDB_human_PPI.{}.multifields'.format(type_id)\n consensus_network_format = 'multi-fields'\n\n # I2D network\n I2D_file = '/home/quim/data/networks/I2D/i2d.2_9.Public.HUMAN.tab'\n output_I2D_file = '/home/quim/data/networks/I2D/i2d.2_9.Public.HUMAN.multifields'\n output_I2D_newID_file = '/home/quim/data/networks/I2D/i2d.2_9.Public.HUMAN.{}.multifields'.format(type_id)\n I2D_network_format = 'multi-fields'\n\n\n 
#------------------------#\n # DEFINE OUR NETWORK #\n #------------------------#\n\n # Define the input network\n network = NA.Network(input_network_file, type_id, network_format)\n\n print('{} network'.format(name_input_network))\n print('Number of edges: {}'.format(len(network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(network.get_nodes())))\n\n\n #---------------------------#\n # DEFINE HIPPIE NETWORK #\n #---------------------------#\n\n if not fileExist(output_hippie_file):\n hippie_instance = VA.HippieParser(hippie_file)\n hippie_instance.parse()\n hippie_network = hippie_instance.write_network_file(output_hippie_file, hippie_network_format, hippie_type_id)\n else:\n hippie_network = NA.Network(output_hippie_file, hippie_type_id, hippie_network_format)\n\n # Translate HIPPIE to 'type_id'\n if type_id.lower() != hippie_type_id.lower():\n if not fileExist(output_hippie_newID_file):\n hippie_network = VA.translate_network_from_BIANA(hippie_network, hippie_type_id, type_id, output_hippie_newID_file)\n else:\n hippie_network = NA.Network(output_hippie_newID_file, type_id, hippie_network_format)\n\n print('HIPPIE network')\n print('Number of edges: {}'.format(len(hippie_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(hippie_network.get_nodes())))\n\n\n #------------------------------------#\n # DEFINE CONSENSUSPATHDB NETWORK #\n #------------------------------------#\n\n if not fileExist(output_ConsensusPath_file):\n consensus_instance = VA.ConsensusPathDBParser(ConsensusPathDB_file)\n consensus_network_uniprot = consensus_instance.parse(output_ConsensusPath_file, consensus_network_format)\n else:\n consensus_network_uniprot = NA.Network(output_ConsensusPath_file, 'uniprotentry', consensus_network_format)\n\n # Translate ConsensusPathDB to 'type_id'\n if type_id.lower() != 'uniprotentry':\n if not fileExist(output_ConsensusPath_newID_file):\n consensus_network = VA.translate_network_from_BIANA(consensus_network_uniprot, 'uniprotentry', type_id, output_ConsensusPath_newID_file)\n else:\n consensus_network = NA.Network(output_ConsensusPath_newID_file, type_id, consensus_network_format)\n else:\n consensus_network = consensus_network_uniprot\n\n print('ConsensusPath (uniprotentry) network')\n print('Number of edges: {}'.format(len(consensus_network_uniprot.get_edges())))\n print('Number of nodes: {}\\n'.format(len(consensus_network_uniprot.get_nodes())))\n\n print('ConsensusPath network')\n print('Number of edges: {}'.format(len(consensus_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(consensus_network.get_nodes())))\n\n\n #------------------------#\n # DEFINE I2D NETWORK #\n #------------------------#\n\n if not fileExist(output_I2D_file):\n I2D_instance = VA.I2DParser(I2D_file)\n I2D_network_uniprot = I2D_instance.parse(output_I2D_file, I2D_network_format)\n else:\n I2D_network_uniprot = NA.Network(output_I2D_file, 'uniprotaccession', I2D_network_format)\n\n # Translate I2D to 'type_id'\n if type_id.lower() != 'uniprotaccession':\n if not fileExist(output_I2D_newID_file):\n I2D_network = VA.translate_network_from_BIANA(I2D_network_uniprot, 'uniprotaccession', type_id, output_I2D_newID_file)\n else:\n I2D_network = NA.Network(output_I2D_newID_file, type_id, I2D_network_format)\n else:\n I2D_network = I2D_network_uniprot\n\n print('I2D (uniprotaccession) network')\n print('Number of edges: {}'.format(len(I2D_network_uniprot.get_edges())))\n print('Number of nodes: {}\\n'.format(len(I2D_network_uniprot.get_nodes())))\n\n print('I2D 
network')\n print('Number of edges: {}'.format(len(I2D_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(I2D_network.get_nodes())))\n\n\n #----------------------------------#\n # CHECK OVERLAP OF NODES/EDGES #\n #----------------------------------#\n\n print_summary_overlap(network, hippie_network, name_input_network, 'HIPPIE')\n print_summary_overlap(network, consensus_network, name_input_network, 'CONSENSUSPATHDB')\n print_summary_overlap(network, I2D_network, name_input_network, 'I2D')\n\n # print_summary_overlap(hippie_network, consensus_network, 'HIPPIE', 'CONSENSUSPATHDB')\n # print_summary_overlap(hippie_network, I2D_network, 'HIPPIE', 'I2D')\n # print_summary_overlap(consensus_network, I2D_network, 'CONSENSUSPATHDB', 'I2D')\n\n\n # End marker for time\n end = time.time()\n print('\\nTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\\n'.format(end - start, (end - start) / 60))", "def test_taas_forwarded_traffic_provider_net_negative(self):\n\n with self._setup_topology(taas=False, use_taas_cloud_image=True,\n provider_net=True):\n # Check that traffic was NOT forwarded to TAAS service\n self.assertFalse(self._check_icmp_traffic())", "def test_check_network(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n # Inputs are unconnected, check_network() should return False\n assert not network.check_network()\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # Inputs are now connected, check_network() should return True\n assert network.check_network()", "def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True", "def create_network(options, vsm_obj):\n edge_id = get_edge(vsm_obj)\n if not edge_id:\n if not add_edge(options):\n print(\"Failed to create edge\")\n return False\n edge_id = get_edge(vsm_obj)\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n name = get_network_name(options)\n response = virtual_wire.read_by_name(name)\n if response != \"FAILURE\":\n print(\"Found network %s already exists\" % options.name)\n return True\n\n virtual_wire_create = VirtualWireCreateSpecSchema()\n virtual_wire_create.name = name\n virtual_wire_create.tenantId = name\n virtual_wire_create.description = 'NSX network %s' % name\n\n # check if user needs to enable guest vlan tagging,\n # this is require if one needs to run vlan tests in nested\n # environment.\n if hasattr(options, 'guest_vlan'):\n if options.guest_vlan is True:\n print(\"network %s has guest vlan tagging enabled\"\\\n % options.name)\n virtual_wire_create.guestVlanAllowed = True\n\n print(\"Creating network %s\" % options.name)\n result = virtual_wire.create(virtual_wire_create)\n if (result[0].response.status != 201):\n print \"response: %s\" % result[0].response.status\n print \"response: %s\" % result[0].response.reason\n return False\n print(\"Changing security settings on the network\")\n set_network_security_policy(options)\n return add_edge_interface(options, edge_id)", "def local_network_check():\n return (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVINROMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n )", "def 
test_02_verify_ipv6_network_redundant(self):\n\n self.createIpv6NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def test_taas_forwarded_traffic_provider_net_positive(self):\n\n with self._setup_topology(use_taas_cloud_image=True,\n provider_net=True):\n # Check that traffic was forwarded to TAAS service\n self.assertTrue(self._check_icmp_traffic())", "def test_networking_project_network_create(self):\n pass", "def _check_consistency(self):\n\n # Run forward inference with n_sim=2 and catch any exception\n try:\n _, sim_data = self._forward_inference(n_sim=2, n_obs=10)\n except Exception as err:\n raise SimulationError(repr(err))\n\n # Run summary network check\n if self.summary_stats is not None:\n try:\n _ = self.summary_stats(sim_data)\n except Exception as err:\n raise SummaryStatsError(repr(err))\n\n # TODO: Run checks whether the network works with the data format\n\n # TODO: Run checks that loss works with the provided network", "def test_01_verify_ipv6_network(self):\n\n self.createIpv6NetworkOffering()\n self.createIpv6NetworkOfferingForUpdate()\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()", "def checklan(ipaddr, network):\n return True", "def check_service(self, url: str, check_wfs_member: bool = False, check_image: bool = False):\n service_status = self.check_status(url, check_wfs_member=check_wfs_member, check_image=check_image)\n if service_status.success is True:\n self.handle_service_success(service_status)\n else:\n self.handle_service_error(service_status)", "def check_validity(opt_result, digraph, ndds, max_cycle, max_chain, min_chain = None):\n\n # all used edges exist\n for chain in opt_result.chains:\n if chain.vtx_indices[0] not in [e.tgt.id for e in ndds[chain.ndd_index].edges]:\n raise KidneyOptimException(\"Edge from NDD {} to vertex {} is used but does not exist\".format(\n chain.ndd_index, chain.vtx_indices[0]))\n for cycle in opt_result.cycles:\n for i in range(len(cycle)):\n if digraph.adj_mat[cycle[i-1].id][cycle[i].id] is None:\n raise KidneyOptimException(\"Edge from vertex {} to vertex {} is used but does not exist\".format(\n cycle[i-1].id, cycle[i].id))\n \n # no vertex or NDD is used twice\n ndd_used = [False] * len(ndds)\n vtx_used = [False] * len(digraph.vs)\n for chain in opt_result.chains:\n if ndd_used[chain.ndd_index]:\n raise KidneyOptimException(\"NDD {} used more than once\".format(chain.ndd_index))\n ndd_used[chain.ndd_index] = True\n for vtx_index in chain.vtx_indices:\n if vtx_used[vtx_index]:\n raise KidneyOptimException(\"Vertex {} used more than once\".format(vtx_index))\n vtx_used[vtx_index] = True\n \n for cycle in opt_result.cycles:\n for vtx in cycle:\n if vtx_used[vtx.id]:\n raise KidneyOptimException(\"Vertex {} used more than once\".format(vtx.id))\n vtx_used[vtx.id] = True\n\n # cycle and 
chain caps are respected\n for chain in opt_result.chains:\n if len(chain.vtx_indices) > max_chain:\n raise KidneyOptimException(\"The chain cap is violated\")\n for cycle in opt_result.cycles:\n if len(cycle) > max_cycle:\n raise KidneyOptimException(\"The cycle cap is violated\")\n if not min_chain is None:\n for chain in opt_result.chains:\n if len(chain.vtx_indices) < min_chain:\n raise KidneyOptimException(\"The min-chain cap is violated\")\n\n # # min chain length is respected\n # if cfg.min_chain_len is not None:\n # for chain in opt_result.chains:\n # if len(set(chain.vtx_indices)) < cfg.min_chain_len:\n # raise KidneyOptimException(\"The chain is below the min length (%d):\\n %s\" %\n # (cfg.min_chain_len,chain.display()))\n\n # chains do not contain loops\n for chain in opt_result.chains:\n if len(set(chain.vtx_indices)) < len(chain.vtx_indices):\n raise KidneyOptimException(\"The chain contains loops:\\n %s\" % chain.display())", "def IsTopologicallyValid(*args):\n return _BRepAlgo.brepalgo_IsTopologicallyValid(*args)", "def _validate_update_network(self, context, net_id, original_net,\n net_data):\n extern_net = self._network_is_external(context, net_id)\n with_qos = validators.is_attr_set(\n net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not allow QoS on external networks\n if with_qos:\n if extern_net:\n raise nsx_exc.QoSOnExternalNet()\n self._validate_qos_policy_id(\n context, net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not support changing external/non-external networks\n if (extnet_apidef.EXTERNAL in net_data and\n net_data[extnet_apidef.EXTERNAL] != extern_net):\n err_msg = _(\"Cannot change the router:external flag of a network\")\n raise n_exc.InvalidInput(error_message=err_msg)\n\n is_ens_net = self._is_ens_tz_net(context, net_id)\n if is_ens_net:\n self._assert_on_ens_with_qos(net_data)", "def check_for_service(self, remote_node, status):\n with remote_node.client() as c:\n r = c.get(\"/node/network\")\n current_status = r.body.json()[\"service_status\"]\n current_cert = r.body.json()[\"service_certificate\"]\n\n expected_cert = open(\n os.path.join(self.common_dir, \"networkcert.pem\"), \"rb\"\n ).read()\n\n assert (\n current_cert == expected_cert[:-1].decode()\n ), \"Current service certificate did not match with networkcert.pem\"\n assert (\n current_status == status.value\n ), f\"Service status {current_status} (expected {status.value})\"", "def brepalgo_IsTopologicallyValid(*args):\n return _BRepAlgo.brepalgo_IsTopologicallyValid(*args)", "def check_if_chain_exist(self, chain):\n user_division_chain = chain\n list_of_chains = self.get_all_user_division_chains()\n res = False\n\n try:\n # Check, if our chain already exists\n assert user_division_chain in list_of_chains\n res = True\n except AssertionError:\n print('Chain doesnt exist')\n finally:\n return res", "def use_service_networking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_service_networking\")", "def test_parameter_net_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self._fail_network_list = True\n self.configuration.hgst_net = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self._fail_network_list = False" ]
[ "0.5467901", "0.5072414", "0.49279153", "0.49249402", "0.47891456", "0.47063342", "0.4651699", "0.4650548", "0.46425897", "0.46351308", "0.459492", "0.45683298", "0.45626068", "0.4539677", "0.45239413", "0.4492794", "0.4486676", "0.4437604", "0.4424795", "0.44181558", "0.4412061", "0.43883395", "0.43742156", "0.4374076", "0.43735898", "0.4370959", "0.43708888", "0.43646407", "0.4356763", "0.43517148" ]
0.58377224
0
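A note on the record above: the hook only performs the peering lookup when service networking was requested, and it fails as soon as no connection to servicenetworking.googleapis.com is found for the network. A stripped-down sketch of that control flow; `list_connections` and `ServiceNetworkingError` are stand-ins for the real gcloud helpers, not their actual names:

    class ServiceNetworkingError(Exception):
        """Raised when the network is not peered to the service producer."""

    def validate_vpc_peering(network, project, use_service_networking, list_connections):
        # Skip the lookup entirely unless the caller asked for service networking.
        if not use_service_networking:
            return
        connections = list_connections(project, "servicenetworking.googleapis.com", network)
        if not connections:
            raise ServiceNetworkingError(
                "Network [%s] in project [%s] has no peering connection to "
                "servicenetworking.googleapis.com." % (network, project))

    # With a fake lookup that finds a peering, validation passes silently.
    validate_vpc_peering("default", "my-project", True, lambda p, svc, net: ["peering-1"])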
Get a person by the slugged name
def getBySlug( self, person_slug ): qry = """SELECT * FROM `%s`.`people` WHERE `slug` = "%s"; """ % ( self.db_name, Mysql.escape_string( person_slug ) ) person = Mysql.ex( qry ) return person[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_person(name):\n if ' ' in name:\n name = name.replace(',', '')\n else:\n return None\n\n try:\n (first, last) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n try:\n (last, first) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n return None", "def get_specialist(self, name):\n\n specialist = Specialist.query.filter_by(name=name).first()\n return specialist", "def get_by_slug(self, slug):\n return self.get(slug=slug)", "def test_05_get_person_by_name(self):\n p1 = Person.query.first()\n p1_data = p1.wrap()\n p1_f_name = p1_data[\"first_name\"]\n # find by first name only\n # get part of name and search\n q_string = \"?first_name={}\".format(p1_f_name[:3]) # TODO - verify the length\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and last name\n p1_l_name = p1_data[\"last_name\"]\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], p1_l_name)\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and non-existing last name\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], \"iAmNotThere\")\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 0)", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def get_by_slug(self, profile_slug):\n return User.gql(\"WHERE profile_slug = :1\", profile_slug).get()", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def _get_user(self, name: str, users: list, first_time=True) -> Optional[dict]:\n try:\n user = next(filter(lambda x: x['profile'].get('real_name_normalized') == name, users))\n except StopIteration:\n name = slughifi(name).decode('utf-8')\n if first_time:\n return self._get_user(name, users, first_time=False)\n return None\n return user", "def read_one(lname):\n # Does the person exist in people?\n if lname in PEOPLE:\n person = PEOPLE.get(lname)\n\n # otherwise, nope, not found\n else:\n abort(\n 404, \"Person with last name {lname} not found\".format(lname=lname)\n )\n\n return person", "def get_person_name(self, person_id):\n res = 
requests.get(url=\"https://api.ciscospark.com/v1/people/{}\".format(person_id),\n headers=self.headers)\n\n try:\n class person(object):\n firstName = res.json()['firstName']\n lastName = res.json()['lastName']\n\n return person\n except AttributeError as e:\n print(res.text)\n return None", "def get_user_named(self, name: str) -> Union[discord.User, None]:\n result = None\n users = self.users\n\n if len(name) > 5 and name[-5] == \"#\":\n # The 5 length is checking to see if #0000 is in the string,\n # as a#0000 has a length of 6, the minimum for a potential\n # discriminator lookup.\n potential_discriminator = name[-4:]\n\n # do the actual lookup and return if found\n # if it isn't found then we'll do a full name lookup below.\n result = discord.utils.get(users, name=name[:-5], discriminator=potential_discriminator)\n if result is not None:\n return result\n\n def pred(user):\n return user.nick == name or user.name == name\n\n return discord.utils.find(pred, users)", "def get_user_by_slug(self, slug):\n users = self.get_users({ 'profile_url': slug })\n if len(users) > 0:\n return users[0]", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def by_name(cls, name):\n u = cls.all().filter('name =', name).get()\n return u", "def by_name(cls, name):\n return cls.all().filter('name =', name).get()", "def get_person(request, person_id):\n person = get_object_or_404(Person, pk=person_id)\n\n\n return render_to_response('people/person_detail.html', {\n 'person': person,\n })", "def find_by_name(self, name):\n return self.get(name)", "def getByName( self, people_name ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `name` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_name ) )\n person = Mysql.ex( qry )\n if len( person ) == 0:\n return False\n return person[0]", "def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())", "def PolonaSlug(title:str):\n e_id = PolonaGetFirst(title)\n\n #Get data of an entity\n URL='https://polona.pl/api/entities/'+e_id+'/'\n\n r = requests.get(URL,None)\n data = r.json()\n \n return data['slug']", "def query_person_titles(self, name: str): #-> cursor object\n if not self.client:\n self.connect()\n query = templates.query_titles_by_person(name)\n return self.db.find(query).limit(30)", "def read_one(ppname):\n if ppname in pp_dict:\n pilotpoint = pp_dict.get(ppname)\n else:\n abort(\n 404, \"Person with last name {ppname} not found\".format(ppname=ppname)\n )\n return pilotpoint", "def _get_domain_for_name(self, name):\n domain = self.connection.lookupByName(name)\n return domain", "def getMember(unique_name):", "def getMember(unique_name):", "def parse_name_actor(soup, pageurl):\n\t# find fn (fullname) class\n\tname_span = soup.findAll('span', {'class': 'fn'});\n\t# if class does not exist, cannot get name\n\tif len(name_span) == 0:\n\t\tlogging.warning('' + pageurl + ' does not contain a name for the actor and will not be parsed')\n\t\treturn None\n\tname = name_span[0].get_text()\n\t# handle edge cases where HTML is butchered - cannot convert to JSON if this goes through\n\tif '<' in name:\n\t\tlogging.warning('' + pageurl + ' does not contain a name for the actor and will not be parsed')\n\t\treturn None\n\treturn name", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def lookup(name):" ]
[ "0.70989084", "0.62111163", "0.6066751", "0.60478956", "0.60363996", "0.6035082", "0.6015536", "0.60052705", "0.60052705", "0.5968631", "0.5958393", "0.5953464", "0.59316057", "0.5920444", "0.5912399", "0.5907915", "0.59019643", "0.5862391", "0.58374894", "0.58150226", "0.5787447", "0.5772161", "0.5698589", "0.56950414", "0.5686507", "0.5620296", "0.5620296", "0.5619041", "0.56187874", "0.56094515" ]
0.76045847
0
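A note on the person-lookup records above and below: each builds its SQL by escaping and interpolating the value into the query string. The same lookup written with driver-side placeholders avoids manual escaping; the sketch below uses Python's sqlite3 purely so it runs standalone (the records' own `Mysql` helper is not available here):

    import sqlite3

    def get_person_by_slug(conn, slug):
        # The placeholder lets the driver handle quoting instead of escape_string().
        cur = conn.execute("SELECT * FROM people WHERE slug = ?", (slug,))
        return cur.fetchone()

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE people (slug TEXT, name TEXT, wikipedia TEXT)")
    conn.execute("INSERT INTO people VALUES ('ada-lovelace', 'Ada Lovelace', '')")
    print(get_person_by_slug(conn, "ada-lovelace"))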
Get a person by the exact name
def getByName( self, people_name ): qry = """SELECT * FROM `%s`.`people` WHERE `name` = "%s"; """ % ( self.db_name, Mysql.escape_string( person_name ) ) person = Mysql.ex( qry ) if len( person ) == 0: return False return person[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_person(name):\n if ' ' in name:\n name = name.replace(',', '')\n else:\n return None\n\n try:\n (first, last) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n try:\n (last, first) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n return None", "def test_05_get_person_by_name(self):\n p1 = Person.query.first()\n p1_data = p1.wrap()\n p1_f_name = p1_data[\"first_name\"]\n # find by first name only\n # get part of name and search\n q_string = \"?first_name={}\".format(p1_f_name[:3]) # TODO - verify the length\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and last name\n p1_l_name = p1_data[\"last_name\"]\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], p1_l_name)\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and non-existing last name\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], \"iAmNotThere\")\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 0)", "def read_one(lname):\n # Does the person exist in people?\n if lname in PEOPLE:\n person = PEOPLE.get(lname)\n\n # otherwise, nope, not found\n else:\n abort(\n 404, \"Person with last name {lname} not found\".format(lname=lname)\n )\n\n return person", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def by_name(cls, name):\n return cls.all().filter('name =', name).get()", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def get_user_named(self, name: str) -> Union[discord.User, None]:\n result = None\n users = self.users\n\n if len(name) > 5 and name[-5] == \"#\":\n # The 5 length is checking to see if #0000 is in the string,\n # as a#0000 has a length of 6, the minimum for a potential\n # discriminator lookup.\n potential_discriminator = name[-4:]\n\n # do the actual lookup and return if found\n # if it isn't found then we'll do a full name lookup below.\n result = discord.utils.get(users, name=name[:-5], discriminator=potential_discriminator)\n if result is not None:\n return result\n\n def pred(user):\n return user.nick == name or user.name == name\n\n return discord.utils.find(pred, users)", "def get_by_name(self, 
name):\n return self.by_name.get(name.upper())", "def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())", "def get_person_name(self, person_id):\n res = requests.get(url=\"https://api.ciscospark.com/v1/people/{}\".format(person_id),\n headers=self.headers)\n\n try:\n class person(object):\n firstName = res.json()['firstName']\n lastName = res.json()['lastName']\n\n return person\n except AttributeError as e:\n print(res.text)\n return None", "def get_person(self, id):\n PERSON = \"\"\"SELECT name FROM Person\n WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\"\"\"SELECT name, id FROM Person WHERE id = %s\"\"\", (id,))\n self.db_cursor.execute(PERSON, (id,))\n self.db_connection.commit()\n p_attribs = self.db_cursor.fetchall()\n ret = Person()\n ret.name = p_attribs[0][0]\n ret.id = id\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve person: \" + str(id))\n return None\n\n return ret", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def find_by_name(self, name):\n return self.get(name)", "def getMember(unique_name):", "def getMember(unique_name):", "def getMemberFromName(self, name):\n for member in self.playersAndRoles:\n if name in member.user.name:\n return member", "def by_name(cls, name):\n u = cls.all().filter('name =', name).get()\n return u", "def get_employee_by_name(self, name):\n self.lock.acquire()\n for employee in self.__Session.query(Employee).all():\n if (employee.fname+' '+employee.lname == name):\n result = employee\n self.lock.release()\n return result", "def query_by_person(self, name: str) -> dict:\n if not self.client:\n self.connect()\n return self.client.moviebuff.castcrew.find_one({'Name': name})", "def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def get_employeeOnName(self, name):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))\n if (cursor.rowcount != 0):\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n else:\n return None", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def get_by_name(name: str):\n logger.debug('Retrieving employee by name %s.', name)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.name == name\n ).scalar()\n except Exception as exception:\n logger.error('An error occurred while retrieving employee by name %s.'\n ' Exception: %s', name, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by name %s.', name)\n return 
employee", "def search_for_name(self, name):\n for p in self.books_all:\n if p['name'] == name:\n return p", "def read_one(ppname):\n if ppname in pp_dict:\n pilotpoint = pp_dict.get(ppname)\n else:\n abort(\n 404, \"Person with last name {ppname} not found\".format(ppname=ppname)\n )\n return pilotpoint", "def find_by_name(name):\n return repository.find_by_name(name)" ]
[ "0.7679349", "0.72173005", "0.7037681", "0.6901341", "0.6892602", "0.6857909", "0.6857909", "0.68539655", "0.6847011", "0.68377894", "0.6833678", "0.68050104", "0.67762274", "0.6773553", "0.6739922", "0.6739922", "0.6681286", "0.6669767", "0.6653531", "0.6648262", "0.66175056", "0.66108286", "0.65630513", "0.6559298", "0.6559298", "0.6559298", "0.654941", "0.65392816", "0.6530697", "0.6485232" ]
0.74054974
1
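A note on the record above: the document's signature takes `people_name` but the body escapes `person_name`, so calling it as written would raise a NameError. A corrected, runnable sketch of the same lookup with the query helper injected (the record's own `Mysql.ex` is not available here), keeping the convention of returning False when nothing matches:

    def get_by_name(execute, db_name, person_name):
        # 'execute' stands in for the record's Mysql.ex query helper;
        # %%s becomes the driver placeholder %s after the db name is filled in.
        qry = "SELECT * FROM `%s`.`people` WHERE `name` = %%s;" % db_name
        rows = execute(qry, (person_name,))
        return rows[0] if rows else False

    # Exercised with a fake executor that returns one matching row.
    fake_rows = [{"name": "Ada Lovelace", "slug": "ada-lovelace"}]
    print(get_by_name(lambda q, params: fake_rows, "site", "Ada Lovelace"))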
Get a person by a LIKE match on the wiki URL
def getByWiki( self, wikipedia_url ): qry = 'SELECT * FROM `'+self.db_name+'`.`people` WHERE `wikipedia` LIKE "%'+ Mysql.escape_string( wikipedia_url ) + '%";' person = Mysql.ex( qry ) return person[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def wiki(self, ctx, *, parse: str):\n parse = parse.split(' ', 1)\n anti = [\"antibirth\", \"anti\"]\n rev = [\"revelations\", \"rev\"]\n subdomain = \"antibirth\" if parse[0] in anti else \"tboirevelations\" if parse[0] in rev \\\n else \"bindingofisaacrebirth\"\n parse = ' '.join(parse) if subdomain == \"bindingofisaacrebirth\" else parse[1]\n page = requests.get(f\"https://{subdomain}.gamepedia.com/index.php?search={parse}\")\n if \"search\" in page.url:\n soup = BeautifulSoup(page.content, 'html.parser')\n if re.sub(r'\\W+', '', parse.lower()) == \\\n re.sub(r'\\W+', '', soup.find(class_=\"unified-search__result__title\").get(\"data-title\").lower()):\n await ctx.send(soup.find(class_=\"unified-search__result__title\").get(\"href\"))\n else:\n await ctx.send(f\"I couldn't find an exact match. Here is a link to this query's search page. {page.url}\")\n else: await ctx.send(page.url)", "def show_person(szemelyi_szam):\n conn = get_db()\n try:\n cur = conn.cursor()\n try:\n # Note: don't use prefixes like \"oktatas.\" above for tables\n # within your own schema, as it ruins portability\n cur.execute('SELECT nev FROM oktatas.szemelyek WHERE szemelyi_szam = :sz',\n sz=szemelyi_szam)\n # fetchone() returns a single row if there's one, otherwise None\n result = cur.fetchone()\n # in Python '==' compares by value, 'is' compares by reference\n # (of course, former would work too, but it's slower and unnecessary)\n # 'None' is the Python version of null, it's a singleton object, so\n # we can safely compare to it using 'is' (Java/C#: result == null)\n if result is None:\n # no rows -> 404 Not Found (no need to return manually)\n abort(404)\n links = []\n try:\n # we query the Wikipedia API to see what happened the day\n # the person was born based on szemelyi_szam\n born = datetime.strptime(szemelyi_szam[1:7], '%y%m%d')\n params = {\n 'action': 'query',\n # 2012-04-01 -> \"April 01\" -> \"April 1\"\n 'titles': born.strftime('%B %d').replace('0', ''),\n 'prop': 'extlinks',\n 'format': 'json',\n }\n # API docs: http://www.mediawiki.org/wiki/API:Tutorial\n # Example for 1st April:\n # https://en.wikipedia.org/w/api.php?action=query&format=json&prop=extlinks&titles=April%201\n res = requests.get('https://en.wikipedia.org/w/api.php', params=params)\n for page in res.json()['query']['pages'].itervalues():\n for link in page['extlinks']:\n for href in link.itervalues():\n links.append(href)\n except IOError:\n pass # necessary if a clause would be empty in Python\n\n # result set rows can be indexed too\n return jsonify(nev=result[0], links=links)\n finally:\n cur.close()\n finally:\n conn.close()", "def show_person(szemelyi_szam):\n conn = get_db()\n try:\n cur = conn.cursor()\n try:\n # Note: don't use prefixes like \"oktatas.\" above for tables\n # within your own schema, as it ruins portability\n cur.execute('SELECT nev FROM oktatas.szemelyek WHERE szemelyi_szam = :sz',\n sz=szemelyi_szam)\n # fetchone() returns a single row if there's one, otherwise None\n result = cur.fetchone()\n # in Python '==' compares by value, 'is' compares by reference\n # (of course, former would work too, but it's slower and unnecessary)\n # 'None' is the Python version of null, it's a singleton object, so\n # we can safely compare to it using 'is' (Java/C#: result == null)\n if result is None:\n # no rows -> 404 Not Found (no need to return manually)\n abort(404)\n links = []\n try:\n # we query the Wikipedia API to see what happened the day\n # the person was born based on szemelyi_szam\n born = 
datetime.strptime(szemelyi_szam[1:7], '%y%m%d')\n params = {\n 'action': 'query',\n # 2012-04-01 -> \"April 01\" -> \"April 1\"\n 'titles': born.strftime('%B %d').replace('0', ''),\n 'prop': 'extlinks',\n 'format': 'json',\n }\n # API docs: http://www.mediawiki.org/wiki/API:Tutorial\n # Example for 1st April:\n # https://en.wikipedia.org/w/api.php?action=query&format=json&prop=extlinks&titles=April%201\n res = requests.get('https://en.wikipedia.org/w/api.php', params=params)\n for page in res.json()['query']['pages'].itervalues():\n for link in page['extlinks']:\n for href in link.itervalues():\n links.append(href)\n except IOError:\n pass # necessary if a clause would be empty in Python\n\n # result set rows can be indexed too\n return jsonify(nev=result[0], links=links)\n finally:\n cur.close()\n finally:\n conn.close()", "def match_userpage(address):\n user_page_fmt = re.compile(r'^.*stackoverflow\\.com\\/users\\/\\d+\\/\\(.*\\)$')\n username = user_page_fmt.match(address)\n if username:\n return username.group(0)\n else:\n return None", "def getBySlug( self, person_slug ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `slug` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_slug ) )\n person = Mysql.ex( qry )\n return person[0]", "def user(inp):\n user = inp.text.lower().replace(' ', '-')\n return 'http://www.wikidot.com/user:info/' + user", "def get_person_text(self, uid):\n words = \"\"\n\n query = \"\"\"\nSELECT ?overview ?researchO ?label\nWHERE\n{\n <%s> <http://vivoweb.org/ontology/core#overview> ?overview .\n <%s> <http://vivoweb.org/ontology/core#researchOverview> ?researchO .\n <%s> <http://www.w3.org/2000/01/rdf-schema#label> ?label .\n}\n \"\"\" % (uid, uid, uid)\n self.setQuery(query)\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n words = \"%s %s %s\" % (g['results']['bindings'][0]['overview']['value'], g['results']['bindings'][0]['researchO']['value'], g['results']['bindings'][0]['label']['value'])\n except:\n print \"Select failed: %s\" % query\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nSELECT ?name\nWHERE\n{\n ?auth vivo:relates <%s> .\n ?auth rdf:type vivo:Authorship .\n ?auth vivo:relates ?art .\n filter (?art!=<%s>) .\n ?art <http://vivoweb.org/ontology/core#dateTimeValue> ?date .\n ?date <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?art rdfs:label ?name .\n}\nLIMIT 20\n\"\"\" % (uid, uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n\nSELECT ?name\nWHERE\n{\n ?grant vivo:relates <%s> .\n ?grant rdf:type vivo:Grant .\n ?grant <http://vivoweb.org/ontology/core#dateTimeInterval> ?date .\n ?date <http://vivoweb.org/ontology/core#end> ?end .\n ?end <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?grant rdfs:label ?name .\n}\n\n \"\"\" % (uid))\n try:\n rval = self.query()\n try:\n g = 
rval.convert()\n except:\n pass\n\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n\n\n\n return words", "def retrieve_author_url(name):\n response = requests.get('https://api.github.com/search/users', {'q': name})\n data = json.loads(response.text)\n if data.get('total_count', 0) > 0:\n return data['items'][0]['html_url']\n else:\n print \"--- ERROR: no author URL retrieved for '{0}' ---\".format(\n response.url)\n return name", "def search(user, param):\r\n if len(param) <= 2:\r\n return bad_request(error_messages['too_short'])\r\n return search_user(param.lower(), user)", "def get_object_with_player(self, search_string):\n search_string = utils.to_unicode(search_string).lstrip('*') \n dbref = self.dbref(search_string)\n if not dbref: \n # not a dbref. Search by name.\n player_matches = User.objects.filter(username__iexact=search_string)\n if player_matches:\n dbref = player_matches[0].id\n # use the id to find the player\n return self.get_object_with_user(dbref)", "def query(url):", "def find_user_like(self, query):\n # if cache is empty, fill it\n if self.user_cache is None:\n self.user_cache = self.get_users()\n\n # if it's still empty, something's wrong\n if self.user_cache is not None:\n # search the names first\n for user in self.user_cache:\n if query in user[\"name\"]:\n return user\n # then search the emails\n for user in self.user_cache:\n if query in user[\"email\"]:\n return user\n return None", "async def wikipedia(self, ctx, *args):\n if args[0] == 'random':\n search_string = wp.random()\n else:\n search_string = ' '.join(args)\n try:\n page = wp.page(search_string)\n await ctx.send(page.url)\n self.logger.info(misolog.format_log(ctx, f\"\"))\n except wp.exceptions.DisambiguationError as error:\n await ctx.send(f\"```{str(error)}```\")\n self.logger.info(misolog.format_log(ctx, f\"Disambiguation page\"))", "def get_wiki_annotation(url):\n url_title = fully_unquote(url.split('/')[-1]).replace('_', ' ')\n params = {\n 'action': 'query',\n 'redirects': True,\n 'format': 'json',\n 'titles': url_title\n }\n wiki_response = requests.get(wiki_api_base, params)\n wiki_json = wiki_response.json()\n assert len(wiki_json['query']['pages'].keys()) == 1\n id_ = list(wiki_json['query']['pages'].keys())[0]\n wiki_title = wiki_json['query']['pages'][id_]['title']\n # wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(title.replace(' ', '_'))\n return wiki_title", "def get_contributor(username):\n\n return utils.fetch('contributor/%s' % username)", "def get_wikipedia_url(query):\n sitew = pywikibot.Site(\"en\", \"wikipedia\")\n result = None\n print(\"looking up:\", query)\n search = sitew.search(\n query, where=\"title\", get_redirects=True, total=1, content=False, namespaces=\"0\"\n )\n for page in search:\n if page.isRedirectPage():\n page = page.getRedirectTarget()\n result = page.full_url()\n break\n\n return result", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "async def cmd_wiki(self, args: Args, src: Src, **_):\n if not args:\n return \"Wikipedia, the Free Encyclopedia\\nhttps://en.wikipedia.org/\"\n query = \" \".join(args)\n 
self.log.f(\"wiki\", \"Query string: \" + query)\n\n response = Pidgeon(query).get_summary()\n title = response[1][\"title\"]\n url = \"https://en.wikipedia.org/wiki/\" + title\n if response[0] == 0:\n return response[1]\n else:\n if \"may refer to:\" in response[1][\"content\"]:\n em = discord.Embed(color=0xFFCC33)\n em.add_field(\n name=\"Developer Note\",\n value=\"It looks like this entry may have multiple results, \"\n \"try to refine your search for better accuracy.\",\n )\n\n else:\n em = discord.Embed(color=0xF8F9FA, description=response[1][\"content\"])\n em.set_author(\n name=\"'{}' on Wikipedia\".format(title),\n url=url,\n icon_url=\"https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png\",\n )\n\n await self.client.embed(src.channel, em)", "def get_by_username(cls, username):\n return cls.objects.get(username__iexact=username)", "def lookup_word(word):\n\n return API.get_response(word)", "def wikidata_search(request, str):\n url_head = 'https://www.wikidata.org/w/api.php?action=wbsearchentities&search='\n url_tail = '&language=en&format=json'\n if request.method == 'GET':\n r = requests.get(url_head+str+url_tail);\n return Response(r.json()['search'])\n #print r", "def match_userlist(address):\n userlist_subpage = re.compile(r'^.*stackoverflow\\.com\\/users?page=\\(\\d+\\).*$')\n top_userlist = re.compile(r'^.*stackoverflow\\.com\\/users$')\n\n subpage = userlist_subpage.match(address)\n if subpage:\n return subpage.group(0)\n\n top = top_userlist.match(address)\n if top:\n return conf.mainpagename\n\n else:\n return None", "def search_username(ausername):\n print \"Searching: \" + ausername + \" -> \",\n url = \"http://www.findmyfbid.com/\"\n post_data = \"https://www.facebook.com/\" + ausername\n user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:42.0) Gecko/20100101 Firefox/42.0\"\n headers = {'User-Agent': user_agent}\n req = requests.post(url, headers=headers, data = { \"url\": post_data})\n html_data = req.text\n soup = BeautifulSoup(html_data, 'html.parser')\n resp = str(soup.code)\n ugly1 = resp.split(\">\")\n ugly2 = ugly1[1].split(\"<\")\n if resp == \"<code>https://www.facebook.com</code>\":\n print \"No ID found :(\"\n else:\n print ugly2[0]", "def wikipedia_search(result,sentence):\r\n result = wikipedia.summary(result,sentences=sentence)\r\n return result", "def search_result(conn, url):\n sql = ''' SELECT * FROM results WHERE url= \\\"'''\n cur = conn.cursor()\n cur.execute(sql + url + \"\\\"\")\n return cur.fetchone()", "def query_by_name(url, params, name):\n params[\"query\"] = \"artist:\" + name\n return query_site(url, params)", "def search_for_meme(self, search):\n cursor = self.conn.cursor()\n cursor.execute(f\"select * from memes where lower(meme_name) like ?\", (f'%{search}%', ))\n results = cursor.fetchall()\n cursor.close()\n return results", "def parse_redditor(self, word):\n # Parse the possible username\n username = util.parse_username(word[3:])\n print(\"\\nPossible match: '\" + username + \"'...\", end=\"\")\n\n # Get the redditor\n try:\n redditor = self.reddit.get_redditor(username)\n except Exception as e:\n print(e)\n return None\n\n # Username was valid\n print('[MATCH FOUND]')\n return redditor", "def read_one(lname):\n # Does the person exist in people?\n if lname in PEOPLE:\n person = PEOPLE.get(lname)\n\n # otherwise, nope, not found\n else:\n abort(\n 404, \"Person with last name {lname} not found\".format(lname=lname)\n )\n\n return person", "def 
crawl_user(username):\r\n url_to_parse = 'http://habrahabr.ru/users/' + username + '/' \r\n root = ut.doc4url(url_to_parse)\r\n\r\n def get_set(css_class_name, set_num=0):\r\n \"\"\"\r\n Find in the page list of some hyperlinked properties\r\n (such as friends, interests, etc)\r\n and return a set of them.\r\n \"\"\"\r\n if not root:\r\n return None\r\n item = root.xpath('//dl[@class=\"%s\"]/dd' % css_class_name)\r\n if len(item) <= set_num:\r\n return None\r\n sets_node = item[set_num]\r\n item_set = set([ut.unicodeanyway(node.text).replace('\\n', '')\r\n for node\r\n in sets_node.xpath('.//a') if node.text is not None])\r\n \r\n \r\n \r\n return item_set\r\n\r\n user = so.SmartObject({\r\n 'interests' : get_set('interests'),\r\n 'companies' : get_set('companies_list'),\r\n 'friends' : get_set('friends_list'),\r\n 'hubs' : get_set('hubs_list'),\r\n 'invitees': get_set('friends_list', 1)\r\n }) \r\n return user" ]
[ "0.5960064", "0.5766146", "0.5766146", "0.5717716", "0.567234", "0.5618703", "0.55293447", "0.5504833", "0.54560703", "0.54344946", "0.5419914", "0.53738326", "0.53666824", "0.53523976", "0.5303817", "0.5298844", "0.5276349", "0.52679694", "0.52637225", "0.52612007", "0.522159", "0.52103287", "0.52044106", "0.5198485", "0.5179806", "0.5175744", "0.5165498", "0.5163388", "0.51523006", "0.5152114" ]
0.78201723
0
getRandom Gets a random person
def getRandom( self ): import random count = Mysql.ex( "SELECT count(*) AS c FROM `%s`.`people`;" % self.db_name ) the_id = random.randint( 1, count[0]['c'] ) people = self.getByID( the_id ) return people
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_random_user():\n return random.choice(User.query.all())", "def random_image(context, person):\n collection = db['people']\n images = collection.find({'person': person})\n row = []\n for image in images:\n row.append(image['image_url'])\n rand_img = random.choice(list(row))\n return context.channel.send(rand_img)", "def sample_personas(self):\n persona = self.personas_list\n n = constants.CURATED_PERSONA_CHOICES\n logging.info(\n f'Randomly choosing {n} personas from {len(persona)} available ones.'\n )\n if self.persona_replacement:\n return random.sample(persona, k=n)\n else:\n return [persona.pop() for _ in range(n)]", "def get_random_male_name ():\n return db_random_pop_default(DB_FIRST_MALE, \"John\")", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def getRandom(self):\n return random.choice(self.data)", "def random_user():\n\tcount = User.objects.count()\n\treturn User.objects.limit(-1).skip(randint(0,count-1)).next()", "def random():\r\n return R.NextDouble()", "def random(self):\r\n return random.randint(1, 4)", "def get_random_individual():\r\n return [ random.random() for _ in range(PARAMETERS_COUNT) ]", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def getRandom(self):\n # pick a random number from the list\n return random.choice(self.nums)", "def get_random_object():\n\n return random.choice([\n get_random_alphabetic_string,\n get_random_alphanumeric_string,\n get_random_integer,\n get_random_real_number\n ])()", "def generate_random_individual():\n genotype = []\n ### Your code here\n return {'genotype': genotype, 'fitness': None }", "def get_random_pet():\n resp = HTTP_request.get(' https://api.petfinder.com/v2/animals',\n params={\n \"limit\": 100,\n },\n headers={\"Authorization\": f\"Bearer {pet_finder_token}\"})\n\n pets = resp.json()[\"animals\"]\n\n random_pet = random.choice(pets)\n\n return {\"name\": random_pet[\"name\"], \"age\": random_pet[\"age\"], \"photo_url\": random_pet[\"photos\"][0][\"medium\"]}", "def getRandom(self):\n return random.choice(self.ls)", "def rand(self):\n raise NotImplementedError", "def randomLeggings():\n return random.choice(LEGGINGS)", "def getRandom(self):\n return self.nums[random.randint(0, len(self.nums) - 1)]\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()", "def get_random(self):\n return random.choice(self.proxies)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def name():\r\n return _random.choice([male_first(), female_first()])", "def random_team_member(self):\n TeamMember = apps.get_model('projects', 'TeamMember')\n team_member_queryset = TeamMember.objects.live().descendant_of(self)\n max_id = team_member_queryset.aggregate(max_id=Max(\"id\"))['max_id']\n if max_id is None:\n # there are no team members specified\n return None\n while True:\n pk = random.randint(1, max_id)\n team_member = team_member_queryset.filter(pk=pk).first()\n if team_member:\n return team_member.specific", "def random_girl(self):\n return [result for result in self._db.girls.find().limit(1) \\\n .skip(random.randrange(self._db.girls.count()))][0]", "def getRandom(self) -> int:\n return random.choice(list(self.d.keys()))", "def retrieve(team, year):\n return random.randint(100,200)", "def person(languages=None, genders=None):\n languages = languages or ['en']\n genders = genders or (GENDER_FEMALE, GENDER_MALE)\n\n\n lang = 
random.choice(languages)\n g = random.choice(genders)\n t = title([lang], [g])\n return first_name([lang], [g]), last_name([lang]), t, g", "def generate_RME():\n RME = [\"ogre\", \"goblin\", \"gnoll\", \"orc\", \"personal injury lawyer\"]\n monster = random.choice(RME)\n return monster", "def random (self, checkfn=None):\n if len(self) == 0:\n return None\n return self.random_pick(checkfn=checkfn)[1]" ]
[ "0.6894421", "0.6801399", "0.6705311", "0.6636933", "0.6617121", "0.6617121", "0.66101205", "0.654614", "0.6543784", "0.65312624", "0.6529804", "0.6522531", "0.6499706", "0.649846", "0.64737636", "0.6387686", "0.6385814", "0.6367649", "0.6353488", "0.6353425", "0.6351073", "0.634447", "0.63352597", "0.63346606", "0.6281401", "0.62441635", "0.6243421", "0.62287545", "0.62240016", "0.6219978" ]
0.77666205
0
Updates a person record by a diff of the values
def updateDiff( self, person_new, person_rec ): person_id = person_rec['id'] diff = {} if 'slug' in person_new and person_new['slug'] != person_rec['slug']: diff['slug'] = person_new['slug'] if 'wikipedia' in person_new and person_new['wikipedia'] != person_rec['wikipedia']: diff['wikipedia'] = person_new['wikipedia'] if len( diff ) > 0: diff['date_updated'] = Mysql.now() Mysql.update( 'people', diff, { 'id' : person_id } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(lname, person):\n # Does the person exist in people?\n if lname in PEOPLE:\n PEOPLE[lname][\"fname\"] = person.get(\"fname\")\n PEOPLE[lname][\"timestamp\"] = get_timestamp()\n\n return PEOPLE[lname]\n\n # otherwise, nope, that's an error\n else:\n abort(\n 404, \"Person with last name {lname} not found\".format(lname=lname)\n )", "def update_record(self):\n # print(self.get_hours_diff())\n conn = sqlite3.connect(\"LmtPilots.db\")\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Pilots_hours\")\n rowids = [row[0] for row in cur.execute('SELECT rowid FROM Pilots_hours')]\n cur.executemany('UPDATE Pilots_hours SET total=? WHERE id=?', zip(self.get_hours_diff(), rowids))\n conn.commit()\n self.db_model2.select()\n # print(self.get_tot_hours())", "def test_update_person(self) -> None:\n\n self.assertIsInstance(self.movement.update_persons(self.pop.get_person(),len(self.pop.get_person())), np.ndarray)\n self.pop.persons[:,idx.speed] = 0.1\n self.assertNotEqual(self.movement.update_persons(self.pop.get_person(),\n len(self.pop.get_person()),heading_update_chance=1)[:,idx.y_dir].any(), 0)\n self.assertNotEqual(self.movement.update_persons(self.pop.get_person(),\n len(self.pop.get_person()),heading_update_chance=1)[:,idx.x_dir].any(), 0)\n self.assertNotEqual(self.movement.update_persons(self.pop.get_person(),\n len(self.pop.get_person()),heading_update_chance=1)[:,idx.speed].any(), 0.1)", "def test_updating_record_with_kwargs(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Johnny\", last_name=\"John\", age=2\n )\n\n test_domain.repository_for(Person)._dao.update(person, age=10)\n u_person = test_domain.repository_for(Person)._dao.get(identifier)\n assert u_person is not None\n assert u_person.age == 10", "def add_or_edit_person(date, amount, name):\n if name not in names:\n person_new = {}\n person_new.update({\"name\": name})\n person_new.update({\"amount\": amount})\n person_new.update({\"date\": date})\n data_people.append(person_new)\n names.append(name)\n else:\n person_edit = next(filter(lambda person: person['name'] == name,\n data_people))\n person_edit[\"amount\"] += amount", "def test_changedata(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p = model.Person(id=id)\n p['firstname'] = \"Walter\"\n p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p2.firstname, \"Walter\")\n self.assertEqual(p2.lastname, \"Thelen\")", "def test_updating_record_with_dictionary_args(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Johnny\", last_name=\"John\", age=2\n )\n\n test_domain.repository_for(Person)._dao.update(person, {\"age\": 10})\n u_person = test_domain.repository_for(Person)._dao.get(identifier)\n assert u_person is not None\n assert u_person.age == 10", "def test_update_person(self):\n user = User.objects.create(username='test_user')\n user.set_password('test123')\n user.save()\n self.client.login(username='test_user', password='test123')\n\n data = {'first_name': 'Daenerys'}\n response = self.client.patch(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Person.objects.count(), 1)\n self.assertEqual(Person.objects.first().first_name, 'Daenerys')", "def api_can_update(self):\n person1 = User(name=\"test person1\",\n 
bio=\"test person1\",\n contact_info=\"test person\")\n person2 = User(name=\"test person2\",\n bio=\"test person2\",\n contact_info=\"test person\")\n person1.save()\n person2.save()\n # update_person = self.client.put(\n # reverse('details', kwargs={'pk': person1.id}),\n # person2, format='json'\n # )\n self.assertEqual(self.client.get('/api/guru'), 200)", "def test_update_delta(civic, diff, delta, updated_data, main_data):\n civic._update_delta(delta, 'genes', updated_data['genes'],\n main_data['genes'])\n assert delta['genes']['UPDATE'] == diff['genes']['UPDATE']\n\n civic._update_delta(delta, 'variants', updated_data['variants'],\n main_data['variants'])\n assert delta['variants']['UPDATE'] == diff['variants']['UPDATE']\n\n civic._update_delta(delta, 'evidence', updated_data['evidence'],\n main_data['evidence'])\n assert delta['evidence']['UPDATE'] == diff['evidence']['UPDATE']", "def update_record():\n global data_base, table, choice, res, output1, result, column_names, choice_row, number_str\n a = output1.get('1.0', END)\n a = a[0:-1]\n changed_string = a.split('\\n')\n changed_string = changed_string[0:-2]\n\n number_str = int(number_str) - 1\n source_string = []\n for i in result:\n for j in i:\n source_string.append(j)\n\n for i in range(0, 4):\n try:\n if changed_string[i] == source_string[i]:\n pass\n except IndexError:\n pass\n else:\n param_value = changed_string[i]\n step = i\n param_column = column_names[step]\n sqlite3_update_record(data_base, table, param_column, param_value, choice_row, res)\n output1.delete(1.0, END)", "def test_updating_record_with_both_dictionary_args_and_kwargs(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Johnny\", last_name=\"John\", age=2\n )\n\n test_domain.repository_for(Person)._dao.update(\n person, {\"first_name\": \"Stephen\"}, age=10\n )\n u_person = test_domain.repository_for(Person)._dao.get(identifier)\n assert u_person is not None\n assert u_person.age == 10\n assert u_person.first_name == \"Stephen\"", "def run_update_example():\n table = \"actors\"\n update_values = {\n 'name': \"Christopher\",\n 'last_name': \"Eccleston\"}\n update_conds = {'id': 1}\n print querify.update_from_dict(table, update_values, update_conds)", "def update(max_iterations):\n persons = get_persons()\n count = 0\n for person in persons:\n if count > max_iterations:\n return\n count += 1\n if choice([0, 1]):\n new_person = make_random('en')\n new_person['id'] = person['id']\n params = {\"event\": \"contact.update\",\n \"data\": new_person}\n request(params)", "def difference_update(self, *others):\r\n return self.sdiffstore(self.r_key, slef.r_key, *[o.r_key for o in others])", "def edit_person():\n # get person name from user\n responses = accept_inputs([\"Person's name\"])\n person_name = responses[\"Person's name\"]\n # check for existence\n results = query_with_results(\"select * from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s'.\" % person_name)\n return\n else:\n # get id of person\n id = query_with_results(\"select id from person where name = ?\", [person_name])[0][0]\n # the task exists, so ask the user for the new description\n responses = accept_inputs([\"New name\"])\n # update db\n query_no_results(\"update person set name = ? 
where id = ?\", [responses[\"New name\"], id])\n print(\"Person with old name '%s' changed to '%s'.\" % (person_name, responses[\"New name\"]))", "def test_update(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Candy\")\n assert n_updated == 1\n items = list(test_store.get_by())\n\n candy.age = 15\n assert andy in items\n assert pandy in items\n assert candy in items", "def change_entry_in_record(d):\n\n print(\"\\nEnter the name of the person for which you'd like to change information\")\n firstname = input('First name: ')\n lastname = input('Last name: ')\n\n for pid in d:\n if firstname == d[pid].get('First name') and lastname == d[pid].get('Last name'):\n print(\"\\n# We have located your friend. Please enter which entry you'd like to change.\\n\"\n \"1: First name\\n\"\n \"2: Last name\\n\"\n \"3: Phone number\\n\"\n \"4: Address\")\n entry = input(\"Enter a number between 1-4: \")\n\n if entry == '1':\n d[pid]['First name'] = input('Enter a new first name: ')\n print('\\n# First name has been changed')\n elif entry == '2':\n d[pid]['Last name'] = input('Enter a new last name: ')\n print('\\n# Last name has been changed')\n elif entry == '3':\n d[pid]['Phone'] = input('Enter a new phone number: ')\n print('\\n# Phone number has been changed')\n elif entry == '4':\n d[pid]['Adress'] = input('Enter a new address: ')\n print('\\n# Address has been changed')\n else:\n print('\\n# 404 - Page cannot be found')\n\n return d\n print('# The contact is not in the phone book')", "def test_update_record(self):\n pass", "def _diff(self, param, diff):\n pass", "def update_dict(new,old):", "def test_update_values(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n\n # independently save over a new count value, unknown to original instance\n m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n m1.count = 6\n m1.save()\n\n # update the text, and call update\n m0.update(text='monkey land')\n self.assertEqual(m0.text, 'monkey land')\n\n # database should reflect both updates\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n self.assertEqual(m2.count, m1.count)\n self.assertEqual(m2.text, m0.text)", "def test_update_values(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n\r\n # independently save over a new count value, unknown to original instance\r\n m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n m1.count = 6\r\n m1.save()\r\n\r\n # update the text, and call update\r\n m0.update(text='monkey land')\r\n self.assertEqual(m0.text, 'monkey land')\r\n\r\n # database should reflect both updates\r\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n self.assertEqual(m2.count, m1.count)\r\n self.assertEqual(m2.text, m0.text)", "def test_update_multiple(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 14}, age=12)\n assert n_updated == 2\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 14\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_update__endtoend__3(\n address_book, FieldFactory, UpdateablePersonFactory, browser):\n field_name = FieldFactory(\n address_book, IPerson, 'Bool', u'Ever met').__name__\n UpdateablePersonFactory(address_book, **{field_name: False})\n browser.login('mgr')\n browser.keyword_search(KEYWORD, apply='Update')\n browser.getControl('field').displayValue = ['person -- Ever met']\n browser.getControl('Next').click()\n 
browser.getControl('yes').click()\n browser.getControl('operation').displayValue = [\n 'replace existing value with new one']\n browser.getControl('Next').click()\n # Update sets the value to 'yes':\n assert '<td>Tester</td><td>yes</td>' in browser.contents_without_whitespace", "def update(self, values):\n pass", "def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def test_update__endtoend__2(search_data, browser):\n browser.login('mgr')\n browser.keyword_search('family', apply='Update')\n browser.getControl('field').displayValue = ['person -- last name']\n browser.getControl('Next').click()\n browser.getControl('new value', index=0).value = ''\n browser.getControl('operation').displayValue = [\n 'append new value to existing one']\n browser.getControl('Next').click()\n # The last name column is displayed as a link column it contains the\n # unchanged last name:\n assert ('<td><a href=\"http://localhost/ab/Person-2\">Koch</a></td>' in\n browser.contents)", "def update(table, id_):\n\n # 4\n for index in range(len(table)):\n if table[index][0] == id_:\n addnew = ui.get_inputs(\n ['name: ', 'birth_year: '],\n 'Updating list of hr')\n addnew.insert(0, id_)\n table[index] = addnew\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n pass" ]
[ "0.6076236", "0.5890265", "0.5691879", "0.56393296", "0.5562567", "0.556077", "0.5553972", "0.55507255", "0.5535648", "0.5531998", "0.54642683", "0.53892994", "0.5387568", "0.5334643", "0.5320626", "0.5311849", "0.52786183", "0.5250379", "0.5241066", "0.5220597", "0.5213094", "0.52032846", "0.51976603", "0.51953715", "0.5192142", "0.5190126", "0.51803696", "0.5166258", "0.51507205", "0.5143531" ]
0.73809475
0
Determine the minimum of three values min=x
def minimum(x,y,z): return min(min(x,y),z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min(x):\n pass", "def localmin(x):\r\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def localmin(x):\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1", "def Min(data):\n return data.min()", "def x_min(self):\n return self.get_min_value(self.X_INDEX)", "def calc_min(data: list) -> float:\n acc = data[0]\n for n in data:\n if n < acc:\n acc = n\n return float(acc)", "def minimum(self):\n return min(self.numbers)", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)", "def getMinX(self):\n return self.minx", "def find_local_min_x(self, Ns=None):\n if Ns is None:\n Ns = self.num\n with self.fix_evaluator():\n params = np.linspace(0, np.pi, Ns)\n dx_func = lambda param: self.diff(param)[0]\n dx = [dx_func(param) for param in params]\n # roots of dx are extrema of x\n roots = find_all_roots(params, dx, func=dx_func)\n if len(roots) < 3: # need at least two maxima and a minimum\n return None\n # take the interior root with smallest x-value\n return min(roots[1:-1], key=lambda param: self(param)[0])", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX", "def my_func(a, b, c):\r\n return (a + b + c) - min(a, b, c)", "def getXmin(self):\n return min(self.p1.x, self.p2.x)", "def minimum(x, y):\r\n # see decorator for function body\r", "def cmin(self):\n return self[\"cmin\"]", "def cmin(self):\n return self['cmin']", "def _get_minimum(self):\n return self._minimum", "def getmin(self):\n\n return self.X", "def get_minimum():\n return [\n convert_variables([0.78547, 0.78547, 0.78547]),\n ]", "def _minimum(self) -> float:\n if self._type == \"power\":\n return 1.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_min\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]", "def find_min(self):\n return self.min", "def find_min(self):\n return self.min", "def fmin(items):\n if len(items) == 0:\n return 0.\n\n return min(items)", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def _get_min_positive_value(self, a, b):\n if a < 0 and b >= 0:\n return b\n if a >= 0 and b < 0:\n return a\n return min(a, b)", "def get_min(fun):\n\tglobal __dataset\n\n\tmin_val = sys.maxint\n\tmin_index = 0\n\tfor i, vec in enumerate(__dataset):\n\t\tret = fun(vec)\n\t\tif ret < min_val:\n\t\t\tmin_val = ret\n\t\t\tmin_index = i\n\treturn min_index, min_val" ]
[ "0.7294391", "0.71236384", "0.709427", "0.7075521", "0.70469224", "0.69684076", "0.6939228", "0.6930442", "0.6890841", "0.6890841", "0.68730426", "0.68183523", "0.6759273", "0.67541385", "0.67298275", "0.67280006", "0.67271304", "0.67210686", "0.6683923", "0.6676451", "0.6675343", "0.6670654", "0.66513765", "0.6646325", "0.6646325", "0.66446555", "0.66197234", "0.66197234", "0.6617119", "0.66100276" ]
0.8020329
0
Print source matrix for visual representation
def print_matrix(matrix,source,target): print(u"\t (s)\u2192 \t",end='') for c in target: print("%2s\t"%c,end='') print() for x in range(0,len(matrix)): if(x==0): print(u"(t)\u2193 \t",end='') else: print("%2s\t"%source[x-1],end='') for y in range(0,len(matrix[0])): #print("%2d (%d,%d)\t"%(matrix[x][y],x,y),end='') print("%2d \t"%(matrix[x][y]),end='') print("")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_matrix(self):\n print(self.matrix)", "def PrintMatrix(self):\n # loop through the rows\n for i in range(self.rows):\n # intialise the matrix\n mat = []\n # loop through the column\n for j in range(self.cols):\n # append matrix element\n mat.append(self.matrix[i][j])\n # print the matrix\n print(mat)", "def show(self):\n\t\tprint(\"Square Matrix:\")\n\t\tfor i in range(0, len(self.lables)):\n\t\t\tprint(self.matrix[i])", "def __repr__(self):\n return repr(self.matrix)", "def print_matrix(matrix):\n [print(*line) for line in matrix]", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_matrices(self):\n\n \"\"\"\n Print Optimal Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Optimal Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\")\n else:\n print('\\t', end=\"\")\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.optimal[i][j]) + '\\t', end=\"\"),\n print(\"\")\n\n \"\"\"\n Print Direction Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Direction Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\"),\n else:\n print('\\t', end=\"\"),\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.direction[i][j]) + '\\t', end=\"\"),\n print(\"\")", "def __repr__(self):\n return self.matrix and '\\n'.join([\"|%s|\" % s for s in [' '.join([\"%-6.3f\" % e for e in w]) for w in self.matrix]]) or \"<pusta macierz>\"", "def print_maze_contents(self):\r\n for row in range(0, self.__ny):\r\n print(\"row \", row)\r\n for col in range(0, self.__nx):\r\n print(self.__maze[row][col].__str__())\r\n print()", "def showMatrix(self, frame, matrix, label=''): \n M = self.matrix2Table(matrix)\n mtable = self.showTable(frame, M, label)\n return mtable", "def pretty_print(self):\r\n out = \"\"\r\n\r\n rows,cols = self.matrix.shape\r\n\r\n for row in xrange(0,rows):\r\n out += \"[\"\r\n\r\n for col in xrange(0,cols):\r\n out += \"%+0.2f \"%self.matrix[row][col]\r\n out += \"]\\n\"\r\n\r\n return out", "def log_transformation_matrix(self):\n\t\tprint(f'cube({self.width}, {self.height}, {self.length}):')\n\n\t\trows = self.matrix.tolist()\n\n\t\tfor row in rows:\n\t\t\tprint(list(map(self.trunc, row)))", "def print_matrix(M):\n print(\"printing Matrix\")\n for row in M:\n for val in row:\n print'{:3}'.format(val),\n print", "def mprint(self):\n for i in range(len(self.matrix)):\n for j in self.matrix[i]:\n print(j, end=\" \")\n print()\n pass", "def print_matrix(s, t, m, p=2):\n rows = len(m)\n cols = len(m[0])\n output = \" \"\n\n # print target word across top of matrix\n output += \" #\" + \" \" * p\n for i in range(cols - 1):\n output += str(t[i]) + \" \" * p\n output += \"\\n\"\n\n # print source word vertically before each row\n for r in range(rows):\n if r > 0:\n output += str(s[r - 1]) + \" [\"\n else:\n output += \"# [\"\n\n # print matrix rows\n for c in range(cols):\n output += str(m[r][c])\n\n if c == cols - 1:\n output += \"]\\n\"\n else:\n output += \", \"\n\n print(output)\n return output", "def __str__(self) -> str:\n\t\treturn f\"dim {self.dimM},{self.dimN}\" +\"\\n\" \\\n\t\t\t+ \"\\n\".join(\"\".join(str(n) for n in m) 
for m in self.matrix)", "def print_matrix(x, y, A):\n\n # decide whether there is a 0th row/column\n if len(x) == len(A):\n print \"%5s\" % (\" \"),\n else:\n print \"%5s %5s\" % (\" \",\"*\"),\n y = \"*\" + y\n\n # print the top row\n for c in x:\n print \"%5s\" % (c),\n print\n\n for j in xrange(len(A[0])):\n print \"%5s\" % (y[j]),\n for i in xrange(len(A)):\n print \"%5.0f\" % (A[i][j]),\n print", "def print_matrix(x, y, A):\n\n # decide whether there is a 0th row/column\n if len(x) == len(A):\n print \"%5s\" % (\" \"),\n else:\n print \"%5s %5s\" % (\" \",\"*\"),\n y = \"*\" + y\n\n # print the top row\n for c in x:\n print \"%5s\" % (c),\n print\n\n for j in xrange(len(A[0])):\n print \"%5s\" % (y[j]),\n for i in xrange(len(A)):\n print \"%5.0f\" % (A[i][j]),\n print", "def __repr__(self):\n output = \"\"\n output +=\"V:\\n\"\n for row in self.V:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\" \n \n output += \"\\nW:\\n\"\n for row in self.W:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\"\n return output", "def print_matrix_on_screen(matrix, width=5):\n for row in matrix:\n print(''.join(['{0:>{w}}'.format(item, w=width) for item in row]))", "def show_np(mat):\n for x in range(15):\n for y in range(15):\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif mat[x, y, 0] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif mat[x, y, 1] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def display(self):\n for row in self.tile_rows:\n print(row)", "def show_map(map_):\n for r in map_.matrix:\n print(''.join(r))\n print()", "def print_matrix(matrix):\n\n print(result_is)\n max_len = max((len(str(round(n))) for row in matrix for n in row))\n cell_pattern = \"{{:{pos}.{part}f}}\"\\\n .format(pos=max_len + max_decimals + 2, part=max_decimals)\n for row in matrix:\n row_gen = (cell_pattern.format(cell) for cell in row)\n print(*row_gen)", "def print_matrix(matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n print(matrix[i][j], end='\\t')\n print('')", "def printInfo(matrix):\n\n print(\"Groups:\")\n for group in matrix.matrix.group_labels:\n print(\"\\t{0}\".format(group))\n\n print(\"Samples:\")\n for sample in matrix.matrix.sample_labels:\n print(\"\\t{0}\".format(sample))", "def print(self):\n for l in range(self.h+1):\n print(\"Weight matrix between layer \" + str(l) + \" and layer \" + str(l+1))\n print(self.W[l])", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def _print_matrix_info(mtrx, name):\r\n pr = lambda t: print(\"ht3_solver:\\t\" + t)\r\n pr(\"MATRIX INFO:\")\r\n pr(\"Matrix:\\t\" + name)\r\n pr(\"Description:\\t\" + str(mtrx.description))\r\n pr(\"Shape:\\t\" + str(mtrx.shape))", "def print_grid(self):\r\n\t\tprint self.grid" ]
[ "0.7653871", "0.7148127", "0.70552987", "0.6862053", "0.66274035", "0.6617468", "0.66046643", "0.6604163", "0.6551498", "0.6539895", "0.6511098", "0.6499741", "0.648501", "0.64522415", "0.6444947", "0.6436321", "0.6391782", "0.6391782", "0.6387855", "0.63827", "0.638262", "0.6379424", "0.63664526", "0.63628715", "0.63398933", "0.6336254", "0.6318183", "0.63180834", "0.63070184", "0.6288248" ]
0.74877894
1
Take an ordered list and return a set of pairs >>> list_to_pairs([1, 2, 3, 4]) set([(1, 2), (3, 4)]) >>> list_to_pairs(['a', 'b', 'c', 'd']) set([('a', 'b'), ('c', 'd')])
def list_to_pairs(l): return {(l[2*i], l[2*i+1]) for i in range(len(l)/2)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _list2pair(s_list):\n return s_list.pair", "def __unordered_pairs(l):\n\n return [(l[i], l[j]) for i in range(len(l) - 1) for j in range(i + 1, len(l))]", "def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]", "def pairs(lst):\n i = iter(lst)\n prev = next(i)\n for item in i:\n yield prev, item\n prev = item", "def list_to_set(l): \r\n s = { item for item in l }\r\n return s", "def pair_combos(iterable):\n pairs = set()\n for a in iterable:\n for b in iterable:\n pairs.add(a + b)\n return list(pairs)", "def toPairs(self):\n result = Pairs()\n for first, second in enumerate(self):\n if first < second:\n result.append((first, second))\n return result", "def get_pairs(my_list):\n return [(current, my_list[idx + 1] if - 1 else None) for idx, current in enumerate(my_list) if idx < len(my_list) - 1]", "def get_pairs(word):\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs", "def get_pairs(word):\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs", "def removeDuplicates(list):\n\treturn Set((item for item in list))", "def get_pairs(word):\r\n pairs = set()\r\n prev_char = word[0]\r\n for char in word[1:]:\r\n pairs.add((prev_char, char))\r\n prev_char = char\r\n return pairs", "def removeDuplicates(list):\n\treturn set((item for item in list))", "def symbolize_pairs(list_of_pair_string: str) -> list:\n symbolized_pairs = []\n for pair in list_of_pair_string:\n symbolized_pairs.append(pair[0] + '-' + pair[1])\n\n return symbolized_pairs", "def all_pairs(elements):\n if len(elements) < 2:\n return []\n elif len(elements) == 2:\n return [(elements[0], elements[1])]\n else:\n new_pairs = []\n for elt in elements[1:]:\n new_pairs.append((elements[0], elt))\n return all_pairs(elements[1:]) + new_pairs", "def make_pairs(sequence):\n length = len(sequence)\n return [\n (sequence[i], sequence[i + 1])\n for i in range(length - 1)\n ]", "def pair(first, second):\n return [first, second]", "def pairs_as_dict(pairs: Iterable[Tuple[Any, Any]]) -> Dict[Any, List[Any]]:\n d = defaultdict(list)\n for p in pairs:\n d[p[0]].append(p[1])\n return d", "def pairs(iterable):\n previous = None\n for item in iterable:\n current = item\n if previous is not None:\n yield previous, current\n previous = current", "def get_synset_pairs(synset: Synset) -> list:\n # Remove phrasal expressions from the literals\n literals = remove_phrases(synset.literals)\n\n # Generate a list of unique pairs representing the cartesian product of the list of literals of the single synset\n pairs = unique([tuple(sorted((w1, w2), key=itemgetter(0))) for w1 in literals for w2 in literals if not w1 == w2])\n return pairs", "def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))", "def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj", "def pairwise(lst):\r\n if not lst: return\r\n\r\n for i in range(len(lst)-1):\r\n yield lst[i], lst[i+1]\r\n yield lst[-1], None", "def pairwise(s: List[Any]) -> Iterator[Tuple[Any, Any]]:\n\n a, b = itertools.tee(s)\n next(b, None)\n return zip(a, b)", "def remove_duplicates(pairs):\n unique_pairs = []\n pair_list = {}\n for i in range(len(pairs)):\n for j in range(len(pairs[0])):\n # This is to remove self-matches\n if i == pairs[i][j]:\n continue\n if (\"%d,%d\" % (i, pairs[i][j]) not in 
pair_list):\n # This is stored to remove symmetric duplicates\n pair_list[\"%d,%d\" % (i, pairs[i][j])] = 1\n pair_list[\"%d,%d\" % (pairs[i][j], i)] = 1\n unique_pairs.append([i, pairs[i][j]])\n return unique_pairs", "def list_to_set(llist : LinkedList) -> set:\n current_node = llist.head\n lset = set()\n while current_node is not None:\n lset.add(current_node.value)\n current_node = current_node.next\n \n return lset", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)", "def two_pair(ranks):\n pairlist = ()\n for r in ranks:\n if ranks.count(r) == 2: pairlist = pairlist +(r, )\n set(pairlist)\n pairlist = tuple(set(pairlist))\n if len(pairlist) == 2:\n return pairlist\n else:\n return None", "def from_list():\n my_list = [1, 2, 3, 3, 3, 4]\n my_set = set(my_list)\n #new_list = [my_set] # [set([1, 2, 3, 4])]\n new_list = list(my_set) # [1, 2, 3, 4]\n print(new_list)", "def _remove_duplicates(input_list):\n return list(OrderedDict.fromkeys(input_list))" ]
[ "0.660942", "0.64966893", "0.6364352", "0.61183286", "0.605809", "0.6053188", "0.6031079", "0.5851849", "0.58477825", "0.58477825", "0.58171743", "0.5804332", "0.56152576", "0.5568751", "0.5562793", "0.5527791", "0.5483368", "0.54775673", "0.5433456", "0.54054826", "0.53941983", "0.5362208", "0.53473264", "0.5319942", "0.5302615", "0.52760315", "0.523991", "0.52149886", "0.52056044", "0.51811683" ]
0.69936943
0
Whether we have any path or not
def __nonzero__(self): return any(self.path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Exists(self, path: str) -> bool:\n ...", "def started_path(self):\n if self.ros_node.get_data('/diff_drive/path_achieved') is None:\n return False\n return not self.ros_node.get_data('/diff_drive/path_achieved')", "def exists(self, path):", "def exists(self):\r\n return os.path.exists(self.full_path)", "def check_path(self, path):\n if path in self.app_path:\n return True\n else:\n return False", "def check_path(filename):\n return not bool(checkPath(filename))", "def isPath(self,pin,head=\"check path exist\",exit_on_error=False,logmsg=False):\n p = os.path.abspath(self.expandvars(pin))\n if os.path.isdir(p):\n if logmsg:\n logger.info(head + \"\\n --> dir exist: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n return p\n #--- error no such file\n logger.error(head + \"\\n --> no such directory: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n if exit_on_error:\n raise SystemError(self.__MSG_CODE_PATH_NOT_EXIST)\n return False", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def exists(self):\n return self.islink() or exists(self._path)", "def is_path(self, s):\n return True", "def has_path_source(self) -> bool:\n\n return any(self.is_path_type(x) for x in self.parameters)", "def _path_not_in_cfg(p):\n\n n = cfg.get_any_node(p.addr, is_syscall=p.jumpkinds[-1].startswith('Ijk_Sys'))\n if n is None:\n return True\n\n if n.simprocedure_name == 'PathTerminator':\n return True\n\n return False", "def available(self):\n contextPhyPath = self.context.getPhysicalPath()\n portalPhyPath = api.portal.get().getPhysicalPath()\n path = [elem for elem in list(contextPhyPath) if elem not in list(portalPhyPath)] # noqa\n depth = len(path)\n if depth < 2:\n return False\n return True", "def exists(self, path: str) -> bool:\n pass", "def test_empty(self):\n self.assertFalse(os.path.exists('/'))", "def path_exists(path):\n if path.startswith('http://') or path.startswith('https://'):\n return True\n\n return isfile(path)", "def is_log_path_valid(self):\n if self.log_paths:\n return self.path in self.log_paths\n else:\n # If .log_paths is empty, just assume all paths are legal\n return True", "def path_exists(self, path):\n try:\n os.stat(path)\n except OSError:\n return False\n return True", "def file_exist() -> bool:\n pass", "def has_path(self, source, target):\n try:\n sp = nx.shortest_path(self.G, source, target)\n except nx.NetworkXNoPath:\n return False\n return True", "def check_path(data_pointer, log, msg):\n if not os.path.exists(data_pointer):\n log.debug(msg)\n return False\n else:\n return data_pointer", "def path_exists(path):\r\n return os.path.exists(path)", "def has_default_path(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"has_default_path\")", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def is_nonempty_analysis(self, path):\r\n if not os.path.exists(path):\r\n return False\r\n empty_prefix = self.empty_prefix()\r\n with open(path, 'r') as infile:\r\n prefix = infile.read(len(empty_prefix))\r\n return prefix != empty_prefix", "def has_path(t, word):\n assert len(word) > 0, 'no path for empty words.'\n \"*** YOUR CODE HERE ***\"", "def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False", "def exists(self):\n return self.path.exists()", "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def exists(self, path: PathLike):" ]
[ "0.71787864", "0.70898473", "0.7088618", "0.7060411", "0.7047555", "0.70403135", "0.701854", "0.6938369", "0.6864744", "0.6841349", "0.6832685", "0.6822189", "0.67805845", "0.67735195", "0.67616457", "0.676147", "0.675315", "0.6720118", "0.67038804", "0.67023015", "0.67017573", "0.66864526", "0.668424", "0.667071", "0.66590285", "0.66527677", "0.66494817", "0.6638916", "0.66340655", "0.6621058" ]
0.8143155
0
The length of our path. If we have no path, then 0; if path is a string, then 1; if path is an array, then the length of the array
def __len__(self): if self.path_is_string: if self.path: return 1 else: return 0 else: if self.path_type in (list, tuple): if not any(item for item in self.path): return 0 return len(self.path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return len(self.path)", "def __len__(self):\n return len(self.paths)", "def __len__(self):\n return len(self.paths)", "def get_path_length(self) :\n return self.path_length", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.A_paths)", "def __len__(self):\n return len(self.file_paths)", "def __len__(self):\n return int(np.floor(len(self.wav_paths)))", "def calculateWireLenght(path_list):\n\n total_length = 0\n for path in path_list:\n total_length += len(path)\n return total_length", "def __len__(self):\n return len(self.img_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def size(path):", "def getPathLength(self, path):\r\n path_nodes = path\r\n # print(path_nodes)\r\n total_length = 0 # km\r\n for i in range(len(path_nodes)-1):\r\n next_edge = None\r\n for edge in self.graph.getAdj(path_nodes[i]):\r\n if edge.getExtremityNode() == path_nodes[i+1]:\r\n next_edge = edge\r\n if next_edge is None: # it means the path is invalid\r\n return None\r\n total_length += next_edge.getWeight()\r\n # print(next_edge.getTravelType(), end=\" \")\r\n return total_length", "def path_depth(path):\n parts = os.path.dirname(path).split('/')\n parts = [part for part in parts if part != '']\n length = len(parts)\n return length", "def __len__(self):\n return len(self.files[self.split])", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def get_length(array):\n return len(list(array))", "def path_cost(path):\n return len(path)", "def __len__(self):\n return len(self.imgs_path)", "def path_length(G, path, weight=\"weight\"):\n length = 0\n u = path[0]\n for v in path[1:]:\n length += G[u][v][weight]\n u = v\n return length", "def Length(self) -> int:", "def Length(self) -> int:", "def __len__():", "def __len__():", "def __len__():", "def evaluatePath(self):\n pathLength = 0\n if len(self.path) > 0:\n previousCity = self.path[0]\n for ind in range(1, len(self.path)):\n pathLength += previousCity.distanceWith(self.path[ind].name)\n previousCity = self.path[ind]\n return pathLength" ]
[ "0.7701619", "0.7640768", "0.7640768", "0.7519123", "0.73827785", "0.73827785", "0.73827785", "0.73827785", "0.7197739", "0.7128884", "0.69983554", "0.6982287", "0.697171", "0.697171", "0.697171", "0.6954649", "0.6872361", "0.6815414", "0.68066716", "0.6795057", "0.6773987", "0.67624176", "0.6761947", "0.6742368", "0.6720989", "0.6720989", "0.66896397", "0.66896397", "0.66896397", "0.6659181" ]
0.8607569
0
Iterate through the parts of our path
def __iter__(self): if self.path_is_string: if self.path: yield self.path else: for part in self.path: yield part
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterPath(self, path):\n \n pathIndex = 0\n folderNames = path.split(self.PATH_SEPARATOR)\n \n while pathIndex < len(folderNames):\n yield folderNames[pathIndex], self.PATH_SEPARATOR.join(folderNames[0:pathIndex + 1])\n pathIndex += 1", "def path_entries(self):", "def traverse(self, path):\n\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n # print(path)\n # print('files:', self.files)\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory is a file\n break\n directory = directory[path_list[index]]\n index += 1\n print('info', directory, path_list[index:])\n return directory, path_list[index:]", "def __iter__(self):\n for path in self._paths: yield autopaths.Path(path.complete_path)", "def path_it(d):\n for p in _path_walk([], d):\n yield p", "def paths(self):\n return self._visit(self.start)", "def __iter__(self):\n for (_,_,path) in self.frontierpq:\n yield path", "def traverse(name, furtherPath):", "def __iter__(self):\n for path_id in self._path_ids:\n yield (path_id, getattr(self, path_id))", "def print_paths(self):\n for path_key, path_value in self.paths.items():\n # Handler for request in path\n self.current_path = path_key\n for request_key, request_value in path_value.items():\n if request_key == 'parameters':\n continue\n self.get_main_title(path_key, request_key)\n self.get_description(request_value)\n self.get_status_code_and_schema_rst(request_value['responses'])\n self.get_params(path_value['parameters'], 'param')\n self.get_params(request_value['parameters'], 'query')", "def path(self):\n\n for obj in self.lineage():\n yield obj.text", "def IteratePathParents(start_path):\n path = os.path.abspath(start_path)\n yield path\n while path.strip('/'):\n path = os.path.dirname(path)\n yield path", "def _iter_variant_extracted_paths(root, path, variants):\n for variant in sorted(variants, key=len, reverse=True):\n inner_path = os.path.join(*[str(request) for request in variant])\n resolved_path = os.path.join(root, inner_path)\n\n if filer.in_directory(path, resolved_path, follow=False):\n yield path.replace(inner_path + os.sep, \"\")", "def zenpathsplit(self, path):\n return zenpathsplit(path)", "def traverse(self, path):\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory is a file\n break\n directory = directory[path_list[index]]\n index += 1\n return directory, path_list[index:]", "def explode(self, path):\n log(6, \"explode\")\n gibs = []\n\n head = path\n while True:\n\n if head == \"/\":\n gibs.insert(0, head)\n break\n\n head, tail = os.path.split(head)\n gibs.insert(0, tail)\n\n return gibs", "def do_filepath_forloop(self, line):\n self.E_str = \"do_filepath_forloop\"\n line = line.replace(\" \", \"\")\n line = line[line.find(\"filepath\")+5:]\n filepath_str, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n filepath_str = gen_parse.rm_quotation_marks(filepath_str)\n\n all_filepaths = glob.glob(filepath_str)\n if not all_filepaths:\n self.print_warning(\"I can't find anything matching the filepath you've enterred!\")\n\n return all_filepaths", "def get_paths(self):\n return self.path.split(',')", "def EnumeratePaths(args, paths):\n for fn in paths:\n try:\n # 3 - for ftp://, 4 for http://, 5 for https://\n if fn.find(\"://\") in (3,4,5):\n yield fn\n if os.path.islink(fn) and 
args.skiplinks:\n pass\n elif os.path.isdir(fn) and args.recurse:\n for f in DirEnumerator(args, fn):\n yield f\n elif os.path.isfile(fn):\n yield fn\n except Exception as e:\n print(\"EXCEPTION %s accessing %s\" % (e, fn))", "def _handle_path(path: str) -> Callable:\n parts = Path(path).parts\n\n result = _cogs\n for part in parts:\n result = result[part]\n\n return result", "def split_input_dirs(self, paths):\n\n for path in paths:\n yield path", "def path(self):\n\t\tif '/' in self.name:\n\t\t\treturn self.name.split(\"/\")\n\t\telse:\n\t\t\treturn self.name.split(\"\\\\\")", "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def test_get_parts(self):\n pass", "def _traverse_path(path):\n path = Path(path)\n\n if path.is_dir():\n yield from path.rglob(\"*\")\n else:\n yield path", "def getpath(data, path):\n\n for p in path.split('.'):\n data = data[p]\n\n return data", "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n for part in dirname.strip('/').split(os.path.sep)[2:][-2:] + [fname_noext]:\n for match in PATH_SPLIT.split(part):\n if match:\n yield match", "def __split_path(path: str) -> List[str]:\n return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process", "def pathComponents(path):\n parts = [p for p in path.split(os.path.sep) if p not in [\"\", \".\"]]\n return parts", "def split_path(full_path, root_path):\n root_len = len(root_path)\n parsed_list = full_path[root_len+1:].split('/') \n \n return parsed_list" ]
[ "0.6414649", "0.63788563", "0.63580775", "0.6353441", "0.63335335", "0.61753285", "0.6140012", "0.6139662", "0.6131938", "0.611136", "0.600778", "0.5968926", "0.59639174", "0.5872381", "0.58316004", "0.58115816", "0.57891244", "0.57866406", "0.5755483", "0.5751014", "0.57189083", "0.57042503", "0.5651182", "0.5650989", "0.56352246", "0.55913", "0.55904293", "0.5588357", "0.5582231", "0.55404574" ]
0.7063032
0
If the path is a string, treat it as a list of that one string, otherwise, treat path as it is and get the index of the path as specified by key
def __getitem__(self, key): path = self.path if self.path_is_string: path = [path] return path[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self, path):\n try:\n indices = [int(x) if x.isdigit() else x for x in split(r'[\\/\\[\\]]+', path[1:])]\n return reduce(lambda x, y: x[y], indices, self.document)\n except:\n return None", "def _get_array_index(array_path):\n\n if not array_path.startswith('@'):\n raise XJPathError('Array index must start from @ symbol.')\n array_path = array_path[1:]\n if array_path == 'last':\n return -1\n if array_path == 'first':\n return 0\n if array_path.isdigit() or (array_path.startswith('-')\n and array_path[1:].isdigit()):\n return int(array_path)\n else:\n raise XJPathError('Unknown index reference', (array_path,))", "def _get_by_path(dic, keys):\n assert len(keys) > 0, \"Path key can not be an empty list.\"\n\n d = dic\n for key in keys[:-1]:\n if isinstance(key, int) or key in d:\n d = d[key]\n else:\n return None\n if keys[-1] in d or (isinstance(d, list) and keys[-1] < len(d)):\n return d[keys[-1]]\n\n return None", "def _split_key(cls, logical_key):\n if isinstance(logical_key, str):\n path = logical_key.split('/')\n elif isinstance(logical_key, (tuple, list)):\n path = logical_key\n else:\n raise TypeError('Invalid logical_key: %r' % logical_key)\n return path", "def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1", "def ex_path(path):\n if path is None:\n return []\n elif isinstance(path, str):\n return path.split(\",\")\n elif isinstance(path, list):\n return path\n return []", "def get_entry(obj, *path):\n\n try:\n for elem in path:\n is_index = isinstance(elem, int)\n is_list = isinstance(obj, list)\n if is_index != is_list:\n raise UpdateException('index given for non-list or vice versa')\n obj = obj[elem]\n return obj\n except Exception as ex:\n path_str = '/'.join(map(str, path))\n msg = f'unable to access object path \"/{path_str}\"'\n raise UpdateException(msg) from ex", "def _pathlist(self, key, arr):\n params = {}\n i = 0\n for value in arr:\n i += 1\n params[\"%s.%s\" % (key, i)] = value\n return params", "def index(self, key: _K) -> int: # type: ignore[override]\n if isinstance(key, int):\n if 0 <= key < len(self.__keys):\n return key\n raise IndexError(key)\n elif isinstance(key, str):\n try:\n return self.__keys.index(key)\n except ValueError as exc:\n raise KeyError(key) from exc\n else:\n raise TypeError(key)", "def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1", "def index_config(config, path, index_structure=True):\n key = None\n sections = path.split(\"/\")\n if not index_structure:\n key = sections[-1]\n sections = sections[:-1]\n for section in sections:\n if isinstance(config, dict):\n if section not in config:\n raise ValueError(\"Invalid path %s in config\" % path)\n config = config[section]\n elif isinstance(config, list):\n section_index = None\n try:\n section_index = int(section)\n except ValueError:\n for i, block in enumerate(config):\n if isinstance(block, dict) and block.get(\"name\") == section:\n section_index = i\n break\n if section_index is None:\n raise ValueError(\n \"Expected an array index in path, but got %s instead\" % section\n )\n config = config[section_index]\n else:\n raise ValueError(\n \"Paths in config can only represent object and array structures\"\n )\n if index_structure:\n return config\n else:\n return config, key", "def access_path(data: dict or any, path: list[str]) -> any:\n if path:\n first = path[0]\n rest = path[1:]\n return access_path(data[first], rest)\n 
return data", "def get_path(event, path):\n if isinstance(path, string_types):\n path = [path]\n node = event\n for subpath in path:\n try:\n node = getitem(node, subpath)\n except:\n if hasattr(event, \"meta\"):\n sys.stderr.write(\"event.meta: {}\\n\".format(event.meta))\n sys.stderr.write(\n \"node = {} type = {}, subpath = {} type = {}\\n\".format(\n node, type(node), subpath, type(subpath)\n )\n )\n raise\n return node", "def string_to_index(s):\n s = Unquote(s)\n if s == \".\":\n return ()\n return tuple(s.split(\"/\"))", "def check_and_resolve_path(key, parameter):\n if 'paths' in key:\n return [resolve_relative_path(p) for p in parameter]\n if 'path' in key:\n return resolve_relative_path(parameter)\n return parameter", "def get_index(self, key):\n return self.keys.index(key)", "def __getitem__(self, item):\n if isinstance(item, str):\n item = [i for i, v in enumerate(self.list) if item == v.name]\n if len(item) > 0:\n item = item[0]\n return self.list[item]", "def _index_from_key(self, key):\n\t\t\n\t\treturn self.columns.index(str.upper(key[0])), self.rows.index(key[1])", "def _findPosition(self, key):\n for i in range(len(self._entryList)):\n if self._entryList[i].key == key:\n return i\n return None", "def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)", "def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)", "def find(self, list, key, value):\n for i, dic in enumerate(list):\n if dic[key] == value:\n return i\n return -1", "def get_by_list_of_keys(dictionary: Dict, key_path: List[Any]) -> Dict:\n if len(key_path) == 1:\n return dictionary[key_path[0]]\n else:\n return get_by_list_of_keys(dictionary[key_path[0]], key_path[1:])", "def getpath(data, path):\n\n for p in path.split('.'):\n data = data[p]\n\n return data", "def path(self, paths):\n resolved = paths[0]\n try:\n data = self.story.resolve_context(paths[0])\n item = data[paths[0]]\n for path in paths[1:]:\n if isinstance(path, str):\n item = item[path]\n\n assert isinstance(path, dict)\n object_type = path.get(\"$OBJECT\")\n if object_type == \"range\":\n item = self.range(path[\"range\"], item)\n else:\n resolved = self.object(path)\n # Allow a namedtuple to use keys or index\n # to retrieve data.\n if TypeUtils.isnamedtuple(item) and isinstance(\n resolved, str\n ):\n item = getattr(item, resolved)\n else:\n item = item[resolved]\n return item\n except IndexError:\n raise StoryscriptRuntimeError(\n message=f\"List index out of bounds: {resolved}\"\n )\n except (KeyError, AttributeError):\n raise StoryscriptRuntimeError(\n message=f'Map does not contain the key \"{resolved}\". '\n f\"Use map.get(key: <key> default: <default value>) to \"\n f\"prevent an exception from being thrown. 
Additionally, you \"\n f\"may also use map.contains(key: <key>) to check if a key \"\n f\"exists in a map.\"\n )\n except TypeError:\n return None", "def _resolve_path(d, path):\n accum_value = d\n for node_key in path:\n accum_value = accum_value[node_key]\n return accum_value", "def findIndex(lst, key, value):\r\n\r\n for i, dic in enumerate(lst):\r\n if dic['properties'][key] == value:\r\n return i\r\n return -1", "def DeterminePath_Index(self, path = None, currentPosition = None):\n\t\tif path == None:\n\t\t\tpath = self.path\n\n\t\tif currentPosition == None:\n\t\t\tcurrentPosition = self.currentPosition\n\n\t\t# Lowest distance\n\t\tlowest_d = 0\n\t\t# lowest index\n\t\tlowest_i = 0\n\n\t\t# print(len(path))\n\n\t\tfor index in range(len(path)):\n\t\t\t# print(index)\n\t\t\tif index == len(path) - 1:\n\t\t\t\t# We are interpolating forward\n\t\t\t\tbreak\n\t\t\tsegment = self.InterpolateSection(path[index], path[index + 1])\n\n\t\t\tlowestDistance = 0\n\n\t\t\tfor x in range(len(segment)):\n\t\t\t\tif x == 0:\n\t\t\t\t\t# As required for initial value\n\t\t\t\t\tlowestDistance = Distance_LatLongs(segment[x].Latitude, segment[x].Longitude, currentPosition.Latitude, currentPosition.Longitude)\n\t\t\t\t\tcontinue\n\n\t\t\t\tdistance = Distance_LatLongs(segment[x].Latitude, segment[x].Longitude, currentPosition.Latitude, currentPosition.Longitude)\n\n\t\t\t\tif distance < lowestDistance:\n\t\t\t\t\tlowestDistance = distance\n\n\t\t\tif index == 0:\n\t\t\t\tlowest_d = lowestDistance\n\t\t\t\tlowest_i = index\n\t\t\telse:\n\t\t\t\tif lowestDistance < lowest_d:\n\t\t\t\t\tlowest_d = lowestDistance\n\t\t\t\t\tlowest_i = index\n\n\t\treturn lowest_i", "def _get_signal_index(self, signal):\n # Process signal :\n signal = signal.replace(', :', '').replace(':, ', '')[1:-1]\n # Find index :\n idx = tuple(int(k) for k in signal.split(', '))\n return self._navidx.index(idx)", "def index(self, value):\n self.__validate_value(value)\n for index, v in enumerate(self.__list):\n if v == value:\n return index" ]
[ "0.7110364", "0.6245824", "0.5962914", "0.5931321", "0.58305013", "0.5829733", "0.5766378", "0.57603914", "0.5714388", "0.5689732", "0.56681454", "0.56269306", "0.5546474", "0.55295324", "0.547897", "0.5431614", "0.54167795", "0.54066426", "0.5379117", "0.5333005", "0.5333005", "0.53296757", "0.5326073", "0.5309709", "0.52961516", "0.52926165", "0.5276392", "0.52720994", "0.52693486", "0.5263097" ]
0.65758604
1
Return whether the first part of this path is this string
def first_part_is(self, key):
    if self.path_is_string:
        return self.path.startswith(str(key) + '.')
    if not self.path:
        return not bool(key)
    if self.path_type is list:
        return self.path[0] == key
    if self.path_type is Path:
        return self.path.first_part_is(key)
    return self.joined().startswith(str(key) + '.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None", "def is_path(self, s):\n return True", "def match_substring(self, str):\n if self.repo_relative_path.find(str) >= 0:\n return True\n\n if self.uuid:\n if (\"uuid://%s%s\" % (self.uuid, self.repo_relative_path)).find(str) >= 0:\n return True\n\n if self.url:\n if (self.url + self.repo_relative_path).find(str) >= 0:\n return True\n\n return False", "def startswith(self, base):\n if self.path_is_string:\n return self.path.startswith(base)\n if not self.path:\n return not bool(base)\n if self.path_type is list and len(self.path) is 1:\n return self.path[0].startswith(base)\n return self.joined().startswith(base)", "def check_prefix(custom_str: str) -> bool:\r\n\r\n return len(custom_str) == 0", "def _preset_start(self, hdr):\n test = hdr.read(8)\n hdr.seek(-8, 1)\n\n try:\n test = self._cleanup_string(test)\n except UnicodeDecodeError:\n # Some non-null filler bytes\n return False\n\n # First entry in preset is .isf filename with full path\n # If preset is used at all, first entry is non-null.\n # Paths start with / (older Sun systems), or drive letter\n # (D: newer Windows systems)\n if re.match('[A-Z]:', test) or re.match('/.', test):\n return True\n else:\n return False", "def is_key_string(string):\r\n return len(string) > 1 and string[0] == '_'", "def is_path(cls, path_or_content: str):\n return (\n len(str(path_or_content).strip().splitlines()) == 1 and\n (os.path.splitext(path_or_content)[1] in cls.valid_file_extensions)\n )", "def has_prefix(cls, string1, string2):\n return len(cls.get_prefix(string1, string2)) > 0", "def _is_substring(s1, s2):\n\treturn s1.find(s2) != -1", "def _IsPathParameter(comp: str) -> bool:\n return comp.startswith(\"<\") and comp.endswith(\">\")", "def is_simple_name(s):\n\n assert utils.is_string_type(s)\n assert len(s) > 0\n\n def valid_first(c):\n return c.isalpha() or c == \"_\"\n def valid_later(c):\n return c.isalnum() or c == \"_\"\n return valid_first(s[0]) and all(valid_later(c) for c in s)", "def is_command(text):\n return text.startswith('/')", "def is_command(text):\n return text.startswith('/')", "def command_basename_startswith(self, op):\n return self.__command_basename.startswith(op)", "def matches_path(cls, path):\n return path.startswith('/') or \\\n path.startswith('./') or \\\n path.startswith('../') or \\\n path.startswith('file://')", "def is_file_o(value):\n if not (type(value) is str and os.path.split(value)[0]):\n return False\n else:\n return True", "def is_content(cls, path_or_content):\n return any(path_or_content.lstrip().startswith(s) for s in cls.valid_content_start)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_c4x_path(path_string):\r\n return StaticContent.ASSET_URL_RE.match(path_string) is not None", "def is_path(path_or_stream):\n return isinstance(path_or_stream, anyconfig.compat.STR_TYPES)", "def startsWith(self, p: str) -> bool:\n return not p or p[0] in self.d and self.d[p[0]].startsWith((len(p) > 1 and p[1:]) or '')", "def simple(self) -> bool:\n return is_simple(self.string)", "def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)", "def _pattern_is_simple(pattern):\n return bool(re.match('[\\\\w_]+$', tostring(pattern)))", "def starts_with(s, prefix):\n if prefix == '':\n return True\n elif s[0] != prefix[0]:\n return False\n else: # s[0] == prefix[0]\n return starts_with(s[1:], prefix[1:])", "def hasSuffix(self, s):\n node, off = 
self.followPath(s)\n if node is None:\n return False # fell off the tree\n if off is None:\n # finished on top of a node\n return '$' in node.out\n else:\n # finished at offset 'off' within an edge leading to 'node'\n return node.lab[off] == '$'", "def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n path = path + os.sep\n if path[0:len(root)] == root:\n return True\n return False", "def stringcheck(self, rule, string):\n if not \"*\" in rule:\n return rule in string\n elif rule[0] == \"*\":\n return string.endswith(rule[1:])\n elif rule[-1] == \"*\":\n return string.startswith(rule[:-1])\n else:\n start, end = rule.split(\"*\")\n return string.startswith(start) and string.endswith(end)", "def is_action_str(string: str) -> bool:" ]
[ "0.7293922", "0.6914573", "0.68029654", "0.66941047", "0.6247078", "0.62381357", "0.622055", "0.61871207", "0.61856234", "0.6183542", "0.61692894", "0.61627215", "0.6134897", "0.6134897", "0.61286926", "0.61160666", "0.60987896", "0.6065164", "0.60475963", "0.60429084", "0.6023953", "0.60107285", "0.5990941", "0.5963782", "0.5946686", "0.59430516", "0.5937853", "0.5934463", "0.59140307", "0.5890043" ]
0.77528477
0
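Illustrative sketch (hypothetical names, not the source project's Path class): the first_part_is document in the row above checks whether a dotted path begins with a given first segment. A minimal standalone version of the same idea, assuming a path is either a dotted string or a list of parts:

def first_part_is(path, key):
    # Plain dotted string: "images.dev.tag" has first part "images".
    if isinstance(path, str):
        return path.startswith(key + ".")
    # Sequence of parts: ["images", "dev"] has first part "images".
    if not path:
        return not key
    return path[0] == key

assert first_part_is("images.dev.tag", "images")
assert not first_part_is("imagesx.dev", "images")
assert first_part_is(["images", "dev"], "images")
assert first_part_is([], "")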
Does the path start with this string?
def startswith(self, base):
    if self.path_is_string:
        return self.path.startswith(base)
    if not self.path:
        return not bool(base)
    if self.path_type is list and len(self.path) is 1:
        return self.path[0].startswith(base)
    return self.joined().startswith(base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matches_path(cls, path):\n return path.startswith('/') or \\\n path.startswith('./') or \\\n path.startswith('../') or \\\n path.startswith('file://')", "def starts_with(self, prefix: str) -> bool:\n return self.search(prefix, True)", "def startsWith(self, prefix: 'str') -> 'bool':\n p = self.root\n for ch in prefix:\n if ch in p:\n p = p[ch]\n else:\n return False\n return True", "def first_part_is(self, key):\n if self.path_is_string:\n return self.path.startswith(str(key) + '.')\n if not self.path:\n return not bool(key)\n if self.path_type is list:\n return self.path[0] == key\n if self.path_type is Path:\n return self.path.first_part_is(key)\n return self.joined().startswith(str(key) + '.')", "def startsWith(self, prefix: str) -> bool:\n return bool(self.find(prefix))", "def starts_with(s, prefix):\n if prefix == '':\n return True\n elif s[0] != prefix[0]:\n return False\n else: # s[0] == prefix[0]\n return starts_with(s[1:], prefix[1:])", "def command_basename_startswith(self, op):\n return self.__command_basename.startswith(op)", "def startswith( self, prefix ):\n return len(self.commands) >= 1 and self.commands[0].startswith( prefix )", "def startsWith(self, prefix):\n now = self.tree\n for i in prefix:\n if i in now:\n now = now[i]\n else:\n return False\n return True", "def startsWith(self, prefix: str) -> bool:\n curr_chars = self.chars\n for c in list(prefix):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return True", "def startsWith(self, prefix: str) -> bool:\n return self._traverse(prefix)", "def startsWith(self, p: str) -> bool:\n return not p or p[0] in self.d and self.d[p[0]].startsWith((len(p) > 1 and p[1:]) or '')", "def startsWith(self, prefix: str) -> bool:\n cur = self.root\n for letter in prefix:\n if letter not in cur:\n return False\n cur = cur[letter]\n return True", "def match_substring(self, str):\n if self.repo_relative_path.find(str) >= 0:\n return True\n\n if self.uuid:\n if (\"uuid://%s%s\" % (self.uuid, self.repo_relative_path)).find(str) >= 0:\n return True\n\n if self.url:\n if (self.url + self.repo_relative_path).find(str) >= 0:\n return True\n\n return False", "def starts_with(self, prefix: str) -> bool:\n curr = self.root\n for ch in prefix:\n curr = curr.children.get(ch)\n if curr is None:\n return False\n return True", "def startswith(value, s):\n\n if not value: return False\n return value.find(s) == 0", "def start_with(self, prefix):\n return self.__find_node(prefix) != None", "def startswith(self, s):\n return self.peek((0, len(s))).startswith(s)", "def startswith(self, s):\n return self.peek((0, len(s))).startswith(s)", "def starts_with(strn, prefix):\n return strn.startswith(prefix)", "def startsWith(self, prefix: str) -> bool:\n word = prefix\n if len(word) == 0:\n return True\n idx = ord(word[0]) - ord('a')\n if self.children[idx] is None:\n return False\n return self.children[idx].startsWith(word[1:])", "def startsWith(self, prefix):\n if prefix[0] not in self.trie:\n return False\n cur = self.trie[prefix[0]]\n for char in prefix[1:]:\n if char not in cur.nexts:\n return False\n cur = cur.nexts[char]\n return True", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for char in prefix:\n if char not in node:\n return False\n node = node[char]\n return True", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for char in prefix:\n if char not in node:\n return False\n node = node[char]\n return True", "def is_path(self, s):\n return True", "def start_with(self, prefix):\n node = 
self.search_prefix(prefix)\n return node is not None", "def starts_with(self, other: \"ProofPath\") -> bool:\n return self.common_prefix_len(other) == len(other)", "def startsWith(self, prefix):\n level = self.trie\n for c in prefix:\n if c in level:\n level = level[c]\n else:\n return False\n return True", "def startsWith(self, prefix: str) -> bool:\n temp=self.root\n for char in prefix:\n index=ord(char)-ord('a')\n \n if(not temp.children[index]):\n return False\n temp=temp.children[index]\n \n return True", "def startsWith(self, prefix: str) -> bool:\n current = self.root\n for letter in prefix: \n current = current.children.get(letter)\n if not current:\n return False\n return True" ]
[ "0.75289965", "0.7167043", "0.71369225", "0.7123099", "0.70859516", "0.7052085", "0.7035095", "0.7010681", "0.69964033", "0.6967141", "0.6956085", "0.69028676", "0.6893171", "0.6884508", "0.68805164", "0.68793577", "0.68676823", "0.68597203", "0.68597203", "0.6850657", "0.6827935", "0.6799538", "0.67746586", "0.67746586", "0.6761419", "0.6760654", "0.6735999", "0.6722866", "0.67055404", "0.6691028" ]
0.7676601
0
Return a clone of this path with all the same values
def clone(self):
    joined_function = lambda: dot_joiner(self.path, self.path_type)
    return self.__class__(self.path, self.configuration, self.converters, self.ignore_converters, joined_function=joined_function)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):\n # Run the constructor.\n other = BoardPath()\n # Copy the object variables\n other._current_cost = self._current_cost\n other._path = self._path[:]\n other._current_loc = self._current_loc\n return other", "def copy(self):\n return PathPoint(self.species.new_species(), deepcopy(self.constraints))", "def clone(self):\n return copy.deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return self.copy()", "def clone(self) -> Any:\n clone = super().clone()\n clone.clear()\n return clone", "def clone(self):\n cloned = Graph()\n for v in self.vertices:\n cloned.vertices[v] = self.vertices[v].clone()\n return cloned", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def clone(self):\n sc=copy.copy(self)\n sc.farms=list()\n for f in self.farms:\n sc.farms.append(f.clone(f.name, f.size))\n sc.airborne=list()\n for a in self.airborne:\n sc.airborne.append(a.clone(a.farma, a.farmb, a.distance))\n return sc", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def clone(self):", "def copy(self):\r\n return copy.deepcopy(self)", "def clone(self):\n return shallow_clone(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)" ]
[ "0.7665588", "0.7400094", "0.704908", "0.70057935", "0.70057935", "0.70057935", "0.70057935", "0.6978792", "0.6946278", "0.6935552", "0.69177884", "0.68884027", "0.68542576", "0.6847932", "0.6830563", "0.679955", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586", "0.67944586" ]
0.74887073
1
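Illustrative sketch (hypothetical names, not the original library): the clone document above rebuilds the object through self.__class__ with the same field values plus a lazily evaluated joined form. A standalone example of that pattern:

class DottedPath:
    def __init__(self, parts, configuration=None, joined_function=None):
        self.parts = parts
        self.configuration = configuration
        self._joined_function = joined_function or (lambda: ".".join(parts))

    def joined(self):
        return self._joined_function()

    def clone(self):
        # Rebuild through self.__class__ so subclasses clone into their own type,
        # passing the same values and a fresh lazy joiner.
        joined_function = lambda: ".".join(self.parts)
        return self.__class__(self.parts, self.configuration, joined_function=joined_function)

original = DottedPath(["images", "dev"], configuration={"tag": "latest"})
copy = original.clone()
assert copy is not original
assert copy.joined() == original.joined() == "images.dev"
assert copy.configuration is original.configuration  # same values, shared rather than deep-copied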
Do the conversion on some path if any conversion exists. Return (converted, did_conversion), where ``did_conversion`` is a boolean indicating whether a conversion took place.
def do_conversion(self, value):
    converter, found = self.find_converter()
    if not found:
        return value, False
    else:
        converted = converter(self, value)
        self.converters.done(self, converted)
        if hasattr(converted, "post_setup"):
            converted.post_setup()
        return converted, True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "def find_converter(self):\n if self.ignore_converters:\n return None, False\n return self.converters.matches(self)", "def performConversion(self):\n return _libsbml.CompFlatteningConverter_performConversion(self)", "def convert(\n value: str,\n conversion_formulas: Iterable[ConversionFormula[ConvertResultType]]) -> ConvertResultType:\n none_applied = True\n\n for conversion_formula in conversion_formulas:\n if conversion_formula.applicable(value):\n none_applied = False\n try:\n return conversion_formula.load(value)\n except Exception:\n pass\n\n if none_applied:\n raise NoApplicableConversionFormulaError(\n f\"Could not find applicable gonversion formula for {value}\")\n raise NoSuccessfulConversionFormulaError(\n f\"All applicable conversion formulas failed to run successfully for {value}.\")", "def convert(self):\n logger.info('Convert: %s' % self.base)\n if self.mimetype in ['application/msword',\n \"application/vnd.openxmlformats-officedocument\"\n \".wordprocessingml.document\"]:\n if 'multipart/related' in self.stream:\n self.process_mht()\n returncode = self.convert_docfile(self.docx_path, self.name.docx,\n self.docbook_path, self.name.xml)\n else:\n returncode = self.convert_docfile(self.source_path, self.name,\n self.docbook_path, self.name.xml)\n if returncode is False:\n returncode = self.convert_docfile(self.source_path, self.name,\n self.docx_path, self.name.docx)\n returncode = self.convert_docfile(self.docx_path, self.name.docx,\n self.docbook_path, self.name.xml)\n if not os.path.exists(os.path.join(\n self.docbook_path, self.name.xml)):\n logger.info('Not exists')\n self.resultcode = 2\n return False\n if returncode is False:\n self.resultcode = 3\n return False\n self.remove_note()\n self.file_docbook_to_markdown()\n logger.info(' '.join([self.base.base, self.name.base, 'Success']))\n self.resultcode = 0\n return True\n else:\n logger.info('Skip')\n self.resultcode = 1\n return False", "def getConverter( format ):\n\n data = set(format.split(\"-\"))\n\n if \"one\" in data:\n if \"forward\" in data:\n if \"closed\" in data:\n return __one_forward_closed \n else:\n return __one_forward_open\n else:\n if \"closed\" in data:\n return __one_both_closed\n else:\n return __one_both_open\n else:\n if \"forward\" in data:\n if \"closed\" in data:\n return __zero_forward_closed\n else:\n return __zero_forward_open\n else:\n if \"closed\" in data:\n return __zero_both_closed\n else:\n return __zero_both_open", "def _process_convert_output_(self, output_data, **kwargs):\n accept_input, current_state, output = output_data\n if kwargs['full_output']:\n if current_state.label() is None:\n return (accept_input, current_state, None)\n else:\n return (accept_input, current_state, output)\n else:\n if not accept_input:\n return None\n return output", "def _process_convert_output_(self, output_data, **kwargs):\n if kwargs['always_include_output']:\n return super(Automaton, self)._process_convert_output_(\n output_data, **kwargs)\n accept_input, current_state, output = output_data\n if kwargs['full_output']:\n return (accept_input, current_state)\n else:\n return accept_input", "def _process_convert_output_(self, output_data, **kwargs):\n accept_input, current_state, output = output_data\n return (accept_input, current_state, output)", "def conversion(is_train, is_extrapolation):\n context = composition.Context()\n # TODO(b/124038528): implement extrapolation for fraction conversions 
too\n if is_extrapolation or random.choice([False, True]):\n return _conversion_decimal(\n context, is_train=is_train, is_extrapolation=is_extrapolation)\n else:\n return _conversion_fraction(context, is_train=is_train)", "def save_converted_paths(\n src_trace_tuples: Sequence[TraceTuple],\n dest_trace_tuples: Sequence[TraceTuple],\n driver: str,\n destination: Path,\n):\n for src_trace_tuple, dest_trace_tuple in zip(src_trace_tuples, dest_trace_tuples):\n for original_path, convert_path in zip(\n (src_trace_tuple.traces_path, src_trace_tuple.area_path),\n (dest_trace_tuple.traces_path, dest_trace_tuple.area_path),\n ):\n convert_filetype(original_path, destination / convert_path, driver=driver)", "def precheck(self):\n if (not dfs.exists(self.outputpath)):\n logger.debug(\"precheck(%s): outputpath %s does not exist, ready to run.\" \n % (self, self.outputpath))\n return 'ready'\n inTSs = [dfs.modtime(file) for file in self.inputpaths]\n outTS = dfs.modtime(self.outputpath)\n newer = reduce(lambda x,y: x or y, [(inTS>outTS) for inTS in inTSs])\n logger.debug(\"Input timestamps: %s\" % inTSs)\n logger.debug(\"Output timestamp: %s\" % outTS)\n if newer:\n logger.debug(\"At least one input file is newer than outputfile, ready to run.\")\n dfs.delete(self.outputpath)\n return 'ready'\n else:\n logger.debug(\"All input files are newer than outputfile, skipping.\")\n return 'skip'", "def has_conversion(self, plugin):\n plugin = kurt.plugin.Kurt.get_plugin(plugin)\n return plugin.name in self._plugins", "def conversion(arg1, arg2=None, name=None, cost=None):\n if arg2 is None:\n return conversion_method(arg1, name=name, cost=cost)\n else:\n return computation(arg1, arg2, name=name, cost=cost)", "def convert (self, lossless=False):\n self._has_errors = False\n if self._progress:\n max_val = 0\n for root, dirs, files in os.walk(self._in_dir):\n max_val += len(files)\n self._bar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar()],\n maxval=max_val).start()\n pool = multiprocessing.Pool()\n command = CONVERT_TO_JP2_LOSSY\n if lossless:\n command = CONVERT_TO_JP2_LOSSLESS\n for root, dirs, files in os.walk(self._in_dir):\n out_rel_path = os.path.relpath(root, self._in_dir)\n out_full_path = os.path.abspath(\n os.path.join(self._out_dir, out_rel_path))\n try:\n os.mkdir(out_full_path)\n except OSError:\n # It is not an error for the directory to already exist.\n pass\n for name in files:\n basename = os.path.splitext(name)[0]\n in_file = os.path.join(root, name)\n base_out_file = os.path.join(out_full_path, basename)\n tiff_file = '%s.tif' % base_out_file\n jp2_file = '%s.jp2' % base_out_file\n if self._force or not(os.path.isfile(jp2_file)):\n params = (in_file, tiff_file, jp2_file, command)\n pool.apply_async(self._convert, params,\n callback=self._result_callback)\n elif self._progress:\n self._bar.update(self._bar.currval + 1)\n pool.close()\n pool.join()\n if self._progress:\n self._bar.finish()\n return not(self._has_errors)", "async def convert(self, ctx: Context, argument: str) -> ConverterOutputT:\n raise NotImplementedError", "def EncodingConverter_CanConvert(*args, **kwargs):\n return _gdi_.EncodingConverter_CanConvert(*args, **kwargs)", "def checkValue(self, value):\n if self.converter and value:\n return self.converter(value)\n return value", "def CanConvert(*args, **kwargs):\n return _gdi_.EncodingConverter_CanConvert(*args, **kwargs)", "def rule_convert(source_path, build_path):\n logging.info(\n \"Searching path `{}` for YAML rule definitions to convert ...\".format(\n source_path\n 
)\n )\n set_logger()\n convert_rules(source_path, build_path)", "def execute(self):\n if self.database:\n if self.database not in self.DATABASES:\n print \"No converter with specified key found!\"\n return False\n self.run_converter(self.database.lower())\n else:\n for converter_key in self.DATABASES.iterkeys():\n self.run_converter(converter_key)\n\n return True", "def convert(self, conversion_format: str) -> None:\n if self.extension == conversion_format:\n return\n Logger.Logger.log('Converting the song into ' + conversion_format)\n converter: Converter.Converter = Converter.Converter(self)\n # Convert the file into the given format.\n new_path: str = converter.convert(conversion_format)\n if os.path.exists(self.path):\n os.remove(self.path)\n # Update the path and reload tags according to new file generated.\n self.set_path(new_path, self.original_path)", "def passed(self):\n\n compile = self.compile_successful\n if self.compileTest or not compile: return compile\n\n compare = not self.doComparison or self.compare_successful\n analysis = self.analysisRoutine == \"\" or self.analysis_successful\n return compare and analysis", "def truthy_or_none(converter: Callable[[Any], T]) -> Callable[[Any], Optional[T]]:\n return lambda value: converter(value) if value else None", "def convert_path_units(self, path_to_conv):\n\n converted_path = copy.deepcopy(path_to_conv) # TODO: figure out if this is the right way to do this- I have not yet understood how data should be handled here\n\n for i, coord in enumerate(converted_path):\n converted_path[i][0] = coord[0] / self.units_in_meter\n converted_path[i][1] = coord[1] / self.units_in_meter\n converted_path[i][2] = coord[2] / self.units_in_meter\n\n return converted_path", "def __check_path__(path):\n\n def seq_iter(iterable):\n result = []\n for p in iterable:\n if isinstance(p, Iterable) and \\\n not isinstance(p, (basestring, tuple)):\n result += seq_iter(p)\n else:\n result.append(p)\n\n return result\n\n if isinstance(path, (basestring, int, float, complex, NoneType)):\n return path,\n else:\n return tuple(seq_iter(path))", "def process(self, *args, **kwargs):\n from copy import copy\n\n # set default values\n options = copy(self._process_default_options_)\n options.update(kwargs)\n\n condensed_output = (options['list_of_outputs'] is False and\n not options['full_output'])\n\n if condensed_output:\n options['list_of_outputs'] = True\n options['only_accepted'] = True\n\n result = super(Transducer, self).process(*args, **options)\n\n if (condensed_output and not result or\n not options['full_output'] and result is None):\n raise ValueError(\"Invalid input sequence.\")\n if condensed_output and len(result) >= 2:\n raise ValueError(\"Found more than one accepting path.\")\n\n if condensed_output:\n return result[0]\n return result", "def convert(kls, path, configuration=None, converters=None, ignore_converters=None, joined=None):\n path_type = type(path)\n if path_type is Path:\n return path\n else:\n joined = dot_joiner(path, item_type=path_type)\n return Path(path, configuration, converters, ignore_converters, joined=joined)", "def Get2dConversion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_Get2dConversion(self, *args)", "def _handle_path(path: str) -> Callable:\n parts = Path(path).parts\n\n result = _cogs\n for part in parts:\n result = result[part]\n\n return result" ]
[ "0.5848007", "0.566183", "0.5174522", "0.51070803", "0.49476284", "0.4783851", "0.47672653", "0.47506908", "0.46229607", "0.45591906", "0.44457802", "0.44202554", "0.44014907", "0.44013757", "0.43933994", "0.438883", "0.43888274", "0.43719685", "0.43644443", "0.43442747", "0.4342604", "0.43421116", "0.42793325", "0.42685843", "0.42592424", "0.42426994", "0.42264217", "0.41970488", "0.41879258", "0.41552916" ]
0.6817883
0
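Illustrative sketch (assumed converter registry, not the original library's API): the do_conversion document above returns a (value, did_conversion) tuple so callers can tell a converted value apart from one handed back untouched. A self-contained version of that pattern:

converters = {
    "port": int,
    "debug": lambda raw: raw.lower() in ("1", "true", "yes"),
}

def do_conversion(key, value):
    converter = converters.get(key)
    if converter is None:
        # No converter registered for this key: hand the value back unchanged.
        return value, False
    return converter(value), True

assert do_conversion("port", "8080") == (8080, True)
assert do_conversion("debug", "YES") == (True, True)
assert do_conversion("host", "localhost") == ("localhost", False)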
Find appropriate converter for this path
def find_converter(self):
    if self.ignore_converters:
        return None, False
    return self.converters.matches(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getConverterFor(self, *args):\n return _libsbml.SBMLConverterRegistry_getConverterFor(self, *args)", "def converter(self):\r\n return self._converter", "def converter(self):\n return self._converter", "def converter(self):\r\n return self._endpoint.converter", "def getConverters(self):\n pass", "def getConverterByIndex(self, *args):\n return _libsbml.SBMLConverterRegistry_getConverterByIndex(self, *args)", "def _get_converter(orig, target):\n try:\n func = getattr(utils, f'convert_{orig}_to_{target}')\n except AttributeError:\n func = partial(convert_unit, orig=orig, to=target)\n return func", "def get_for(self, file=None, format=''):\n if isinstance(file, DirectoryStream):\n # directory 'stream'\n return (self._names['dir'],)\n if format:\n try:\n converter = (self._names[format],)\n except KeyError:\n raise ValueError(\n f'Format specifier `{format}` not recognised'\n )\n else:\n converter = self.identify(file)\n if not converter:\n if not file or file.mode == 'w' or maybe_text(file):\n format = self._default_text\n else:\n format = self._default_binary\n if file and format:\n if Path(file.name).suffix:\n level = logging.WARNING\n else:\n level = logging.DEBUG\n logging.log(\n level,\n f'Could not infer format from filename `{file.name}`. '\n f'Falling back to default `{format}` format'\n )\n try:\n converter = (self._names[format],)\n except KeyError:\n pass\n return converter", "def getConverter( format ):\n\n data = set(format.split(\"-\"))\n\n if \"one\" in data:\n if \"forward\" in data:\n if \"closed\" in data:\n return __one_forward_closed \n else:\n return __one_forward_open\n else:\n if \"closed\" in data:\n return __one_both_closed\n else:\n return __one_both_open\n else:\n if \"forward\" in data:\n if \"closed\" in data:\n return __zero_forward_closed\n else:\n return __zero_forward_open\n else:\n if \"closed\" in data:\n return __zero_both_closed\n else:\n return __zero_both_open", "def get_converter(category, name, disable_logging=False):\n return PluginLoader._import(\"convert.{}\".format(category), name, disable_logging)", "def converter(item):\n pass", "def converterValue(self, converter):\n pass", "def get_converter(self, parameter):\n if parameter not in self._converters:\n param = self.get_parameter(parameter)\n try:\n scale = float(param['Scale'])\n except KeyError:\n scale = 1\n\n def convert(value):\n # easy_scale = float(param['EasyScale'])\n # easy_scale_multiplier = float(param['EasyScaleMultiplier'])\n return value * scale\n\n return convert", "def get_converter(from_case: Optional[LetterCaseType], to_case: LetterCaseType) -> Optional[ConverterType]:\n to_case = get_letter_case(to_case)\n\n name = f\"to_{to_case.name.lower()}_case\"\n\n if from_case is not None:\n from_case = get_letter_case(from_case)\n name = f\"{from_case.name.lower()}_{name}\"\n\n return globals().get(name)", "def do_conversion(self, value):\n converter, found = self.find_converter()\n if not found:\n return value, False\n else:\n converted = converter(self, value)\n self.converters.done(self, converted)\n if hasattr(converted, \"post_setup\"):\n converted.post_setup()\n return converted, True", "def search_registry(filename):\n for converter in data_importers:\n if converter.check_importable(filename):\n return converter\n logging.error('No converter found', filename)\n return False", "def converters(self) -> Iterator[Tuple[str, Type[ConverterAPI]]]:", "def converters(self):\r\n return self._converters", "def _convert_to(maybe_device, convert_to):\n\n # Fast path. 
If we already have the information required, we can\n # save one blkid call\n if (\n not convert_to\n or (convert_to == \"device\" and maybe_device.startswith(\"/\"))\n or maybe_device.startswith(\"{}=\".format(convert_to.upper()))\n ):\n return maybe_device\n\n # Get the device information\n if maybe_device.startswith(\"/\"):\n blkid = __salt__[\"disk.blkid\"](maybe_device)\n else:\n blkid = __salt__[\"disk.blkid\"](token=maybe_device)\n\n result = None\n if len(blkid) == 1:\n if convert_to == \"device\":\n result = next(iter(blkid))\n else:\n key = convert_to.upper()\n result = \"{}={}\".format(key, next(iter(blkid.values()))[key])\n\n return result", "def register_converter(self, converter, conv_type, conv_format=None):\n self.flask_plugin.register_converter(converter, conv_type, conv_format)", "def registerConverter(convertType, converter):\n converters[convertType] = converter", "def converterBean(self, converter):\n pass", "def convert(kls, path, configuration=None, converters=None, ignore_converters=None, joined=None):\n path_type = type(path)\n if path_type is Path:\n return path\n else:\n joined = dot_joiner(path, item_type=path_type)\n return Path(path, configuration, converters, ignore_converters, joined=joined)", "def get_available_convert_plugins(convert_category, add_none=True):\n convertpath = os.path.join(os.path.dirname(__file__),\n \"convert\",\n convert_category)\n converters = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(convertpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n if add_none:\n converters.insert(0, \"none\")\n return converters", "def addConverter(self, *args):\n return _libsbml.SBMLConverterRegistry_addConverter(self, *args)", "def from_other_system(self, coord):\n #logger.debug(f\"from_other_conversions:{self.from_other_conversions}\")\n #logger.debug(f\"Converting to {self.system_tuple} from {name}\")\n if coord.system.system_type not in self.from_other_conversions:\n raise ValueError((f\"No Converter from {coord.system.system_type}\"\n f\"{coord.system.system_tuple} to {self.system_type}\"\n f\"{self.system_tuple} Found\"))\n\n return self.from_other_conversions[coord.system.system_type](self, coord)", "def guess_type(self, path):\n\n # The text attribute assumes UTF-8\n self.extensions_map = {k: v if 'text/' not in v else v + ';charset=UTF-8'\n for k, v in self.extensions_map.items()}\n base, ext = posixpath.splitext(path)\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n ext = ext.lower()\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n else:\n return self.extensions_map['']", "def register_converter(self, converter, name=None):\n if not name:\n name = converter.__name__\n if \"Converter\" in name:\n name = converter.__name__.replace(\"Converter\", \"\")\n self.url_map.converters[name] = converter", "def _resolve_path(self, path):\n filepath = None\n mimetype = None\n\n for root, dirs, files in self.filter_files(self.path):\n # Does it exist in error path?\n error_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'error_templates', path)\n try:\n with open(error_path):\n mimetype, encoding = mimetypes.guess_type(error_path)\n filepath = error_path\n except IOError:\n pass\n\n # Does it exist in Tarbell blueprint?\n if self.base:\n basepath = os.path.join(root, self.blueprint_name, path)\n try:\n with open(basepath):\n mimetype, encoding = mimetypes.guess_type(basepath)\n filepath = basepath\n except IOError:\n pass\n\n # Does 
it exist under regular path?\n fullpath = os.path.join(root, path)\n try:\n with open(fullpath):\n mimetype, encoding = mimetypes.guess_type(fullpath)\n filepath = fullpath\n except IOError:\n pass\n\n return filepath, mimetype", "def _convert(self, filepath):\n raise NotImplementedError()" ]
[ "0.6894406", "0.6664965", "0.6639402", "0.6496533", "0.6168772", "0.61331046", "0.6112294", "0.60424626", "0.59838074", "0.59664655", "0.5777416", "0.56580913", "0.5560846", "0.5559826", "0.55199546", "0.54745203", "0.54502314", "0.5372859", "0.5343171", "0.5282362", "0.5274377", "0.52730405", "0.52359563", "0.5215325", "0.52035123", "0.52003366", "0.5176343", "0.5168925", "0.5130978", "0.50739706" ]
0.7259743
0
Return the converted value for this path
def converted_val(self):
    return self.converters.converted_val(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert(self, value):\n return value", "def convert(self, value):\r\n return value", "def convert(self, value, context):\n return value", "def convert(self, value):\r\n return Converter.convert(self, value)", "def _conversion(self, val):\n if (self.__set_type == \"str\"):\n return val\n else:\n try:\n return ast.literal_eval(val)\n except ValueError:\n return None", "def to_python(self, value):\r\n return value", "def to_python(self, value):\r\n return value", "def to_python(self, value):\n return value", "def to_python(self, value):\n return value", "def cast(self, value):\n\n return value", "def convert(self):\n return", "def _value(self):\n if self.data is None:\n return self._original_value\n elif self.get_label:\n return self.get_label(self.data)\n else:\n return unicode(self.data)", "def friendly_to_internal(self, value):\n return value * self.conversion", "def getValue(self):\n return _libsbml.ConversionOption_getValue(self)", "def _transform_value(self, metadata: MetadataTransformModel | None) -> str | None:\n # not all fields are required\n if metadata is None:\n return None\n\n # not all metadata fields have a path, but they must have a path or default\n if metadata.path is None:\n return metadata.default\n\n # get value from path\n value = self._path_search(metadata.path)\n\n # return default if value of None is returned from Path\n # IMPORTANT: a value of None passed to the transform may cause a failure (lambda x.lower())\n if value is None:\n return metadata.default\n\n for t in metadata.transform or []:\n if isinstance(value, str):\n # pass value to static_map or callable, but never both\n if t.filter_map is not None:\n value = self._transform_value_map(value, t.filter_map, True)\n elif t.static_map is not None:\n value = self._transform_value_map(value, t.static_map)\n elif callable(t.method):\n value = self._transform_value_callable(value, t.method, t.kwargs)\n\n # ensure only a string value or None is returned (set to default if required)\n if value is None:\n value = metadata.default\n elif not isinstance(value, str):\n value = str(value)\n\n return value", "def internal_to_friendly(self, value):\n return value / self.conversion", "def to_native_value(self):\n return self.__class__.get_setting(self.key)", "def getvalue(self):\n if self._value:\n _d,_t=self._value.strip(' ').replace('\"', '').split(',')\n _d=_d.split('/')\n _t=_t.split(':')\n return '%s%s%sT%s%s%s'%(_d[0].zfill(4), _d[1].zfill(2), _d[2].zfill(2),\n _t[0].zfill(2), _t[1].zfill(2), _t[2].zfill(2))", "def getValue(self, *args):\n return _libsbml.ConversionProperties_getValue(self, *args)", "def to_native_value(self):\n return self.__class__.get_setting(self.key, user=self.user)", "def to_representation(self, value):\n if value:\n data = default_storage.open(value.path).read()\n encoded=base64.b64encode(data).decode(\"utf-8\")\n return encoded", "def value(self):\n return self.value()._value", "def normalize(self, value):\n return str(value)", "def convert(self, value):\n return self.ASCIIToDecimal(value)", "def value(self):\n return self.string", "def get(self):\n # We use here the fact, that when used in a widget, the value will be\n # retrieved directly instead through .get(). 
Thus the widget will always \"see\" the str representation.\n value = self._tk.globalgetvar(self._name)\n try:\n value = self.convert(value)\n except Exception as e:\n value = Invalid\n if self._validated_hook:\n self._validated_hook(value is not Invalid)\n return value", "def resolve_value(obj, _):\n return obj.value.decode()", "def convert_to_path(arg: Any) -> Path:\n return Path(arg)", "def marshal(self):\n return self.value", "def get_prep_value(self, value):\n return self.to_json(value)" ]
[ "0.7295479", "0.7265097", "0.66316205", "0.6538032", "0.65285426", "0.63996595", "0.63996595", "0.63584256", "0.63584256", "0.63554263", "0.6269452", "0.6219786", "0.61724675", "0.6151281", "0.60600317", "0.60472697", "0.6013175", "0.5997187", "0.59565735", "0.5947187", "0.59042096", "0.5846244", "0.5844757", "0.57444835", "0.57353544", "0.57297796", "0.5710398", "0.5705824", "0.57054293", "0.5704529" ]
0.7291777
1
Returns the weekday of the product. Used only for products that are bound to a specific day.
def get_weekday(self):
    weekdays = dict(PRODUCT_WEEKDAYS)
    return weekdays.get(self.weekday, "N/A")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "def weekday(self) -> int:\n return WD_EN.index(self.time.day.lower())", "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def weekday(self):\n return (self.toordinal() + 6) % 7", "def weekday(self):\n return 0", "def weekday(self):\n return 0", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def weekday(self):\n if self.month is not None and self.day is not None:\n return self.todate().weekday()\n else:\n return None", "def day_of_week(self) -> str:\n return self.elements[4]", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]", "def weekday(self, dt):\n days = {\n 0: self.MONDAY,\n 1: self.TUESDAY,\n 2: self.WEDNESDAY,\n 3: self.THURSDAY,\n 4: self.FRIDAY,\n 5: self.SATURDAY,\n 6: self.SUNDAY\n }\n return days.get(dt.weekday())", "def weekday(day):\n return (day % 7) - 1", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def day(self) -> Optional[pulumi.Input[Union[str, 'WeekDay']]]:\n return pulumi.get(self, \"day\")", "def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]", "def day_of_week_for_start_day(self):\n import calendar\n\n day = self.idfobjects[\"RUNPERIOD\"][0][\"Day_of_Week_for_Start_Day\"]\n\n if day.lower() == \"sunday\":\n return calendar.SUNDAY\n elif day.lower() == \"monday\":\n return calendar.MONDAY\n elif day.lower() == \"tuesday\":\n return calendar.TUESDAY\n elif day.lower() == \"wednesday\":\n return calendar.WEDNESDAY\n elif day.lower() == \"thursday\":\n return calendar.THURSDAY\n elif day.lower() == \"friday\":\n return calendar.FRIDAY\n elif day.lower() == \"saturday\":\n return calendar.SATURDAY\n else:\n return 0", "def day_of_the_week(arg):", "def __weekday(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"weekday\",\n operand1=self,\n operand2=None\n )", "def weekday(self, *args, **kwargs): # real signature unknown\r\n pass", "def get_trip_day_weekday(self):\n\n days = {\"Lundi\":'1', \"Mardi\":'2', \"Mercredi\":'3', \"Jeudi\":'4', \"Vendredi\":'5', \"Samedi\":'6', \"Dimanche\":'7'}\n\n return int(days[dict(self.TRIP_DAY_SELECTIONS)[self.trip_day]])", "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n 
return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None", "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None", "def get_day_of_week() -> str:\n return datetime.now(pytz.timezone('US/Eastern')).strftime(\"%a\").lower()", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def get_weekday(self, as_str=False):\n\n # First we get the first 8 bits stored in the weekday register\n # and translate it to an integer\n wd_8bits = self.__read_register(_REGISTER_WEEKDAY)\n\n # Then we extract the weekday and return it\n wd = wd_8bits & 0x07 # 0x07 = 0b00000111\n\n if as_str is True: # if we want the weekday's name\n wd = WEEKDAY_STR[wd]\n\n return wd", "def day_of_week(date: datetime) -> str:\n weekday = date.weekday()\n return calendar.day_name[weekday]" ]
[ "0.7443026", "0.72535807", "0.72381914", "0.71214", "0.71214", "0.7113097", "0.7078143", "0.7078143", "0.7075242", "0.7074787", "0.70650786", "0.7055152", "0.7055152", "0.69736695", "0.6911523", "0.6890648", "0.67727506", "0.67682344", "0.6700004", "0.6569597", "0.6561082", "0.6559521", "0.6518441", "0.65047103", "0.64920723", "0.64920723", "0.64901155", "0.6484302", "0.64611936", "0.64022267" ]
0.79881895
0
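Illustrative sketch: the get_weekday document above uses the common Django choices idiom of turning (value, label) pairs into a dict and looking a value up with a fallback. PRODUCT_WEEKDAYS below is an assumed example, not the project's real constant:

# Assumed example of the (value, label) choices tuple.
PRODUCT_WEEKDAYS = (
    (1, "Monday"),
    (2, "Tuesday"),
    (3, "Wednesday"),
    (4, "Thursday"),
    (5, "Friday"),
)

def weekday_label(weekday):
    weekdays = dict(PRODUCT_WEEKDAYS)
    return weekdays.get(weekday, "N/A")

assert weekday_label(3) == "Wednesday"
assert weekday_label(7) == "N/A"  # products not bound to a specific day fall back to "N/A"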
Checks if the contact has expired invoices, returns True or False
def is_debtor(self):
    return bool(self.expired_invoices_count())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expired_invoices_count(self):\n return self.get_expired_invoices().count()", "def expired(self):\n\n return self.getNotAfter() <= rpki.sundial.now()", "def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return (self.date_joined + expiration_date <= datetime.datetime.now())", "def isExpired(self):\n return True/False", "def is_expired(self):\n return self.expiration_date <= self._now()", "def is_expired(self):\n return utcnow() >= self.expires", "def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)", "def is_access_expired(self) -> bool:\n entitlement_contract = self.cfg.entitlements.get(self.name, {})\n # TODO(No expiry per resource in MVP yet)\n expire_str = entitlement_contract.get('expires')\n if not expire_str:\n return False\n expiry = datetime.strptime(expire_str, '%Y-%m-%dT%H:%M:%S.%fZ')\n if expiry >= datetime.utcnow():\n return False\n return True", "def is_expired(self) -> bool:\n return now() > self.expires", "def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())", "def get_expired_invoices(self):\n return self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n )", "def has_expired(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def expired(self):\n return int(time.time()) > self.expires_at", "def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts", "def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n\n return (self.user.date_joined + expiration_date <= datetime.datetime.now())", "def _has_expired(self):\r\n expired = False\r\n if hasattr(self, 'Expiration'):\r\n now = datetime.datetime.utcnow()\r\n expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')\r\n expired = (now >= expiration)\r\n else:\r\n raise ValueError(\"ERROR: Request for expired property, but no Expiration in HIT!\")\r\n return expired", "def is_expired(self):\n\n return time.time() * 1000 - self._refreshed_on > self._expire", "def is_expired(self):\n delta = datetime.datetime.now() - self.created_at\n\n return delta.total_seconds() > 15*60", "def expired(self): # pragma: no cover\n return self._state in (_State.EXPIRING, _State.EXPIRED)", "def isCalibrationExpired(self):\n return (getI1ProTimeUntilCalibrationExpire() < 200)", "def is_expired(self):\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False", "def is_expired(self):\n return int(time.time()) - self.time > self.interval", "def has_expired(self):\n if not self._initialized:\n return True\n\n expires_in = self.expires_in\n if expires_in > 0:\n return False\n else:\n return True", "def activation_expired(self):\n return self.date_joined + timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) < timezone.now()", "def check_contract_expire_soon():\n\n contract_expire_soon_list = []\n contract_expired_list = []\n\n # get user contract\n # refactoring techniques: replace temp with query\n user_role = get_user_role()\n contract_list = user_role.user_contracts\n\n for contract in contract_list:\n if contract['dateSigned'] and not contract['terminationDate']:\n\n # get expiry 
date and current date\n expiry_date = datetime.strptime(contract['expiryDate'][:19], \"%Y-%m-%dT%H:%M:%S\")\n current_time = datetime.now()\n \n # get the diffenrence between expiry date and current date\n difference = expiry_date - current_time\n days = divmod(difference.days, 86400)\n\n # Refactoring techniques: composing method\n contract_expire_soon = (days[1] <= 31) and (days[1] >= 0)\n contract_expired = days[0] < 0\n\n if contract_expire_soon:\n contract_expire_soon_list.append(contract)\n if contract_expired:\n contract_expired_list.append(contract)\n \n # return True if there's elem in any list, else False\n if len(contract_expire_soon_list) >= 1 or len(contract_expired_list) >= 1:\n return True, contract_expire_soon_list, contract_expired_list\n else:\n return False, contract_expire_soon_list, contract_expired_list", "def valid(self):\n return self.expiry > timezone.now()", "def is_expired(self):\n return self._bExpired", "def is_expired(self):\n if not self.is_signed:\n return True\n return int(self._token_claims.get(self.__class__.exp_claim, 0)) < int(\n time.time()\n )", "def expired(self):\n return rospy.get_rostime() - self.start_time > self.duration", "def _is_expired(self):\n current_time = datetime.now()\n if (current_time > self._expires_at):\n logging.debug('token expired')\n return True\n else:\n return False" ]
[ "0.70427215", "0.70207226", "0.6923476", "0.68999964", "0.6830679", "0.6823518", "0.67947525", "0.67519253", "0.6697096", "0.66647214", "0.6662017", "0.66469795", "0.66408515", "0.6632337", "0.6622034", "0.6621522", "0.6588166", "0.6573824", "0.65544784", "0.6533347", "0.6527256", "0.6524979", "0.6514029", "0.649161", "0.64100575", "0.6407443", "0.63750196", "0.6334864", "0.63015556", "0.6199255" ]
0.71151036
0
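Illustrative sketch (plain Python, no Django ORM): the is_debtor document above treats any non-zero count of expired, unpaid invoices as a debtor. The Invoice fields here mirror the filter shown in the following row's document, but the class and helpers are hypothetical:

from datetime import date

class Invoice:
    def __init__(self, expiration_date, paid=False, debited=False,
                 canceled=False, uncollectible=False):
        self.expiration_date = expiration_date
        self.paid = paid
        self.debited = debited
        self.canceled = canceled
        self.uncollectible = uncollectible

def expired_invoices(invoices, today=None):
    today = today or date.today()
    return [
        inv for inv in invoices
        if inv.expiration_date <= today
        and not (inv.paid or inv.debited or inv.canceled or inv.uncollectible)
    ]

def is_debtor(invoices):
    return bool(len(expired_invoices(invoices)))

invoices = [
    Invoice(date(2020, 1, 1)),             # expired and unpaid
    Invoice(date(2020, 1, 1), paid=True),  # expired but settled
]
assert is_debtor(invoices)
assert not is_debtor([Invoice(date(2020, 1, 1), paid=True)])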
Returns a queryset with the expired invoices for the contact.
def get_expired_invoices(self):
    return self.invoice_set.filter(
        expiration_date__lte=date.today(),
        paid=False,
        debited=False,
        canceled=False,
        uncollectible=False,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expired_invoices_count(self):\n return self.get_expired_invoices().count()", "def get_subscriptions_with_expired_invoices(self):\n subscriptions = []\n for invoice in self.get_expired_invoices():\n for invoice_item in invoice.invoiceitem_set.all():\n if (\n invoice_item.subscription\n and invoice_item.subscription not in subscriptions\n ):\n subscriptions.append(invoice_item.subscription)\n return subscriptions", "def invoices(self):\r\n return inv.Invoices(self)", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def expired(self, *args, **kwargs):\n # Expired messages are those that have been delivered, AND have a\n # set `expire_on` attribute.\n #\n # OR, those messages that were never delivered, but are no longer\n # relevant (ie. they're too old).\n now = timezone.now()\n since = timezone.now() - timedelta(days=3) # 3 days ago\n\n return self.get_queryset().filter(\n Q(expire_on__lte=now) |\n Q(deliver_on__lte=since, success=None) |\n Q(deliver_on__lte=since, success=False)\n )", "def get_queryset(self):\n return Person.objects.filter(expiry_date__gt=timezone.now())", "def get_expired_campaign(self):\n kwargs = {}\n kwargs['expirationdate__lte'] = datetime.utcnow().replace(tzinfo=utc)\n return Campaign.objects.filter(**kwargs).exclude(status=CAMPAIGN_STATUS.END)", "def invoices(self):\r\n return Invoices(self)", "def filter_expired(self, queryset, name, value):\n if value:\n return queryset.filter(Q(expiration__gte=date.today()) | Q(auto_renew=True))\n return queryset", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def _get_invoices_for_payment(cls, account_id: int) -> List[InvoiceModel]:\n valid_statuses = (InvoiceStatus.APPROVED.value, InvoiceStatus.REFUND_REQUESTED.value)\n invoice_ref_subquery = db.session.query(InvoiceReferenceModel.invoice_id). 
\\\n filter(InvoiceReferenceModel.status_code.in_((InvoiceReferenceStatus.ACTIVE.value,)))\n\n invoices: List[InvoiceModel] = db.session.query(InvoiceModel) \\\n .filter(InvoiceModel.invoice_status_code.in_(valid_statuses)) \\\n .filter(InvoiceModel.payment_method_code == PaymentMethod.EJV.value) \\\n .filter(InvoiceModel.payment_account_id == account_id) \\\n .filter(InvoiceModel.id.notin_(invoice_ref_subquery)) \\\n .all()\n return invoices", "def get_unpaid_invoices(self, with_vat=True):\n\n return self.call(method='getUnpaidInvoices', args=[with_vat])", "def get_invoices(self, limit=50, closed=False, get_all=False):\n mask = \"mask[invoiceTotalAmount, itemCount]\"\n _filter = {\n 'invoices': {\n 'createDate': {\n 'operation': 'orderBy',\n 'options': [{\n 'name': 'sort',\n 'value': ['DESC']\n }]\n },\n 'statusCode': {'operation': 'OPEN'},\n }\n }\n if closed:\n del _filter['invoices']['statusCode']\n\n return self.client.call('Account', 'getInvoices', mask=mask, filter=_filter, iter=get_all, limit=limit)", "def get_queryset(self):\n return Investor.objects.order_by('-kyc_date')[:5]", "def get_unpaid_invoices(self):\n unpaid = []\n\n # cycle through all (active) projects\n for project in self.project_list:\n # cycle through the invoices of the project\n for invoice in project.get_invoice_list():\n # append it, if it has no paid_date set (None)\n if invoice.get_paid_date() is None and invoice.get_date() is not None:\n unpaid.append(invoice)\n\n # sort the invoices by due date\n unpaid = sorted(unpaid, key=lambda x: x.get_due_date())\n\n return unpaid", "def get(self):\n user = get_authenticated_user()\n if not user.stripe_id:\n raise NotFound()\n\n return get_invoices(user.stripe_id)", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def expiry_dates(self):\n try:\n return self._expiry_dates\n except AttributeError:\n # has to be a non-valid date, to trigger returning 'expirations'\n d = self._load_data(dt.datetime(2016, 1, 3))\n self._expiry_dates = [dt.date(x['y'], x['m'], x['d'])\n for x in d['expirations']]\n return self._expiry_dates", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def to_be_deleted(self):\n return self.filter(start__lte=timezone.now() - datetime.timedelta(days=1))", "def billing_history(cls, user=None):\n invoices = Invoice.query.filter(Invoice.user_id == user.id) \\\n .order_by(Invoice.created_on.desc()).limit(12)\n\n return invoices", "def related_reports(self):\n return Report.objects.exclude(contact_email__isnull=True).filter(contact_email__iexact=self.contact_email).order_by('status', '-create_date')[:1000]", "def get_latest_invoice(self) -> CreditorInvoice:\n\n LOGGER.info(\"Getting latest invoice from EON Romania\")\n\n session = http.create_session()\n\n response = session.get(self._login_page_url)\n if response.status_code != 200:\n raise self.Error(\"Login page is not functioning\")\n\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n csrf_token_elem = soup.find(\"input\", {\"name\": \"_csrf_token\"})\n if not csrf_token_elem:\n raise self.Error(\"Could not extract CSRF token\")\n\n login_data = {\n \"_username\": self._email,\n \"_password\": self._password,\n \"_csrf_token\": csrf_token_elem.get(\"value\"),\n }\n\n if session.post(self._login_url, login_data).status_code 
!= 200:\n raise self.AuthError()\n\n response = session.get(self._invoices_url)\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n\n if not soup.select_one(self._selectors.sidebar):\n raise self.AuthError()\n\n invoice_date_elem = soup.select_one(self._selectors.invoice_date)\n if not invoice_date_elem:\n raise self.Error(\"Failed to get invoice date\")\n\n invoice_due_date_elem = soup.select_one(self._selectors.invoice_due_date)\n if not invoice_due_date_elem:\n raise self.Error(\"Failed to get invoice due date\")\n\n invoice_payment_status_elem = soup.select_one(\n self._selectors.invoice_payment_status\n )\n if not invoice_payment_status_elem:\n raise self.Error(\"Failed to get invoice payment status\")\n\n invoice_amount_elem = soup.select_one(self._selectors.invoice_amount)\n if not invoice_amount_elem:\n raise self.Error(\"Failed to get invoice amount\")\n\n invoice_date = invoice_date_elem.contents[-1]\n invoice_due_date = invoice_due_date_elem.contents[-1]\n invoice_payment_status = invoice_payment_status_elem.contents[-1]\n invoice_amount = invoice_amount_elem.contents[-1]\n\n invoice = CreditorInvoice(\n float(invoice_amount.replace(\",\", \".\")),\n Currency.RON,\n datetime.strptime(invoice_date, \"%d.%m.%Y\"),\n datetime.strptime(invoice_due_date, \"%d.%m.%Y\"),\n PaymentStatus.PAID_CONFIRMED\n if invoice_payment_status == \"0.00\"\n else PaymentStatus.UNPAID,\n )\n\n LOGGER.info(\"Found latest EON Romania invoice\", invoice=invoice)\n return invoice", "def organization_get_invitation_expired_token(self, client, id,\n expired_token):\n assert client.get('/organizations/' + id + '/invitations', headers={\n 'Authorization': 'Bearer ' + expired_token},\n data={}).status == '401 UNAUTHORIZED'", "def expire_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='expire'))", "def list(self, request):\n currentYear = datetime.now().year\n expenses = Expenses.objects.filter(\n date_purchased__contains=currentYear)\n serializer = ExpenseSerializer(\n expenses, many=True, context={'request': request})\n return Response(serializer.data)", "def filter_expired(self, queryset, name, value):\n\n if not common.settings.stock_expiry_enabled():\n return queryset\n\n if str2bool(value):\n return queryset.filter(StockItem.EXPIRED_FILTER)\n else:\n return queryset.exclude(StockItem.EXPIRED_FILTER)", "def get_invoices(\n page_number=None,\n limit=10,\n filter_by=None,\n filter_value=None,\n order_by=None\n):\n where_filter = \"\"\n \n valid_fields = {\n 'month': 'ReferenceMonth',\n 'year': 'ReferenceYear',\n 'document': 'Document'\n }\n\n ob_fields = []\n for ob in order_by:\n field = valid_fields.get(ob)\n if field:\n ob_fields.append(field)\n\n order_by = ob_fields\n\n if not order_by:\n order_by = [\"CreatedAt\"]\n\n order_by = [ob + \" ASC\" for ob in order_by]\n order_by = \", \".join(order_by)\n\n if page_number:\n pagination_filter = f\"\"\"\n id NOT IN (\n SELECT\n id\n FROM\n invoice\n ORDER BY\n {order_by} LIMIT {limit*(page_number-1)} )\n \"\"\"\n where_filter += \"AND \" + pagination_filter\n order_by += f\" LIMIT {limit}\"\n \n if filter_by and filter_value:\n if filter_by == 'document':\n filter_value = f'\"{filter_value}\"'\n\n where_filter += f\"AND {filter_by} = {filter_value}\"\n \n query = f\"\"\"\n SELECT\n id,\n ReferenceMonth AS month,\n ReferenceYear AS year,\n Document AS document,\n Description AS description,\n Amount AS amount\n FROM\n invoice\n WHERE\n IsActive = 1\n {where_filter}\n ORDER BY\n 
{order_by}\n ;\n \"\"\"\n\n try:\n conn = sqlite3.connect(DATABASE)\n conn.row_factory = dict_factory\n cursor = conn.cursor()\n cursor.execute(query)\n result = cursor.fetchall()\n return result, True\n except:\n return [], False", "def ListInvoices(self, **kwargs):\n return self._stub.ListInvoices(ln.ListInvoiceRequest(**kwargs))" ]
[ "0.67440355", "0.6387762", "0.6090469", "0.60641974", "0.5989545", "0.5955621", "0.59304565", "0.591822", "0.58444196", "0.5841969", "0.5807256", "0.5790679", "0.5630977", "0.5562243", "0.5379452", "0.5352015", "0.52384615", "0.5235957", "0.516909", "0.51581156", "0.5147197", "0.5137636", "0.5072063", "0.5070627", "0.50636876", "0.5060616", "0.50446177", "0.50243485", "0.50183105", "0.49886855" ]
0.8213595
0
Counts the number of expired invoices for the contact.
def expired_invoices_count(self):
    return self.get_expired_invoices().count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def get_expired_invoices(self):\n return self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n )", "def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def is_debtor(self):\n return bool(self.expired_invoices_count())", "def customer_acccounting(customer_orders):", "def getNumOfInvoice(self,id,start,finish):\n self.calls += 1\n invoice = self.getResponse(self.buildParams(id,start,finish))\n if not self.isNumeric(invoice):\n middle = self.diveDates(start,finish)\n plusMiddle = middle + timedelta(days = 1)\n middle = self.removeHours(middle)\n plusMiddle = self.removeHours(plusMiddle)\n invoice = self.getNumOfInvoice(id,start,middle)+\\\n self.getNumOfInvoice(id,plusMiddle,finish)\n return invoice", "def countAndGetCallInvoice(self,id,start,finish):\n self.calls = 0\n return self.getNumOfInvoice(id,start,finish)", "def report_expiry_seconds(self) -> int:\n return self._contract.functions.reportExpirySeconds().call()", "def expire_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='expire'))", "def orders_count(self):\n return Order.objects.filter(email=self.email).count()", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def test_total_invoices_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_invoices, 0)", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def invoices(self):\r\n return inv.Invoices(self)", "def estimate_recent_invites(realms: Collection[Realm], *, days: int) -> int:\n recent_invites = RealmCount.objects.filter(\n realm__in=realms,\n property=\"invites_sent::day\",\n end_time__gte=timezone_now() - datetime.timedelta(days=days),\n ).aggregate(Sum(\"value\"))[\"value__sum\"]\n if recent_invites is None:\n return 0\n return recent_invites", "def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()", "def expired_receipt(self):\n return self._expired_receipt", "def invitation_received_no(request):\n if request.user.is_authenticated:\n profile_obj = CustomUser.objects.get(id__exact=request.user.id)\n qs_count = Relationship.objects.invitation_received(profile_obj).count()\n return {'invites_num': qs_count}\n return {}", "def num_attendees(self):\r\n n = sum([c.qty for c in self.contribution_set.all()])\r\n return n", "def employees_count(self, obj):\n return obj.employees_count()", "def get_subscriptions_with_expired_invoices(self):\n subscriptions = []\n for invoice in self.get_expired_invoices():\n for invoice_item in invoice.invoiceitem_set.all():\n if (\n invoice_item.subscription\n and invoice_item.subscription not in subscriptions\n ):\n subscriptions.append(invoice_item.subscription)\n return subscriptions", "def get_entity_contracts_count():\n url = 'http://www.base.gov.pt/base2/rest/contratos?adjudicatariaid=%d' \\\n '&sort(-id)' % entity.base_id\n\n response = requests.get(url, headers={'Range': 'items=0-24'})\n\n results_range = response.headers['content-range']\n _, count = 
results_range.split('/')\n\n return int(count)", "def _compute_count(self):\n for orders in self:\n orders.count = self.env['account.move'].search_count(\n [('invoice_origin', '=', self.name)])", "def ExpireIncr(self):\n if self.force_auto_sync:\n self.get('ExpireIncr')\n return self._ExpireIncr", "def contact_count(self, *args, **kwargs) -> Any:\n pass", "def get_death_count(self, envelope):\n # type: (BaseRetryPolicy, Envelope) -> int\n death_header = envelope.get_header('x-death')\n\n if death_header is None:\n return 0\n\n count = 0\n for death in death_header:\n if not death['queue'].startswith(self.consumer.queue_name):\n continue\n count += death.get('count', 1)\n return count", "def cantidad_de_entregas(self):\r\n return self.entrega_set.count()", "def count_deleted(self):\n count = 0\n for _, e in self.contents.items():\n count = count + e.count_deleted()\n return count", "def get_already_contacted_count(self, seller_id):\n return self.get_already_contacted(seller_id).count()" ]
[ "0.6492827", "0.63781244", "0.56678087", "0.56478906", "0.5508277", "0.54931414", "0.54583615", "0.54224503", "0.5404585", "0.5396429", "0.5373567", "0.53188616", "0.5313624", "0.523742", "0.5208584", "0.51858103", "0.51830447", "0.51752496", "0.5167114", "0.5158963", "0.508917", "0.5088097", "0.50734866", "0.5049587", "0.50427586", "0.5026199", "0.5025903", "0.50186145", "0.5018139", "0.4999861" ]
0.84319055
0
Adds a contact to a campaign. If the contact is already in that campaign, this will raise an exception.
def add_to_campaign(self, campaign_id):
    campaign = Campaign.objects.get(pk=campaign_id)
    if not ContactCampaignStatus.objects.filter(
        contact=self, campaign=campaign
    ).exists():
        # We first create the big object that will hold the status for the campaign
        ContactCampaignStatus.objects.create(contact=self, campaign=campaign)
        return _("Contact %s (ID: %s) added to campaign") % (self.name, self.id)
    else:
        raise Exception(
            _("Contact %s (ID: %s) already in campaign") % (self.name, self.id)
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass", "def add_contact(self, contact):\n self.db.insert_contact(contact)\n return self.update_contacts()", "def add_contact(self, contact):\n\t\tclient_log.debug(f'Создание контакта {contact}')\n\t\treq = {\n\t\t\tACTION: ADD_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def add_campaign(self, campaign):\n self._campaigns += [campaign]", "def add_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() not in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact already exists'))", "def add_contact(self, contact):\n assert self.contact_in_range(contact), 'Wrong KBucket.'\n try:\n self._contacts.remove(contact)\n except ValueError:\n pass\n\n if len(self._contacts) < constants.K:\n self._contacts.append(contact)\n else:\n raise FullBucketError('No space in bucket to insert contact')", "def addcontact(name, address=None, phone=None, email=None):\n try:\n newid = str(r.incr(\"global:nextUserId\"))\n _setcontact(newid, name, address, phone, email)\n r.sadd(\"contacts\", newid)\n\n return _getcontact(newid)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "async def add_contact_to_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"replace into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)", "def add_contact(self):\n contact_mob_num = self._input_mob_num(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be added: \")\n if contact_mob_num == self._user.mob_num:\n print(\"You can't add yourself, IDIOT!!\")\n return self.homepage()\n \n found_contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if found_contact != None:\n print('A user with Mobile number: \"{0}\", and User name: \"{1}\" is found'.format(found_contact.mob_num, found_contact.username))\n user_choice = self._int_input_in_range(\" (1) Add the found user. 
\\n (0) Back to Home page \\n Your choice: \" \n ,range_ = (0, 1))\n if user_choice:\n add_flag = self._user.add_contact(found_contact)\n if not add_flag:\n print('This user is already one of your contacts')\n return self.homepage()\n print(\"Contact added successfully\")\n else:\n self.homepage()\n else:\n print('This user mobile number has no matches')\n return self.homepage()", "def test_add_contact_duplicate(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n with pytest.raises(BusinessException) as exception:\n org.add_contact(TestContactInfo.contact2)\n assert exception.value.code == Error.DATA_ALREADY_EXISTS.name", "def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']", "def addAccountContact(self,contact, accountId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/contacts?responseFields={responseFields}\", \"POST\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(contact).execute();\r\n\t\treturn self.client.result();", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def add_contact(self, request, **kwargs):\n if request.data is None:\n return Response({'message': 'Invalid contact details'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('first_name') is None:\n return Response({'message': 'First name not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n contact_data = request.data.get('contact')\n for data in contact_data:\n print(data.get('phone'))\n try:\n parse_number = phonenumbers.parse(data.get('phone'), None)\n except Exception:\n return Response({'details': 'Invalid Phonenumber'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n if not phonenumbers.is_valid_number(parse_number):\n return Response({'details': 'Invalid Phonenumber entered'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n new_contact_data = ContactCreationAndUpdationMixin().create(request.data)\n group = self.get_object()\n group.contacts.add(new_contact_data)\n serializer_data = ContactSerializer(new_contact_data) \n return Response(serializer_data.data)", "def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)", "def add_contact_to_db_by_one(name, email, module_db_id, contact_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n if contact_id:\n try:\n contact = Contact.objects.get(id=contact_id, list_owner_id=module_db_id)\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n except Contact.DoesNotExist:\n pass\n else:\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.status = 1\n contact.save()\n success = True\n except Exception as e:\n print(e.args)\n\n return success, name, email", 
"def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> {contact.email}\\n\\n\")\"\"\"", "def add_contact(database, name: str, email: str, phone: int) -> None:\n # Searches the database for the the current contact (excel row)\n cursor = database.execute(\"SELECT DISTINCT name, email, phone FROM contacts \"\n \"WHERE name = ? AND email =? OR phone = ?\", (name, email, phone))\n # Assigns the cursor results to the 'row' variable\n row = cursor.fetchone()\n # print(row) # For debugging\n\n # This checks if the contact already exists in the database or not\n if row:\n print(\"\\n{}, {}, {} is already in the database.\".format(name, email, phone))\n # Add the contact to the 'duplicates' table to retain the info in case of any\n # discrepancies in the final database.\n database.execute(\"INSERT INTO duplicates VALUES (?, ?, ?)\", (name, email, phone))\n else:\n cursor.execute(\"INSERT INTO contacts VALUES (?, ?, ?)\", (name, email, phone)) # Add contact to db\n cursor.connection.commit()\n # print(\"{}, {}, {} added to database.\".format(name, email, phone)) # For debugging", "def do_addContact(self, line):\n\t\tif not(self.db is None):\n\t\t\tcont = self.db.contact\n\t\t\tcontact_info = {\n\t\t\t\t'first_name': input(\"First name: \"),\n\t\t\t\t'surname': input(\"Surname: \"),\n\t\t\t\t'company': input(\"Company: \"),\n\t\t\t\t'address': input(\"Address: \"),\n\t\t\t\t'telephone': input(\"Telephone: \"),\n\t\t\t\t'email': input(\"Email: \")\n\t\t\t}\n\t\t\tcont.insert_one(contact_info)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def add_contact():\n return 'add contact'", "def _findAndAddContactByEmail(self, email):\n try:\n contact = self._findAndAddContactsByEmail(email)\n except TalkException as e:\n self.raise_error(e.reason)\n\n contact = contact.values()[0]\n\n for c in self.contacts:\n if c.id == contact.mid:\n self.raise_error(\"%s already exists\" % contact.displayName)\n return\n\n c = LineContact(self, contact)\n self.contacts.append(c)\n\n self.contacts.sort()\n return c", "def post_save_add_contact(sender, **kwargs):\n obj = kwargs['instance']\n active_campaign_list = Campaign.objects.filter(phonebook__contact__id=obj.id,\n status=CAMPAIGN_STATUS.START)\n # created instance = True + active contact + active_campaign\n if kwargs['created'] and obj.status == CONTACT_STATUS.ACTIVE \\\n and active_campaign_list.count() >= 1:\n for elem_campaign in active_campaign_list:\n try:\n Subscriber.objects.create(\n contact=obj,\n duplicate_contact=obj.contact,\n status=SUBSCRIBER_STATUS.PENDING,\n campaign=elem_campaign)\n except:\n pass", "def add_contacts(self, contacts, group=None, 
group_uuid=None):\n payload = self._build_params(contacts=contacts, action='add', group=group, group_uuid=group_uuid)\n self._post('contact_actions', None, payload)", "def addContact (self, dleseContributor):\n\t\tcontacts_el = self.selectSingleNode(self.dom, 'record:collection:contacts')\n\t\tif not contacts_el:\n\t\t\traise Exception, 'contacts node not found'\n\t\tel = XmlUtils.addElement(self.dom, contacts_el, 'contact')\n\t\t\n\t\tel.setAttribute (\"email\",dleseContributor.getEmail());\n\t\tel.setAttribute (\"name\", dleseContributor.getFullName());\n\t\tel.setAttribute (\"urlReport\", 'false');\n\t\tel.setAttribute (\"active\", 'false');", "def add_cc_recipient(self, address):\n if not self.validate_email_address(address):\n raise Exception(\"Invalid email address '%s'\" % address)\n self._cc.append(address)", "def add_contact_to_db(self):\n self.init_db(self._testing)\n\n # make sure that the object is not in the db\n assert self.uid == \"\"\n\n self._insert_row_into_db(Contact.table_name, Contact.columns, self.values)\n\n # update this objects uid\n self.uid = self._get_id_of_last_row(Contact.table_name)", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def _findAndAddContactByPhone(self, phone):\n try:\n contact = self._findAndAddContactsByPhone(phone)\n except TalkException as e:\n self.raise_error(e.reason)\n\n contact = contact.values()[0]\n\n for c in self.contacts:\n if c.id == contact.mid:\n self.raise_error(\"%s already exists\" % contact.displayName)\n return\n\n c = LineContact(self, contact)\n self.contacts.append(c)\n\n self.contacts.sort()\n return c", "def sms_add_campaign_by_addressbook_id(self, sender_name, addressbook_id, body, additional_params={}):\n\n logger.info(\"Function call: sms_add_campaign_by_addressbook_id\")\n if not sender_name:\n return self.__handle_error('Seems you not pass sender name')\n if not addressbook_id:\n return self.__handle_error('Seems you not pass addressbook ID')\n if not body:\n return self.__handle_error('Seems you not pass sms text')\n\n data_to_send = {\n 'sender': sender_name,\n 'addressBookId': addressbook_id,\n 'body': body\n }\n\n if additional_params:\n data_to_send.update(additional_params)\n\n return self.__handle_result(self.__send_request('sms/campaigns', 'POST', data_to_send))", "def add_contact_to_db(name, email, module_db_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n else:\n success = False\n except Exception as e:\n print(e.args)\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 0\n contact.save()\n success = True\n else:\n success = False\n\n return success, name, email" ]
[ "0.7627295", "0.7424115", "0.68625385", "0.68550974", "0.68319404", "0.6752613", "0.66253865", "0.64495605", "0.643185", "0.64214087", "0.6403436", "0.63824666", "0.6353236", "0.63223153", "0.6272251", "0.6199567", "0.61656684", "0.60307956", "0.5995998", "0.5977701", "0.59146667", "0.58994406", "0.58641225", "0.5854355", "0.5829897", "0.5821406", "0.5816323", "0.5803648", "0.5690427", "0.56581575" ]
0.7699542
0
Checks whether the contact has any active subscription. If count is True, returns the number of active subscriptions instead.
def has_active_subscription(self, count=False):
    subs = self.subscriptions.filter(active=True)
    return subs.exists() if count is False else subs.count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "def contacts_in_route_count(self):\n subprods = SubscriptionProduct.objects.filter(route=self).distinct('subscription__contact')\n return subprods.count", "def get_subscribed_users(self, obj):\n return obj.subscribed_users.count()", "def _count_subscriptions(self):\n for partner in self:\n subscriptions = self.env['subscription.subscription']\n count = subscriptions.sudo().search_count([('partner_id', '=', partner.id)])\n for child in partner.child_ids:\n count += subscriptions.sudo().search_count([('partner_id', '=', child.id)])\n partner.subscriptions_count = count", "def contact_count(self, *args, **kwargs) -> Any:\n pass", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def getCount(self):\n return self.count", "def count(self):\n return self.queryset.count() if self.allow_count else None", "def _check_active(self):\n for record in self:\n count = self.search_count([('active', '=', True), ('id', '!=', record.id)])\n if count:\n raise ValidationError(_('Multiple active records. Please Inactive other!'))", "def get_already_contacted_count(self, seller_id):\n return self.get_already_contacted(seller_id).count()", "def getCount(self):\n return self.base.get(\"count\", [])", "def available(self):\n if self._count is not None:\n # If count is available, use it\n return self._count\n else:\n # We really have no idea.\n # Don't know what do do here, but for this\n # impl, which should only be constructed with\n # python lists, self._count should never be none.\n return 0", "def count(self):\n return self.get_count()", "def rpc_campaign_alerts_is_subscribed(self, campaign_id):\n\t\tusername = self.basic_auth_user\n\t\tsession = db_manager.Session()\n\t\tquery = session.query(db_models.AlertSubscription)\n\t\tquery = query.filter_by(campaign_id=campaign_id, user_id=username)\n\t\tresult = query.count()\n\t\tsession.close()\n\t\treturn result", "def count_subscribers(self, topic_name):\n return self._count_publishers_or_subscribers(topic_name, _rclpy.rclpy_count_subscribers)", "async def count(self) -> int:\n return (\n await self.document_model.get_motor_collection().count_documents(\n self.get_filter_query()\n )\n )", "def count(self):\n return len(self)", "def get_not_contacted_count(self, seller_id):\n return self.get_not_contacted(seller_id).count()", "def _subscribed(self, account_id):\n sql = \"\"\"SELECT 1 FROM hive_subscriptions\n WHERE community_id = :community_id\n AND account_id = :account_id\"\"\"\n return bool(DB.query_one(\n sql, community_id=self.community_id, account_id=account_id))", "def active_count(self):\n cnt = 0\n for item in self[:]:\n if item.is_alive():\n cnt += 1\n else:\n self.remove(item)\n return cnt", "def count(self):\n return self._lift(\"count\")", "def count(self):\n \n return self._count", "def clean_subscriptions(self):\n cleaned_data = super(SignupSubscriptionForm, self).clean() or self.cleaned_data\n checked = 0\n for key, value in cleaned_data.items():\n if key.startswith('subscription') and value:\n checked += 1\n if checked > 0:\n return cleaned_data\n else:\n raise ValidationError(self.unchecked_error)", "def count(self) -> int:\n return self._adapter.count()", "def active_member_count(self):\n return 
self._active_member_count", "def count(self, resource):\n return len(self.all(resource))", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count" ]
[ "0.75198597", "0.6862191", "0.66709375", "0.628151", "0.6169264", "0.60658246", "0.6064264", "0.60293543", "0.589479", "0.58911204", "0.5842869", "0.57893777", "0.57746965", "0.57657087", "0.5731054", "0.57261413", "0.5724176", "0.5717416", "0.5708131", "0.5706865", "0.57061046", "0.57014954", "0.5695148", "0.56808925", "0.5655462", "0.56470287", "0.5629195", "0.5629195", "0.5629195", "0.5629195" ]
0.7511574
1
Returns a queryset with the subscriptions of this contact.
def get_subscriptions(self):
    return self.subscriptions.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def GetSubscriptions(self):\n\n return self.__GetJson(\"/subscriptions\", True)", "def get_all_subscriptions(cls, **kwargs):\n return Subscription.query.filter(**kwargs).all()", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def get_subscriptions_from_self(self):\n return self._roster.get_my_subscriptions()", "def subscriptions(self):\n if self.__subscriptions_manager is None:\n self.__subscriptions_manager = SubscriptionsManager(\n \"/subscriptions\", self._client\n )\n return self.__subscriptions_manager", "def get_newsletters(self):\n return SubscriptionNewsletter.objects.filter(contact=self)", "def get_queryset(self):\n # distinct is needed to prevent multiple instances of product in resultset if multiple subscriptions are present\n return self.model.objects.filter(subscription__owner=self.request.user).distinct()", "def listSubscriptions() -> object:\n\n db = Db()\n return db.Subscriptions.objects().to_json()", "def GetSubscriptionsFrom(self):\n\n return self.__GetJson(\"/subscriptions/from\", True)", "def get_subscribers(self) -> Iterator[Any]:\n for subscription in self._subscriptions[self.id]:\n yield subscription.subscriber", "def get_queryset(self):\n return self.request.user.contacts.all()", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def list(cls, **kwargs):\n response = Yola().list_subscriptions(**kwargs)\n return [cls(**sub) for sub in response['results']]", "def subscribedQueries(self):\n return map(Query.get, self.subscriptions)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()", "def _get_cloudwatch_subscriptions(self):\n return self._get_subscriptions(self.cloudwatch_arn)", "def subscriptions(self) -> list[Subscription]:\n return [\n *chain.from_iterable(self._simple_subscriptions.values()),\n *self._wildcard_subscriptions,\n ]", "def get_subscriptions(self, use_threading=False):\r\n \r\n if self._subscriptions is None:\r\n if use_threading:\r\n self.load_subscriptions_threaded()\r\n else:\r\n self._subscriptions = []\r\n for page in range(self._subscription_pages):\r\n self._load_subscriptions(page=page+1)\r\n return self._subscriptions", "def subscriptions(self) -> 
pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def get(self):\n user = get_authenticated_user()\n account_number = marketplace_users.get_account_number(user)\n if not account_number:\n raise NotFound()\n\n user_subscriptions = marketplace_subscriptions.get_list_of_subscriptions(account_number)\n\n for subscription in user_subscriptions:\n bound_to_org, organization = organization_skus.subscription_bound_to_org(\n subscription[\"id\"]\n )\n # fill in information for whether a subscription is bound to an org\n if bound_to_org:\n subscription[\"assigned_to_org\"] = organization.username\n else:\n subscription[\"assigned_to_org\"] = None\n\n return user_subscriptions", "async def get_subscriptions(\n self,\n\t\tfields: Optional[List[BaseUserGroupFields]] = None,\n\t\toffset: Optional[int] = None,\n\t\tcount: Optional[int] = None,\n\t\t**kwargs\n ) -> donut.GetSubscriptionsResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"donut.getSubscriptions\", params)\n model = donut.GetSubscriptionsResponse\n return model(**response).response", "def GetSubscriptionsTo(self):\n\n return self.__GetJson(\"/subscriptions/to\", True)", "def getSubscriptionList(self):\r\n return self.feeds", "def get_all_subscriptions(self, next_token=None):\r\n params = {'ContentType' : 'JSON'}\r\n if next_token:\r\n params['NextToken'] = next_token\r\n response = self.make_request('ListSubscriptions', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)" ]
[ "0.75429577", "0.7530589", "0.7323063", "0.7279339", "0.7275472", "0.72422016", "0.7151664", "0.7102055", "0.7097441", "0.6876215", "0.68351597", "0.66983294", "0.66664577", "0.66407704", "0.66184616", "0.654786", "0.64856887", "0.64463097", "0.64458936", "0.6443956", "0.6442886", "0.6370845", "0.6366269", "0.63487196", "0.6348114", "0.6343456", "0.62632483", "0.6245494", "0.62437534", "0.61594826" ]
0.8102227
0
Returns a list of the distinct subscriptions that have expired invoices.
def get_subscriptions_with_expired_invoices(self):
    subscriptions = []
    for invoice in self.get_expired_invoices():
        for invoice_item in invoice.invoiceitem_set.all():
            if (
                invoice_item.subscription
                and invoice_item.subscription not in subscriptions
            ):
                subscriptions.append(invoice_item.subscription)
    return subscriptions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_expired_invoices(self):\n return self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n )", "def expired_invoices_count(self):\n return self.get_expired_invoices().count()", "def get_unpaid_invoices(self):\n unpaid = []\n\n # cycle through all (active) projects\n for project in self.project_list:\n # cycle through the invoices of the project\n for invoice in project.get_invoice_list():\n # append it, if it has no paid_date set (None)\n if invoice.get_paid_date() is None and invoice.get_date() is not None:\n unpaid.append(invoice)\n\n # sort the invoices by due date\n unpaid = sorted(unpaid, key=lambda x: x.get_due_date())\n\n return unpaid", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def get_unpaid_invoices(self, with_vat=True):\n\n return self.call(method='getUnpaidInvoices', args=[with_vat])", "def remove_inactive_consumers():\n\n THRESHOLD_MINUTES = 5\n\n schema = get_schema()\n for subscription in schema.subscription_type.fields.keys():\n to_remove = []\n for consumer in frappe.cache().hkeys(get_subscription_redis_key(subscription)):\n subscription_info = frappe.cache().hget(\n get_subscription_redis_key(subscription), consumer)\n\n should_remove = True\n if subscription_info.last_ping:\n last_ping = get_datetime(subscription_info.last_ping)\n if last_ping + timedelta(minutes=THRESHOLD_MINUTES) >= now_datetime():\n should_remove = False\n\n if should_remove:\n to_remove.append(consumer)\n\n if len(to_remove):\n frappe.cache().hdel(\n get_subscription_redis_key(subscription), *to_remove)", "def expired(self, *args, **kwargs):\n # Expired messages are those that have been delivered, AND have a\n # set `expire_on` attribute.\n #\n # OR, those messages that were never delivered, but are no longer\n # relevant (ie. 
they're too old).\n now = timezone.now()\n since = timezone.now() - timedelta(days=3) # 3 days ago\n\n return self.get_queryset().filter(\n Q(expire_on__lte=now) |\n Q(deliver_on__lte=since, success=None) |\n Q(deliver_on__lte=since, success=False)\n )", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def recurring(self):\n return self.filter(total_billing_cycles__isnull=True)", "def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')", "def invoices(self):\r\n return inv.Invoices(self)", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def remove_expired(self):\n now = time.time()\n return [self.remove_if_expired(key, now) for key in self._request_sessions.keys()[:]].count(True)", "def get_recurring_orderitems(self):\n subscriptions = []\n for orderitem in self.order.orderitem_set.all():\n product = orderitem.product\n if product.is_subscription:\n self.log_extra(\"Found subscription product: %s\", product.slug)\n if product.subscriptionproduct.recurring:\n self.log_extra(\"Subscription is recurring: %s\", product.slug)\n subscriptions.append(orderitem)\n elif product.subscriptionproduct.trial_set.count() > 0:\n self.log_extra(\n \"Not recurring, but it has a trial: %s\", product.slug\n )\n subscriptions.append(orderitem)\n else:\n self.log_extra(\"Not a recurring product: %s \", product.slug)\n else:\n self.log_extra(\"Not a subscription product: %s\", product.slug)\n return subscriptions", "def get_subscriptions(self):\n return self.subscriptions.all()", "def get_expired_campaign(self):\n kwargs = {}\n kwargs['expirationdate__lte'] = datetime.utcnow().replace(tzinfo=utc)\n return Campaign.objects.filter(**kwargs).exclude(status=CAMPAIGN_STATUS.END)", "def subscriptions(self, lid): #TODO: needs to check parents\n subs = []\n for sub in self._subscriptions.values():\n if sub.lid == lid: subs.append(sub.eid)\n return subs", "def getPurchaseDates(self):\n\t\treturn self.dateList", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def _get_invoices_for_payment(cls, account_id: int) -> List[InvoiceModel]:\n valid_statuses = (InvoiceStatus.APPROVED.value, InvoiceStatus.REFUND_REQUESTED.value)\n invoice_ref_subquery = db.session.query(InvoiceReferenceModel.invoice_id). 
\\\n filter(InvoiceReferenceModel.status_code.in_((InvoiceReferenceStatus.ACTIVE.value,)))\n\n invoices: List[InvoiceModel] = db.session.query(InvoiceModel) \\\n .filter(InvoiceModel.invoice_status_code.in_(valid_statuses)) \\\n .filter(InvoiceModel.payment_method_code == PaymentMethod.EJV.value) \\\n .filter(InvoiceModel.payment_account_id == account_id) \\\n .filter(InvoiceModel.id.notin_(invoice_ref_subquery)) \\\n .all()\n return invoices", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def get_all_subscriptions(self, next_token=None):\r\n params = {'ContentType' : 'JSON'}\r\n if next_token:\r\n params['NextToken'] = next_token\r\n response = self.make_request('ListSubscriptions', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def _CheckExpirations(file_objs):\n expired = []\n unexpired = []\n for file_obj in file_objs:\n if _IsExpired(file_obj):\n expired.append(file_obj)\n else:\n unexpired.append(file_obj)\n return expired, unexpired", "def one_off(self):\n return self.filter(total_billing_cycles__isnull=False)", "def expired(self) -> (str, any):\n t = time()\n expired = []\n for k, v in self.timers.items():\n if v[0] > t: # Dict maintains order of insert.\n break\n expired.append((k,v[1]))\n for k, v in expired:\n del self.timers[k]\n yield (k,v)", "def listSubscriptions() -> object:\n\n db = Db()\n return db.Subscriptions.objects().to_json()", "def _get_related_invoices(self):\n rslt = super(StockMove, self)._get_related_invoices()\n rslt += self.mapped('picking_id.subcontract_id.invoice_ids').filtered(lambda x: x.state not in ('draft', 'cancel'))\n return rslt" ]
[ "0.78037703", "0.6684304", "0.6344531", "0.6057695", "0.6041876", "0.58982563", "0.5830548", "0.5798724", "0.5777036", "0.57421863", "0.5709596", "0.5693749", "0.5611359", "0.5611255", "0.558637", "0.55561835", "0.5468073", "0.54159164", "0.5400865", "0.53974116", "0.53971344", "0.5356106", "0.5347609", "0.5309329", "0.52915764", "0.5289708", "0.5284294", "0.52828556", "0.527851", "0.5260934" ]
0.8587474
0