Dataset fields (name / type / length or value range):

query            stringlengths    9 – 9.05k
document         stringlengths    10 – 222k
metadata         dict
negatives        sequencelengths  30 – 30
negative_scores  sequencelengths  30 – 30
document_score   stringlengths    4 – 10
document_rank    stringclasses    2 values
Formats this date using calendar form, for example 1969-07-20. basic: True/False, selects the basic form, e.g., 19690720; default is False. truncation: one of the Truncation constants, used to select truncated forms of the date; default is NoTruncation.
def GetCalendarString(self, basic=False, truncation=NoTruncation):
    if self.day is None:
        if self.month is None:
            if self.week:
                raise DateTimeError(
                    "can't get calendar string with week precision")
            if self.year is None:
                if self.century is None:
                    raise DateTimeError("no date to format")
                else:
                    if truncation == NoTruncation:
                        return "%02i" % self.century
                    else:
                        raise ValueError
            else:
                if truncation == NoTruncation:
                    return "%02i%02i" % (self.century, self.year)
                elif truncation == Truncation.Century:
                    return "-%02i" % self.year
                else:
                    raise ValueError
        else:
            if truncation == NoTruncation:
                return "%02i%02i-%02i" % (self.century, self.year, self.month)
            elif truncation == Truncation.Century:
                if basic:
                    return "-%02i%02i" % (self.year, self.month)
                else:
                    return "-%02i-%02i" % (self.year, self.month)
            elif truncation == Truncation.Year:
                return "--%02i" % self.month
            else:
                raise ValueError
    else:
        if truncation == NoTruncation:
            if basic:
                return "%02i%02i%02i%02i" % (
                    self.century, self.year, self.month, self.day)
            else:
                return "%02i%02i-%02i-%02i" % (self.century, self.year,
                                               self.month, self.day)
        elif truncation == Truncation.Century:
            if basic:
                return "%02i%02i%02i" % (self.year, self.month, self.day)
            else:
                return "%02i-%02i-%02i" % (self.year, self.month, self.day)
        elif truncation == Truncation.Year:
            if basic:
                return "--%02i%02i" % (self.month, self.day)
            else:
                return "--%02i-%02i" % (self.month, self.day)
        elif truncation == Truncation.Month:
            return "---%02i" % self.day
        else:
            raise ValueError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def __formatDate(self, num):\n if len(num) < 2:\n num = '0'+num\n return num", "def _format_date(input_date, day_flag, sep_char=\"-\"):\n date_iso = input_date[6:10] + sep_char + input_date[0:2]\n if day_flag:\n date_iso = date_iso + sep_char + input_date[3:5]\n return date_iso", "def date_prettyfier(self, date):\n units = 'days since 1900-01-01 00:00'\n date = date * 365.25\n date = cftime.num2date(date, units)\n pretty_date = str(date.day)+'/'+str(date.month)+'/'+str(date.year-1900) \n return pretty_date", "def format_data_value(self, value):\n if isinstance(value, bool):\n value = \"true\" if value else \"false\"\n elif isinstance(value, datetime.datetime):\n value = value.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return \"%s\" % value", "def isoformat(self):\n s = '{0:04}'.format(self._year)\n if self._month:\n s += '-{0:02}'.format(self._month)\n if self._day:\n s += '-{0:02}'.format(self._day)\n return s", "def reformatdate(self, date):\n# print('DATE', self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def format_date(self, data):\n return '%s/%s' % (data.month, data.day)", "def _format_date(self, date, humanize=True):\n if date:\n if humanize and date in self.special_dates:\n rv = self.special_dates[date]\n else:\n rv = date.strftime(self.date_format)\n return rv\n else:\n return ''", "def format_date(time=False):\n\n return arrow.get(time).format('DD-MM-YYYY')", "def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))", "def format_date(date, format='%m/%d/%Y'):\n if date is not None:\n return \"%02d/%02d/%04d\" % (date.month, date.day, date.year)\n else:\n return ''", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def format(self, dt, force_date=False):\n if isinstance(dt, datetime.datetime) and not force_date:\n return dt.strftime('%y/%m/%d %H:%M')\n else:\n return self.format_date(dt)", "def __str__(self):\n return '{y}-{m:0>2}-{d:0>2}'.format(y=self.year, m=self.month, d=self.day)", "def format_date(self, date):\n return date.strftime('%Y-%m-%d')", "def _format_value_date_32A(self, val):\n value_date = val.get('value_date')\n currency = val.get('currency')\n interbank_settled_amount = val.get('interbank_settled_amount')\n date_format = '%y%m%d'\n if value_date and currency and interbank_settled_amount:\n value_date = FSwiftWriterUtils.format_date(value_date, date_format)\n interbank_settled_amount = apply_currency_precision(currency, abs(float(interbank_settled_amount)))\n val = str(value_date) + str(currency) + str(FSwiftMLUtils.float_to_swiftmt(str(interbank_settled_amount)))\n return val", "def _format_bool_(value):\n\n from ocgis.util.helpers import format_bool\n\n return format_bool(value)", "def special_case(self):\n Input.clear_display(self, self.entries[4])\n 
self.entries[4].insert(INSERT, '1712/02/30 was a real date in Sweden')\n self.entries[4].configure(state='readonly')", "def isoformat(self):\n return \"%04d-%02d-%02d\" % (self._year, self._month, self._day)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def __repr__(self):\n s = '%02d/%02d/%04d' % (self.month, self.day, self.year)\n return s", "def __repr__(self):\n s = '%02d/%02d/%04d' % (self.month, self.day, self.year)\n return s", "def iso_date(self):\n return self.strftime(self.FORMAT_PRECISION_DAY)", "def date_format(self):\n return self._date_format", "def __str__(self):\n # year, formatted unambiguously as YYYY, padded with zeros if not\n # four-digit\n yearString = \"0\" * (4 - len(str(self.get_year()))\n ) + str(self.get_year())\n # month, formatted unambiguously as MM, padded with zeros if not\n # two-digit\n monthString = \"0\" * (2 - len(str(self.get_month()))\n ) + str(self.get_month())\n # day, formatted unambiguously as DD, padded with zeros\n dayString = \"0\" * (2 - len(str(self.get_day()))) + str(self.get_day())\n # aggregate all the string representations\n return \"%s-%s-%s\" % (yearString, monthString, dayString)", "def date_format_correct(self):\n valid_format = True\n try:\n new_val = self.date_edit.text()\n datetime_object = datetime.strptime(new_val, \"%Y-%m-%d\")\n except BaseException:\n valid_format = False\n return valid_format", "def __repr__(self):\n s = \"%02d/%02d/%04d\" % (self.month, self.day, self.year)\n return s", "def get_date_display(self, context):\n return '{year}/{month}'.format(year=self.get_year(),\n month=self.get_month().zfill(2))", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()" ]
[ "0.60311157", "0.58007", "0.56647265", "0.56438917", "0.5642807", "0.56053597", "0.54343116", "0.5422428", "0.5410306", "0.54023105", "0.5395332", "0.53769016", "0.53595513", "0.5357019", "0.53193927", "0.52996176", "0.52881414", "0.5263601", "0.5241613", "0.52337486", "0.5220136", "0.51987386", "0.51987386", "0.5175127", "0.51586896", "0.51414734", "0.5133749", "0.51314443", "0.51265526", "0.5121378" ]
0.6552211
0
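Read as a usage sketch, the complete-date branch of GetCalendarString above reduces to plain %-formatting over the four date fields. The values below are illustrative assumptions, not taken from the dataset:

century, year, month, day = 19, 69, 7, 20  # hypothetical complete date: 20 July 1969
print("%02i%02i%02i%02i" % (century, year, month, day))    # basic=True  -> 19690720
print("%02i%02i-%02i-%02i" % (century, year, month, day))  # basic=False -> 1969-07-20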
Formats this date using ordinal form, for example 1969-201. basic: True/False, selects the basic form, e.g., 1969201; default is False. truncation: one of the Truncation constants, used to select truncated forms of the date; default is NoTruncation.
def GetOrdinalString(self, basic=False, truncation=NoTruncation):
    century, year, ordinalDay = self.GetOrdinalDay()
    if ordinalDay is None:
        # same as for calendar strings
        return self.GetCalendarString(basic, truncation)
    else:
        if truncation == NoTruncation:
            if basic:
                return "%02i%02i%03i" % (century, year, ordinalDay)
            else:
                return "%02i%02i-%03i" % (century, year, ordinalDay)
        elif truncation == Truncation.Century:
            if basic:
                return "%02i%03i" % (year, ordinalDay)
            else:
                return "%02i-%03i" % (year, ordinalDay)
        elif truncation == Truncation.Year:
            return "-%03i" % ordinalDay
        else:
            raise ValueError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __formatDate(self, num):\n if len(num) < 2:\n num = '0'+num\n return num", "def GetOrdinalString(\n self,\n basic=0,\n truncation=0,\n ndp=0,\n zonePrecision=Precision.Complete,\n dp=\",\",\n tDesignator=\"T\"):\n return self.date.GetOrdinalString(basic, truncation) + tDesignator +\\\n self.time.GetString(basic, NoTruncation, ndp, zonePrecision, dp)", "def isoformat(self):\n s = '{0:04}'.format(self._year)\n if self._month:\n s += '-{0:02}'.format(self._month)\n if self._day:\n s += '-{0:02}'.format(self._day)\n return s", "def fromordinal(cls, ordinal):\n return date()", "def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))", "def isoformat(self):\n return \"%04d-%02d-%02d\" % (self._year, self._month, self._day)", "def get_date_display(self, context):\n return '{year}/{month}'.format(year=self.get_year(),\n month=self.get_month().zfill(2))", "def GetCalendarString(self, basic=False, truncation=NoTruncation):\n if self.day is None:\n if self.month is None:\n if self.week:\n raise DateTimeError(\n \"can't get calendar string with week precision\")\n if self.year is None:\n if self.century is None:\n raise DateTimeError(\"no date to format\")\n else:\n if truncation == NoTruncation:\n return \"%02i\" % self.century\n else:\n raise ValueError\n else:\n if truncation == NoTruncation:\n return \"%02i%02i\" % (self.century, self.year)\n elif truncation == Truncation.Century:\n return \"-%02i\" % self.year\n else:\n raise ValueError\n else:\n if truncation == NoTruncation:\n return \"%02i%02i-%02i\" % (self.century,\n self.year,\n self.month)\n elif truncation == Truncation.Century:\n if basic:\n return \"-%02i%02i\" % (self.year, self.month)\n else:\n return \"-%02i-%02i\" % (self.year, self.month)\n elif truncation == Truncation.Year:\n return \"--%02i\" % self.month\n else:\n raise ValueError\n else:\n if truncation == NoTruncation:\n if basic:\n return \"%02i%02i%02i%02i\" % (\n self.century, self.year, self.month, self.day)\n else:\n return \"%02i%02i-%02i-%02i\" % (self.century,\n self.year,\n self.month,\n self.day)\n elif truncation == Truncation.Century:\n if basic:\n return \"%02i%02i%02i\" % (self.year, self.month, self.day)\n else:\n return \"%02i-%02i-%02i\" % (self.year, self.month, self.day)\n elif truncation == Truncation.Year:\n if basic:\n return \"--%02i%02i\" % (self.month, self.day)\n else:\n return \"--%02i-%02i\" % (self.month, self.day)\n elif truncation == Truncation.Month:\n return \"---%02i\" % self.day\n else:\n raise ValueError", "def only_ordinal(number):\n _ordinal = lambda n: \"%s\" % (\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4]) # some black magic.\n return _ordinal(number)", "def change_format_to_database_index(self, date):\n year = date[0:4] + ','\n month = date[4:6]\n day = date[6:8]\n if month[0] == '0':\n month = month[1]\n\n if day[0] == '0':\n day = day[1]\n\n day = ' ' + day + ','\n month = ' ' + month\n\n return year + day + month", "def _format_date(input_date, day_flag, sep_char=\"-\"):\n date_iso = input_date[6:10] + sep_char + input_date[0:2]\n if day_flag:\n date_iso = date_iso + sep_char + input_date[3:5]\n return date_iso", "def __str__(self):\n return '{y}-{m:0>2}-{d:0>2}'.format(y=self.year, m=self.month, d=self.day)", "def format_date(self, data):\n return '%s/%s' % (data.month, data.day)", "def humanize_day(day_num):\n if 11 <= day_num <= 13:\n suffix = 'th'\n else:\n r = day_num % 10\n if r == 1:\n suffix = 'st'\n elif r == 
2:\n suffix = 'nd'\n elif r == 3:\n suffix = 'rd'\n else:\n suffix = 'th'\n return str(day_num) + suffix", "def toordinal(self):\n return _ymd2ord(self._year, self._month, self._day)", "def to_ordinal(self):\n return ((self.month - 1) * 20) + self.day - 1", "def format_date(int_date):\n\n if int_date == 0:\n return 'today'\n\n tmp_date = int_date\n day = tmp_date % 100\n tmp_date = tmp_date / 100\n month = tmp_date % 100\n year = tmp_date / 100\n\n month_str = MONTHS[month]\n date_str = '%d-%s-%d' % (year, month_str, day)\n return date_str", "def ordinal_filter(value):\n digit = value % 10\n if 10 < value < 20:\n o = 'th'\n elif digit is 1:\n o = 'st'\n elif digit is 2:\n o = 'nd'\n elif digit is 3:\n o = 'rd'\n else:\n o = 'th'\n return '%d%s' % (value, o)", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def reformatdate(self, date):\n# print('DATE', self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def format_date(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d')", "def ordinal(n):\n if 11 <= n <= 19:\n return str(n) + \"th\"\n s = str(n)\n last = int(s[-1])\n if 1 <= last <= 3:\n return s + (\"st\", \"nd\", \"rd\")[last-1]\n return s + \"th\"", "def format_date_sortkey(self, data):\n return self.input['start_date'].date().strftime('%Y%m%d')", "def ordinal(number):\n _ordinal = lambda n: \"%d%s\" % (n,\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4]) # some black magic.\n return _ordinal(number)", "def ordinal(number: int):\n if type(number) != int:\n raise TypeError\n else:\n if 11 <= number <= 20 or number % 10 == 0:\n return str(number) + 'th'\n elif number % 10 == 1:\n return str(number) + 'st'\n elif number % 10 == 2:\n return str(number) + 'nd'\n elif number % 10 == 3:\n return str(number) + 'rd'\n else:\n return str(number) + 'th'", "def _format_date(self, date, humanize=True):\n if date:\n if humanize and date in self.special_dates:\n rv = self.special_dates[date]\n else:\n rv = date.strftime(self.date_format)\n return rv\n else:\n return ''", "def format_date_iso(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y-%m-%d')", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def date_prettyfier(self, date):\n units = 'days since 1900-01-01 00:00'\n date = date * 365.25\n date = cftime.num2date(date, units)\n pretty_date = str(date.day)+'/'+str(date.month)+'/'+str(date.year-1900) \n return pretty_date", "def ordinal(num):\n if num > 9:\n secondToLastDigit = str(num)[-2]\n if secondToLastDigit == '1':\n return 'th'\n lastDigit = num % 10\n if (lastDigit == 1):\n return 'st'\n elif (lastDigit == 2):\n return 'nd'\n elif (lastDigit == 3):\n return 'rd'\n else:\n return 'th'" ]
[ "0.6289403", "0.57984483", "0.5790359", "0.5771288", "0.5765667", "0.5698245", "0.56042475", "0.5602178", "0.5583595", "0.5578616", "0.557785", "0.5573462", "0.55716103", "0.5546931", "0.5524471", "0.5520423", "0.5482463", "0.54753685", "0.54540724", "0.54462266", "0.54154456", "0.54092914", "0.5401386", "0.5396953", "0.53888535", "0.538069", "0.53602535", "0.5360207", "0.53480536", "0.5345933" ]
0.6125912
1
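The same pattern applies to GetOrdinalString above, with the day of year padded to three digits. Assumed values for illustration (day 201 of 1969):

century, year, ordinalDay = 19, 69, 201  # hypothetical complete ordinal date
print("%02i%02i%03i" % (century, year, ordinalDay))   # basic=True  -> 1969201
print("%02i%02i-%03i" % (century, year, ordinalDay))  # basic=False -> 1969-201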
Formats this date using week form, for example 1969-W29-7. basic: True/False, selects the basic form, e.g., 1969W297; default is False. truncation: one of the Truncation constants, used to select truncated forms of the date; default is NoTruncation.
def GetWeekString(self, basic=False, truncation=NoTruncation):
    century, decade, year, week, day = self.GetWeekDay()
    if day is None:
        if week is None:
            # same as the calendar string
            return self.GetCalendarString(basic, truncation)
        else:
            if truncation == NoTruncation:
                if basic:
                    return "%02i%i%iW%02i" % (century, decade, year, week)
                else:
                    return "%02i%i%i-W%02i" % (century, decade, year, week)
            elif truncation == Truncation.Century:
                if basic:
                    return "%i%iW%02i" % (decade, year, week)
                else:
                    return "%i%i-W%02i" % (decade, year, week)
            elif truncation == Truncation.Decade:
                if basic:
                    return "-%iW%02i" % (year, week)
                else:
                    return "-%i-W%02i" % (year, week)
            elif truncation == Truncation.Year:
                return "-W%02i" % week
            else:
                raise ValueError
    else:
        if truncation == NoTruncation:
            if basic:
                return "%02i%i%iW%02i%i" % (
                    century, decade, year, week, day)
            else:
                return "%02i%i%i-W%02i-%i" % (century, decade, year,
                                              week, day)
        elif truncation == Truncation.Century:
            if basic:
                return "%i%iW%02i%i" % (decade, year, week, day)
            else:
                return "%i%i-W%02i-%i" % (decade, year, week, day)
        elif truncation == Truncation.Decade:
            if basic:
                return "-%iW%02i%i" % (year, week, day)
            else:
                return "-%i-W%02i-%i" % (year, week, day)
        elif truncation == Truncation.Year:
            if basic:
                return "-W%02i%i" % (week, day)
            else:
                return "-W%02i-%i" % (week, day)
        elif truncation == Truncation.Week:
            return "-W-%i" % day
        else:
            raise ValueError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')", "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "def date_with_day_of_week_appended(mydate): \n import datetime\n month, day, year = (int(x) for x in mydate.split('/')) \n shortened_year = abs(year) % 100 \n day_of_week = datetime.date(year, month, day).strftime(\"%A\")\n return \"%s/%s/%s %s\" % (month,day,shortened_year, day_of_week)", "def formatWeek(self, themonth, theweek, num_weeks):\n s = ''.join(self.formatDay(themonth, d, num_weeks) for d in theweek)\n return '<tr>%s</tr>' % s", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def weeknumber_option(option):\n option = option.lower()\n if option == 'left':\n return 'left'\n elif option == 'right':\n return 'right'\n elif option in ['off', 'false', '0', 'no', 'none']:\n return False\n else:\n raise VdtValueError(\n \"Invalid value '{}' for option 'weeknumber', must be one of \"\n \"'off', 'left' or 'right'\".format(option))", "def formatweek(self, theweek, events):\n s = ''.join(self.formatday(d, wd, events) for (d, wd) in theweek)\n return '<tr>%s</tr>' % s", "def formatweek(self, theweek, schedules):\n s = ''.join(self.formatday(d, wd, schedules) for (d, wd) in theweek)\n return '<tr>%s</tr>' % s", "def _create_week_dates_text(self):\n week_start = []\n week_end = []\n week_text = []\n week_start.append(self.start_date)\n week_end.append(self.start_date + timedelta(days=6))\n week_start.append(week_end[0] + timedelta(days=1))\n week_end.append(self.display_end_date)\n for i in (0,1):\n week_start_month = week_start[i].strftime(\"%b\")\n week_start_day = week_start[i].strftime(\"%d\").lstrip(\"0\")\n week_end_month = week_end[i].strftime(\"%b\")\n week_end_day = week_end[i].strftime(\"%d\").lstrip(\"0\")\n week_text.append(\"%s %s - %s %s\" %(week_start_month, \n week_start_day, week_end_month, week_end_day))\n return week_text", "def formatweekheader(self):\n s = ''.join(self.formatweekday(i) for i in self.iterweekdays())\n return '<tr class=\"weekheader\">%s</tr>' % s", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def formatWeekHeader(self):\n s = ''.join(self.formatWeekDay(i) for i in self.iterweekdays())\n return '<tr>%s</tr>' % s", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def test_status_dictify_include_week(self):\n d = datetime(2014, 5, 8, 17, 17, 51, 0)\n with self.app.app_context():\n s = status(content='my status update', created=d, save=True)\n d1 = s.dictify(include_week=False)\n eq_(d1.get(\"week_start\", None), None)\n eq_(d1.get(\"week_end\", None), None)\n\n d2 = s.dictify(include_week=True)\n eq_(d2.get(\"week_start\", None), \"2014-05-05\")\n eq_(d2.get(\"week_end\", None), \"2014-05-11\")", "def convert_date(year: str, week: str):\n date = 
datetime.fromisocalendar(int(year), int(week), 1)\n return date.strftime(\"%m/%d/%YZ\")", "def reporting_week(self):\n\n print(\"Week Numbers:\")\n print(self.time_stamp)\n print(self.time_stamp_iso)\n print(\"Current = {}\".format(self.current_week()))\n print(\"Reporting = {}\".format(self.current_week() - 1))", "def isoweekday(self):\n # 1-Jan-0001 is a Monday\n return self.toordinal() % 7 or 7", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def __init__(self, y, w):\n for d in xrange(-10, 370):\n date = datetime.date(y, 1, 1) + datetime.timedelta(d)\n if date.isocalendar() == (y, w, 1):\n date_a = date\n break\n else:\n raise ValueError(\"Invalid week\")\n date_b = date_a + datetime.timedelta(7)\n super(Week, self).__init__(date_a, date_b)", "def _normalize_publication_datetime(self, article_publication_datetime):\n datetime_parsed = dateparser.parse(article_publication_datetime)\n week_start = datetime_parsed - datetime.timedelta(\n days=datetime_parsed.weekday()\n )\n week_end = week_start + datetime.timedelta(days=6)\n\n return (\n week_start.strftime('%d-%m-%Y'),\n week_end.strftime('%d-%m-%Y')\n )", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "def get_week_date(self, raw_week: str) -> tuple:\n\n search_result = re.search(r'^(\\d+.\\d+)\\s+-\\s+\\d+.\\d+', raw_week)\n\n if \"from\" in raw_week:\n week = re.sub(r'^\\D+', '', raw_week)\n\n elif search_result:\n week = search_result.group(1)\n else:\n week = \"{}.{}\".format(current_day, current_month)\n\n week_in_date_format_1900 = datetime.datetime.strptime(week, \"%d.%m\")\n currect_week = week_in_date_format_1900.replace(current_year)\n\n return currect_week.isoformat(), currect_week.isocalendar()[1]", "def weekly():", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def format_dow(value):\n if value:\n return [\n 'Sunday',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n ][value]\n else:\n return 'N/A'", "def day_of_week(self) -> str:\n return self.elements[4]", "def formatWeekDay(self, day):\n return '<th class=\"day\">%s</th>' % day_abbr[day]", "def week(update: Update, _: CallbackContext) -> None:\n running_total, average_dose_per_day = return_weekly_figure()\n text = \\\n (\n \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(running_total))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(average_dose_per_day)) \n )\n update.message.reply_markdown(text)\n logger.info(\"Getting week update for \" + str(update.message.chat_id))" ]
[ "0.64124775", "0.6225047", "0.5945458", "0.5922567", "0.5873666", "0.5869508", "0.58374524", "0.5801019", "0.5774269", "0.57505846", "0.5708486", "0.56964016", "0.5655071", "0.56362695", "0.55953693", "0.557234", "0.55412495", "0.55311793", "0.5479055", "0.54724705", "0.5455859", "0.5448059", "0.5430915", "0.5429073", "0.5409047", "0.5400757", "0.5367827", "0.53194344", "0.5301311", "0.5266561" ]
0.6724056
0
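For GetWeekString above, the year is split into century, decade and year digits before formatting. Assumed values for illustration (week 29, day 7 of 1969):

century, decade, year, week, day = 19, 6, 9, 29, 7  # hypothetical complete week date
print("%02i%i%iW%02i%i" % (century, decade, year, week, day))    # basic=True  -> 1969W297
print("%02i%i%i-W%02i-%i" % (century, decade, year, week, day))  # basic=False -> 1969-W29-7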
LeapYear returns True if this date falls in a leap year and False otherwise. Note that leap years are all years divisible by 4, except those divisible by 100 but including those divisible by 400.
def LeapYear(self):
    if self.year is None:
        raise DateTimeError(
            "Insufficient precision for leap year calculation")
    if self.year % 4:        # doesn't divide by 4
        return False
    elif self.year:          # doesn't divide by 100
        return True
    elif self.century % 4:   # doesn't divide by 400
        return False
    else:
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isLeapYear(self):\n return (self._year % 4 == 0 and\n (self._year % 100 != 0 or self._year % 400 == 0))", "def is_leap_year(self):\n if self.year % 400 == 0:\n return True\n elif self.year % 100 == 0:\n return False\n elif self.year % 4 == 0:\n return True\n return False", "def is_leap_year(self):\n if self.year % 400 == 0:\n return True\n elif self.year % 100 == 0:\n return False\n elif self.year % 4 == 0:\n return True\n return False", "def leap_year(self, year):\n\n\t\tif (year % 4 == 0):\n\t\t\tif (year % 100 == 0):\n\t\t\t\tif (year % 400 == 0):\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn True\n\n\t\treturn False", "def is_leap_year(self):\n if (self.year / 4).is_integer():\n return True\n return False", "def is_leap_year(self):\n\n yr = self.year\n if not yr%4 == 0:\n return False\n elif not yr%100 == 0: #if divisible by 4 and not divisible by 100\n return True\n elif not yr%400 == 0: #if divisible by 4, divisible by 100 and not divisible 400\n return False\n else:\n return True", "def is_leap_year(year):\n mod = divider(year)\n return mod(4) and not mod(100) or mod(400)", "def isLeapYear(self):\n if self.year % 400 == 0: return True\n elif self.year % 100 == 0: return False\n elif self.year % 4 == 0: return True\n return False", "def is_leap_year(year: int) -> bool:\n return (year % 4 == 0) and ((year % 100 != 0) or (year % 400 == 0))", "def leap_year(year: int) -> bool:\n return year % 4 == 0 and \\\n (year % 100 != 0 or year % 400 == 0)", "def leap_year(self):\n\n if self.time_stamp.year % 4 != 0:\n return False\n if self.time_stamp.year % 100 != 0:\n return True\n if self.time_stamp.year % 400 != 0:\n return False\n return True", "def is_leap_year(year: int) -> bool:\n if year % 400 == 0 or (year % 100 != 0 and year % 4 == 0):\n return True\n return False", "def is_leap_year(year):\n return ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0)", "def is_leap_year(year):\n if (year % 4) != 0:\n return False\n elif (year % 100) != 0:\n return True\n elif (year % 400) != 0:\n return False\n else:\n return True", "def is_leap_year(year):\n if ((year % 4 == 0) and (year % 100 != 0)) or year % 400 == 0:\n return True\n else:\n return False", "def is_leap_year(year_):\n if year_ % 4 == 0:\n if year_ % 100 == 0:\n if year_ % 400 == 0:\n return True\n else:\n return False\n else:\n return True\n else:\n return False", "def is_leap_year(year):\n return year % 400 == 0 or (year % 4 == 0 and not year % 100 == 0)", "def is_leap_year(year):\n\n if year % 400 == 0:\n return True\n\n if year % 100 == 0:\n return False\n\n if year % 4 == 0:\n return True", "def isLeapYear(year):\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def is_leap_year(year):\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def is_leap_year(year):\n assert year >= 1583\n return ((year % 4 == 0) and year != 100) or (year % 400 == 0)", "def isLeapYear( year):\n if (year % 400 == 0) :\n return True\n elif (year % 100 == 0) :\n return False\n elif (year % 4 == 0):\n return True\n else:\n return False", "def is_leap(year):\n\tleap = False\n\n\tif (year % 4) == 0:\n\t\tif (year % 100) == 0:\n\t\t\tif (year % 400) == 0:\n\t\t\t\tleap = True\n\t\telse:\n\t\t\tleap = True\n\n\treturn leap", "def LeapYear(year):\n if year % 4:\t\t\t# doesn't divide by 4\n return False\n elif year % 100:\t\t# doesn't divide by 100\n return True\n elif year % 400:\t\t# doesn't divide by 400\n return False\n else:\n return True", "def leapyear(year):\n\n\n if not year % 4 == 0:\n return False\n 
elif not year % 100 == 0:\n return False\n elif not year % 400 == 0:\n return False\n else:\n return True", "def leap_year(year):\n if (year % 4) != 0: # not devisible by 4 \n return False\n\n if (year % 400) != 0 and (year % 100) == 0: # century but not divisible by 400\n return False\n \n return True", "def is_leap_year(year):\n # Please note, this is bad.\n # The previous iteration is much better, I just wanted to see how\n # bad this would look if I put it in one line.\n return False if year % 4 != 0 else True if year % 100 != 0 else year % 400 == 0", "def leap_year(year:int) -> bool:\n if(year % 4 == 0 and year % 100 == 0) or year % 400 == 0:\n return True\n else:\n return False", "def isleap(year):\n if year%4 == 0:\n return True\n elif year%100 == 0:\n return False\n elif year%400 == 0:\n return True\n else:\n return False", "def is_leap_year(cls, year):\n return mod(year * cls.ARYA_SOLAR_YEAR - cls.ARYA_SOLAR_MONTH, cls.ARYA_LUNAR_MONTH) >= 23902504679/1282400064" ]
[ "0.8488528", "0.8269582", "0.8269582", "0.8265238", "0.8233334", "0.81821555", "0.811367", "0.80953074", "0.8048502", "0.80156183", "0.80101466", "0.7984513", "0.7975971", "0.7968858", "0.7950488", "0.79429245", "0.79221284", "0.792168", "0.7919215", "0.7919154", "0.7884527", "0.7876576", "0.7872179", "0.78488755", "0.78424823", "0.78250134", "0.7812194", "0.7806377", "0.7777893", "0.77687234" ]
0.8329149
1
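Because LeapYear above keeps the year split into a century and a two-digit year, divisibility by 100 reduces to the two-digit year being zero, and divisibility by 400 to the century dividing by 4. A standalone sketch of the same rule, assuming that split representation:

def leap_year(century, year):
    # year is the two-digit year within the century (0-99), as in the snippet above
    if year % 4:
        return False  # doesn't divide by 4
    elif year:
        return True   # divides by 4 but not by 100
    elif century % 4:
        return False  # divides by 100 but not by 400
    else:
        return True   # divides by 400

print(leap_year(19, 96))  # True: 1996
print(leap_year(19, 0))   # False: 1900
print(leap_year(20, 0))   # True: 2000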
Returns a tuple of (hour, minute, second, zone direction, zone offset) as defined in GetTime and GetZone.
def GetTimeAndZone(self):
    return self.hour, self.minute, self.second, self.zDirection, self.zOffset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def utctimetuple(self):\n offset = self.utcoffset()\n if offset:\n self -= offset\n y, m, d = self.year, self.month, self.day\n hh, mm, ss = self.hour, self.minute, self.second\n return _build_struct_time(y, m, d, hh, mm, ss, 0)", "def GetTime(self):\n return self.hour, self.minute, self.second", "def tz_name_and_utc_offset(self) -> typing.Tuple[str, float]:\n month = MONTH_NAMES.index(self.event_month) + 1\n timezone = caimira.data.weather.timezone_at(\n latitude=self.location_latitude, longitude=self.location_longitude,\n )\n # We choose the first of the month for the current year.\n date = datetime.datetime(datetime.datetime.now().year, month, 1)\n name = timezone.tzname(date)\n assert isinstance(name, str)\n utc_offset_td = timezone.utcoffset(date)\n assert isinstance(utc_offset_td, datetime.timedelta)\n utc_offset_hours = utc_offset_td.total_seconds() / 60 / 60\n return name, utc_offset_hours", "def get_timezone_offset():\n timezone = get_localzone()\n offset_minutes = timezone.utcoffset(datetime.datetime.now()).total_seconds() // SECONDS_IN_MINUTE\n return parse_int(offset_minutes)", "def get_time(self, place=None, lat=None, lon=None):\n\n # obtain longitude and latitude, if they are not set\n if lat is None and lon is None:\n lon, lat = self.obtain_geo_codes(place)\n print(\"The lon and lat of %s, is: %s, %s\" % (place, lon, lat))\n # gaining geo location may fail\n if lat is None and lon is None:\n return None, None\n\n d = datetime.utcnow()\n timestamp = calendar.timegm(d.utctimetuple())\n data = {'location': str(lat) + ',' + str(lon),\n 'timestamp': int(timestamp),\n 'language': 'en'}\n\n self.system_logger.info(\"GoogleTime request:\\n\" + ' + ' + str(data))\n\n try:\n page = urlopen('https://maps.googleapis.com/maps/api/timezone/json?' + urlencode(data))\n except HTTPError as e:\n print(e.code)\n return None, None\n else:\n response = json.loads(str(page.read(), \"utf-8\"))\n self._log_response_json(response)\n time, time_zone = self.parse_time(response)\n self.system_logger.info(\"GoogleTime response:\\n\" + str(time) + \",\" + str(time_zone))\n return time, time_zone", "def get_time_info(self):\n\n raise NotImplementedError", "def get_hour_offsets(self):\n starttime = self.parameters['startlocaltime']\n stoptime = self.parameters['stoplocaltime']\n timediff = (stoptime - starttime)\n logger.debug(\"Start time: {} | Stop time: {}\".format(starttime, stoptime))\n if timediff > config.TIME_THRESHOLD:\n logger.debug(\"Time delta is {}. This is significantly larger than anticipated\".format(timediff))\n else:\n logger.debug(\"Time delta is {}. Using start time as the global time\".format(timediff))\n\n \"\"\"\n timediff = (stoptime - starttime).total_seconds()\n logger.debug(\"Start time: {} | Stop time: {}\".format(starttime, stoptime))\n #TODO: How do we want to handle large images with huge time differences?\n if timediff > config.TIME_THRESHOLD:\n logger.debug(\"Time delta is {}. This is significantly larger than anticipated\".format(timediff))\n starttime = starttime\n else:\n logger.debug(\"Time delta is {}. 
Using start time as the global time\".format(timediff))\n \"\"\"\n #Given the image start time, find the nearest index and set to the middle,\n # then find the adjacent two nodes in both directions to get allow a\n # cubic interpolation.\n #image_time = starttime.hour + starttime.minute / 60.0\n # This grabs the hour that is nearest, but hour is circular\n image_time = starttime\n if abs(image_time - 24) < abs(image_time - 23.5):\n image_time -= 24\n mididx, midhour = utils.getnearest(self.times, image_time)\n logger.debug(\"Time is {}. The nearest lookup node is {}\".format(image_time, mididx))\n minidx = mididx - 2\n maxidx = mididx + 2\n\n hourslice = np.arange(minidx, maxidx + 1, dtype=np.int8)\n\n hourslice[hourslice < 0] += 18\n\n if hourslice[-1] >= len(self.times):\n #The hour slice needs to be shifted over the time break\n hourslice[hourslice >= len(self.times)] -= len(self.times)\n logger.debug(\"Using indices {} and start time of {}.\".format(hourslice, image_time))\n return hourslice, image_time", "def timetuple(self):\n dst = self.dst()\n if dst is None:\n dst = -1\n elif dst:\n dst = 1\n else:\n dst = 0\n return _build_struct_time(\n self.year, self.month, self.day, self.hour, self.minute, self.second, dst\n )", "def timezone():\n \n pass", "def get_timzone_offset(self, timezone):\n raise NotImplementedError", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def hour():\r\n\r\n date = datetime.datetime.now()\r\n hours = date.hour\r\n minute = date.minute\r\n\r\n return hours, minute", "def _get_time_info(self, keys: list[str]):\n if self.is_info_v2:\n if not self.is_on:\n return 0\n return self.int_or_none(self._data.get(keys[1]))\n return self._data.get(keys[0])", "def offset(self):\n\n offsetList = ['12 am', '1 am', '2 am', '3 am', '4 am', '5 am', '6 am', '7 am', '8 am', '9 am',\n '10 am', '11 am', '12 pm', '1 pm', '2 pm', '3 pm', '4 pm', '5 pm', '6 pm', '7 pm',\n '8 pm', '9 pm', '10 pm', '11 pm', '12 pm']\n\n firstTimeHour = self.firstTime.time().hour\n print ('First Time Hour:', firstTimeHour)\n\n m2 = str(self.firstTime.time())\n m2 = datetime.datetime.strptime(m2, '%I:%M %p')\n print(m2)", "def time_zone(self):\n\n\t\tg = geocoders.GoogleV3()\n\n\t\t#Gives the name of the timezone, ex: Africa/Luanda\n\t\ttimezone_name = str(g.timezone((self.latitude_value(), self.longitude_value())))\n\n\t\t#Returns the numeric value of the timezone, ex: +0100\n\t\treturn int(pytz.timezone(timezone_name).localize(datetime.datetime(2011,1,1)).strftime('%z'))/100", "def _local_time_offset():\n if time.localtime().tm_isdst and time.daylight:\n return -time.altzone\n else:\n return -time.timezone", "def get_tz_offset(self) -> float:\n return self.AD.tz.utcoffset(self.datetime()).total_seconds() / 60", "def timezone():\n\n return time.timezone", "def GetTimeAndDurationOfPath(PathInfo):\r\n\tif not PathInfo:\r\n\t\treturn None \r\n\tif len(PathInfo) < 2: return None \r\n\r\n\tarrival_first_station = PathInfo[0][ConnInfoInd['arrival_hour']]*60 + PathInfo[0][ConnInfoInd['arrival_min']]\r\n\t# departure_first_station = PathInfo[1][ConnInfoInd['departure_hour']]*60 + PathInfo[1][ConnInfoInd['departure_min']]\r\n\r\n\tarrival_last_station = PathInfo[-1][ConnInfoInd['arrival_hour']]*60 + PathInfo[-1][ConnInfoInd['arrival_min']]\r\n\tTotalDuration = arrival_last_station - arrival_first_station\r\n\treturn (TotalDuration, arrival_first_station, arrival_last_station)", "def 
localtime(stamp):\n return stamp - utc_offset", "def get_time(offset=0, with_second=True):\n today = datetime.datetime.now() - datetime.timedelta(seconds=offset)\n hour = str(today.hour)\n minute = str(today.minute)\n second = str(today.second)\n if with_second:\n return hour+'-'+minute+'-'+second\n else:\n return hour+'-'+minute", "def get_time(self):\n return datetime.datetime.now(self.time_zone)", "def timetz(self):\n return time(\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self._tzinfo,\n fold=self.fold,\n )", "def time_zone():\n return timezone('Etc/GMT-10')", "def get_time():\n\teastern = timezone('US/Eastern')\n\tnow = datetime.datetime.now(eastern).time()\n\treturn(now)", "def get_current_india_time():\n india_offset = datetime.timedelta(hours=5, minutes=30)\n in_time = datetime.datetime.utcnow() + india_offset\n return in_time", "def Time(self):\n return '%2.2d:%2.2d:%2.2d' % (self._hour, self._minute, self._nearsec)", "def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()", "def timezone(self):\n tz_data = self._router_request(\n self._make_request_data(\n 'getTimeZone',\n data=dict()\n )\n )\n\n return tz_data['data']", "def get_time(self):\n start=''\n end=''\n time=''\n times=self.times\n print(times[self.istep])\n if self.istep > 0:\n start=ncEarth.beginstr % times[self.istep].isoformat()\n\n\n if self.istep < len(times)-2:\n end = ncEarth.endstr % times[self.istep+1].isoformat()\n\n if start is not '' or end is not '':\n time=ncEarth.timestr % {'begin':start,'end':end}\n\n return time" ]
[ "0.65421635", "0.65087676", "0.6242266", "0.61258656", "0.58964926", "0.5847104", "0.58026797", "0.5802542", "0.57943493", "0.57925767", "0.5730232", "0.5720724", "0.5711272", "0.57005984", "0.56693137", "0.561445", "0.56028926", "0.55888134", "0.55880094", "0.55877244", "0.55746704", "0.5572935", "0.55525535", "0.5549715", "0.5528079", "0.55218697", "0.5516727", "0.5510267", "0.55102366", "0.55015874" ]
0.8203086
0
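GetTimeAndZone above just bundles the three time fields with the two zone fields. Assuming (an assumption, not stated in the snippet) that zDirection carries the sign of the offset and zOffset its magnitude in minutes, a time of 20:17:40+01:00 would unpack as:

hour, minute, second, z_direction, z_offset = 20, 17, 40, +1, 60  # hypothetical values
print("%02i:%02i:%02i%+03i:%02i" % (
    hour, minute, second, z_direction * z_offset // 60, z_offset % 60))
# -> 20:17:40+01:00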
UpdateStructTime changes the hour, minute, second and isdst fields of t, a struct_time, to match the values in this time. isdst is always set to -1.
def UpdateStructTime(self, t):
    if not self.Complete():
        raise DateTimeError("UpdateStructTime requires a complete time")
    t[3] = self.hour
    t[4] = self.minute
    t[5] = self.second
    t[8] = -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def UpdateStructTime(self, t):\n self.date.UpdateStructTime(t)\n self.time.UpdateStructTime(t)", "def UpdateStructTime(self, t):\n if not self.Complete():\n raise DateTimeError(\"UpdateStructTime requires complete date\")\n t[0] = self.century * 100 + self.year\n t[1] = self.month\n t[2] = self.day\n t[6] = self.GetWeekDay()[4] - 1\n t[7] = self.GetOrdinalDay()[2]", "def correct_dt_dst(datetime_obj):\n\n # https://en.wikipedia.org/wiki/Tz_database\n # https://www.iana.org/time-zones\n\n if datetime_obj.tzinfo is None:\n return datetime_obj\n\n # Create and return a New datetime object. This corrects the DST if errors are present.\n return dt(datetime_obj.year,\n datetime_obj.month,\n datetime_obj.day,\n datetime_obj.hour,\n datetime_obj.minute,\n datetime_obj.second,\n datetime_obj.microsecond,\n tzinfo=datetime_obj.tzinfo)", "def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))", "def __init__(self, struct_time):\r\n\t\tself.struct_time = struct_time\r\n\t\tself.year = struct_time[0]\r\n\t\tself.mon = self.set_month(struct_time[1])\r\n\t\tself.day = struct_time[2]\r\n\t\tself.hour = struct_time[3]\r\n\t\tself.min = struct_time[4]\r\n\t\tself.wday = self.set_week_day(struct_time[6])\r\n\t\tself.day_or_night = self.set_day_state(struct_time[8])", "def modify_struct(self, struct, is_full_struct):\n return struct", "def update_time(self):\n pass # Do nothing", "def update(self, dt):\n pass", "def dst(self, dt):", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def update(self, dt):\n\t\tpass", "def update(self, dt=None): #pylint: disable=invalid-name\n if dt is None:\n dt = datetime.utcnow()\n\n self.update_location(self.old_location, dt - timedelta(seconds=1))\n self.update_location(self.current_location, dt)\n self.update_location(self.future_location, dt + timedelta(seconds=1))", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def update_by_delta(self):\n if (not self.smart_scheduled_for) or (not self.smart_schedule_info):\n # Doesn't depend on any other event.\n return\n\n delta_s = self.smart_schedule_info.get('delta_s')\n if delta_s is None: # Doesn't have a time delta.\n return\n\n delta_to_assoc_event = timedelta(seconds=delta_s)\n new_start_time = self.smart_scheduled_for.end_time + delta_to_assoc_event\n new_end_time = new_start_time + (self.end_time - self.start_time)\n\n self.start_time = new_start_time\n self.end_time = new_end_time\n self.save()", "def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])", "def timetuple(self):\n dst = self.dst()\n if dst is None:\n dst = -1\n elif dst:\n dst = 1\n else:\n dst = 0\n return _build_struct_time(\n self.year, self.month, self.day, self.hour, self.minute, self.second, dst\n )", "def _marshal_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour=0,\n tm_min=0,\n tm_sec=0,\n tm_wday=-1,\n tm_yday=-1,\n tm_isdst=-1,\n ):\n _struct_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour,\n tm_min,\n tm_sec,\n tm_wday,\n tm_yday,\n tm_isdst,\n )", "def _convert_struct_time_to_dt(stime):\n return date.fromtimestamp(mktime(stime))", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != 
datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def update(self, dt):", "def update(self, dt):", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def update_time_entry(self, entry):\n create = entry.id == 0\n\n xml = self._serialise_time_entry(entry)\n\n method = ['PUT','POST'][create]\n\n if create:\n url = \"%s/time_entries?%s\" % \\\n (self._get_base_url(), self._get_url_params())\n else:\n url = \"%s/time_entries/%s?%s\" % \\\n (self._get_base_url(), entry.id, self._get_url_params())\n\n headers = { \"Accept\":\"application/xml\",\n \"Content-Type\":\"application/xml\" }\n self.__conn.request(method, url, xml, headers) \n response = self.__conn.getresponse()\n\n data = response.read()\n\n if not response.status == 200:\n raise Exception(\"Could not update/create time entry.\"\\\n \" Response was [%s]: %s\" % (response.status, data))\n\n return self._parse_time_entry(ET.fromstring(data))", "def setTime(self, timeObj, day=None):\n\n # override day if it's None\n if not day:\n day = getDayFromNum(timeObj.weekday())\n\n self._fileCache[day][\"time-hr\"] = timeObj.hour\n self._fileCache[day][\"time-min\"] = timeObj.minute\n self._updateConfig()", "def _update_time(self, current=None, total=None):\n if current is None:\n current = self._current\n if total is None:\n total = self._total\n\n if self._last_time is None:\n self._last_time = datetime.datetime.now()\n self._remaining_time = \"?\"\n else:\n diff = datetime.datetime.now() - self._last_time\n self._last_time = datetime.datetime.now()\n diff = (diff.seconds * 1E6 + diff.microseconds) /\\\n (current - self._last_current) * (total - current) / 1E6\n self._last_current = current\n\n if diff > 3600:\n h = round(diff//3600)\n m = round((diff - h*3600)/60)\n self._remaining_time = \"{0:d}h {1:d}m\".format(int(h), int(m))\n elif diff > 60:\n m = round(diff // 60)\n s = round((diff - m * 60))\n self._remaining_time = \"{0:d}m {1:d}s\".format(int(m), int(s))\n else:\n self._remaining_time = \"{0:d}s\".format(int(round(diff)))", "def test_update_dt(self):\n result = self.test_client.update_dt\n\n assert result == \"2020-02-18 01:54:13\"", "def do_time_step(t, dt, x, y, z, vx, vy, vz, m, B2, g, S0, omega):\n t = t + dt\n x = x + vx*dt\n y = y + vy*dt\n z = z + vz*dt\n\n vz = vz - S0*vx*omega/m*dt # update vz first to use current value of vx\n v = math.sqrt(vx*vx + vy*vy + vz*vz)\n vx = vx - B2/m*v*vx*dt\n vy = vy - g*dt - B2/m*v*vy*dt\n\n return t, x, y, z, vx, vy, vz", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)" ]
[ "0.80075157", "0.6960625", "0.52754", "0.5093788", "0.5083209", "0.50688696", "0.505344", "0.50233996", "0.4956732", "0.4941154", "0.49334556", "0.49287647", "0.4906301", "0.4906301", "0.48946878", "0.48796442", "0.48642966", "0.47915608", "0.47877473", "0.4778558", "0.476136", "0.476136", "0.4757172", "0.4756713", "0.4709081", "0.47050345", "0.47045267", "0.4697651", "0.4640426", "0.4640426" ]
0.77810526
1
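Since time.struct_time is immutable, UpdateStructTime above has to be handed a mutable sequence, e.g. a list built from an existing struct_time. A minimal calling sketch (the 20:17:40 values are assumptions for illustration):

import time

t = list(time.localtime())     # copy the immutable struct_time into a list
t[3], t[4], t[5] = 20, 17, 40  # hour, minute, second, as the method assigns them
t[8] = -1                      # tm_isdst unknown; let the C library decide
print(time.struct_time(t))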
Time can hold partially specified times; we deal with comparisons in a similar way to Date.__cmp__ in that times must have the same precision to be comparable. Although this behaviour is consistent, it might seem strange at first as it
def __cmp__(self, other):
    if not isinstance(other, Time):
        raise TypeError
    if self.GetPrecision() != other.GetPrecision():
        raise ValueError(
            "Incompatible precision for comparison: " + str(other))
    zDir = self.GetZoneOffset()
    otherZDir = other.GetZoneOffset()
    if zDir != otherZDir:
        raise ValueError("Incompatible zone for comparison: " + str(other))
    result = cmp(self.hour, other.hour)
    if not result:
        result = cmp(self.minute, other.minute)
        if not result:
            result = cmp(self.second, other.second)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _matchTime(self, time: float):\n return self._comparator['Time'] < time", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def test_general_subset_invalid_time():\n pass", "def __cmp__(self, other):\n return (self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def validateTiming(obj):\n if obj.start_time:\n if obj.end_time:\n return obj.start_time <= timezone.now() and obj.end_time > timezone.now()\n else:\n return obj.start_time <= timezone.now()\n else:\n if obj.end_time:\n return obj.end_time > timezone.now()\n else:\n return True", "def __cmp__(self, other):\n if not isinstance(other, datetime):\n types = (type(other), datetime)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return (self._cmp(self._days, other._days)\n or self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def is_time(self) -> bool:\n return self.times > 1", "def __cmp__(self, other):\n if not isinstance(other, TimePoint):\n other = type(self)(other)\n # we need to follow the rules for comparing times\n if self.time.GetPrecision() != other.time.GetPrecision():\n raise ValueError(\n \"Incompatible precision for comparison: \" + str(other))\n z1 = self.time.GetZoneOffset()\n z2 = other.time.GetZoneOffset()\n if z1 != z2:\n if z1 is None or z2 is None:\n raise ValueError(\"Can't compare zone: \" + str(other))\n # we need to change the timezone of other to match ours\n other = other.ShiftZone(*self.time.GetZone3())\n result = cmp(self.date, other.date)\n if not result:\n result = cmp(self.time, other.time)\n return result", "def __timeRestriction():\n restriction = {\"M\": [\"7:00\", \"9:30\"],\n \"A\": [\"16:00\", \"19:30\"]}\n return restriction", "def check_time(startTime, endTime):\n\n now = datetime.now()\n startTimeObj = datetime.strptime(startTime, '%I:%M%p')\n endTimeObj = datetime.strptime(startTime, '%I:%M%p')\n\n if startTimeObj.hour <= now.hour <= endTimeObj.hour and \\\n startTimeObj.minute <= now.minute <= endTimeObj.minute:\n return True", "def _compare_times(drive_file_time, local_file_time):\n local_time_zone = get_localzone()\n\n localized_drive_file_time = drive_file_time.astimezone(local_time_zone)\n localized_local_file_time = local_time_zone.localize(local_file_time)\n\n if localized_drive_file_time > localized_local_file_time:\n return 1\n elif localized_drive_file_time < localized_local_file_time:\n return -1\n else:\n return 0", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def test_as_specified(self):\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S'),\n '2020-07-31 23:59:30')\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S',\n with_msec=True),\n '2020-07-31 23:59:30.357')\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S',\n with_usec=True),\n '2020-07-31 23:59:30.357921')", "def time_before(time_a, time_b=None) -> bool:\n if time_b is None:\n 
time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a < time_b", "def test_timer_order(self):\n time_from_int = [Time(1000), Time(4000), Time(7000), Time(10000)]\n time_from_int_equal = [Time(1), Time(1)]\n self._check_sequence_consistency(time_from_int)\n self._check_sequence_consistency(time_from_int_equal, equal=True)\n\n time_from_datetime = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us))\n for us in (2, 5, 8, 11)]\n time_from_datetime_equal = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us))\n for us in (1, 1)]\n self._check_sequence_consistency(time_from_datetime)\n self._check_sequence_consistency(time_from_datetime_equal, equal=True)\n\n time_from_string = [Time(\"00:00:00.000003000\"), Time(\"00:00:00.000006000\"),\n Time(\"00:00:00.000009000\"), Time(\"00:00:00.000012000\")]\n time_from_string_equal = [Time(\"00:00:00.000004000\"), Time(\"00:00:00.000004000\")]\n self._check_sequence_consistency(time_from_string)\n self._check_sequence_consistency(time_from_string_equal, equal=True)\n\n self._check_sequence_consistency(self._shuffle_lists(time_from_int, time_from_datetime, time_from_string))", "def time_in_range(start, end, time):\n if start <= end:\n return start <= time <= end\n else:\n return start <= time or time <= end", "def __eq__(self, other):\n return self.times == other.times", "def isRestrictionTime(time_str):\n ValidTime.validateTime(time_str)\n time = datetime.strptime(time_str, \"%H:%M\").time()\n morning_ini, morning_fin = PicoPlaca.__getTimeRestriction(\"M\")\n if morning_ini <= time <= morning_fin:\n return True\n\n afternoon_ini, afternoon_fin = PicoPlaca.__getTimeRestriction(\"A\")\n if afternoon_ini <= time <= afternoon_fin:\n return True\n\n return False", "def time_after(time_a, time_b=None) -> bool:\n if time_b is None:\n time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a > time_b", "def __isub__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___isub__(self, *args, **kwargs)", "def test_details_time(self):\n self.assertLess(self.details.time, datetime.now(timezone.utc))", "def _validate_time_fields(cls, item):\n if item.time_started_msec and (\n item.time_queued_msec > item.time_started_msec):\n cls._add_error(\n 'time queued check',\n 'Entity id %s: time queued %s is greater '\n 'than time started %s' % (\n item.id, item.time_queued_msec, item.time_started_msec))\n\n if item.time_finished_msec and (\n item.time_started_msec > item.time_finished_msec):\n cls._add_error(\n 'time started check',\n 'Entity id %s: time started %s is greater '\n 'than time finished %s' % (\n item.id, item.time_started_msec, item.time_finished_msec))\n\n current_time_msec = utils.get_current_time_in_millisecs()\n if item.time_finished_msec > current_time_msec:\n cls._add_error(\n 'time finished check',\n 'Entity id %s: time finished %s is greater '\n 'than the current time' % (\n item.id, item.time_finished_msec))", "def test_bad_interval(self):\n # Intentionally set a small interval (3 min) to fail.\n interval = np.timedelta64(3, 'm')\n self.assertFalse(utils.check_timestamps(self.times, interval))", "def test_time_field():", "def within_time_interval(self, from_time, to_time, time):\n\t\tif from_time == None:\n\t\t\tif to_time == None:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn bool(time <= to_time)\n\t\telse:\n\t\t\tif to_time == 
None:\n\t\t\t\treturn bool(time >= from_time)\n\n\t\treturn bool((time >= from_time) and (time <= to_time))", "def is_after(t1,t2):\n return (t1.hour, t1.minute, t1.second) > (t2.hour, t2.minute, t2.second)", "def test_parse_time_special_values(self):\n now1 = datetime(2015, 2, 1, 0, 0, 0)\n now2 = datetime(2015, 1, 24, 10, 15, 25)\n self.assertEqual(parse_time(\"now\", now1), now1)\n self.assertEqual(parse_time(\"now\", now2), now2)\n self.assertEqual(\n parse_time(\"yesterday\", now1), datetime(2015, 1, 31, 0, 0, 0))\n self.assertEqual(\n parse_time(\"yesterday\", now2), datetime(2015, 1, 23, 10, 15, 25))\n self.assertEqual(parse_time(\"today\", now1), now1)\n self.assertEqual(parse_time(\"today\", now2), now2)\n self.assertEqual(\n parse_time(\"tomorrow\", now1), datetime(2015, 2, 2, 0, 0, 0))\n self.assertEqual(\n parse_time(\"tomorrow\", now2), datetime(2015, 1, 25, 10, 15, 25))", "def time_overlap(d1, d2):\n gt1, gt2, vt1, vt2 = parse_date(d1[\"t1\"]), parse_date(d1[\"t2\"]), parse_date(d2[\"t1\"]), parse_date(d2[\"t2\"])\n return (gt1 != vt2) and (vt1 != gt2) and (gt1 <= vt2) and (vt1 <= gt2)", "def test_parse_time_with_interval(self):\n now = datetime(2015, 2, 1, 0, 0, 0)\n self.assertEqual(parse_time(\"-0s\", now), now)\n self.assertEqual(\n parse_time(\"-1s\", now), datetime(2015, 1, 31, 23, 59, 59))\n self.assertEqual(\n parse_time(\"-1s\", now), datetime(2015, 1, 31, 23, 59, 59))\n self.assertEqual(\n parse_time(\"-2w\", now), datetime(2015, 1, 18, 0, 0, 0))\n self.assertEqual(\n parse_time(\"-2w\", datetime(2015, 1, 24, 10, 15, 25)),\n datetime(2015, 1, 10, 10, 15, 25))" ]
[ "0.7218274", "0.6513318", "0.6404388", "0.6403199", "0.63935935", "0.63328564", "0.6318335", "0.62957954", "0.62863904", "0.627176", "0.6242979", "0.6178251", "0.6178251", "0.6159284", "0.61410797", "0.61352104", "0.6111098", "0.6096023", "0.6093109", "0.60737616", "0.6073266", "0.60654444", "0.60630625", "0.6050072", "0.6036201", "0.60318375", "0.6028874", "0.6022375", "0.60179013", "0.60020065" ]
0.67124844
1
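The chained cmp calls in __cmp__ above only move on to a finer field when the coarser ones tie. With a shim for the cmp built-in that Python 3 removed (the snippet targets Python 2), comparing 09:00:15 against 09:30:00 plays out as:

def cmp(a, b):  # Python 2 built-in, shimmed for illustration
    return (a > b) - (a < b)

result = cmp(9, 9)        # hours tie -> 0
if not result:
    result = cmp(0, 30)   # minutes differ -> -1, seconds never consulted
    if not result:
        result = cmp(15, 0)
print(result)             # -> -1: 09:00:15 sorts before 09:30:00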
UpdateStructTime changes the year, month, day, hour, minute and second fields of t, a struct_time, to match the values in this date and time.
def UpdateStructTime(self, t):
    self.date.UpdateStructTime(t)
    self.time.UpdateStructTime(t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def UpdateStructTime(self, t):\n if not self.Complete():\n raise DateTimeError(\"UpdateStructTime requires a complete time\")\n t[3] = self.hour\n t[4] = self.minute\n t[5] = self.second\n t[8] = -1", "def UpdateStructTime(self, t):\n if not self.Complete():\n raise DateTimeError(\"UpdateStructTime requires complete date\")\n t[0] = self.century * 100 + self.year\n t[1] = self.month\n t[2] = self.day\n t[6] = self.GetWeekDay()[4] - 1\n t[7] = self.GetOrdinalDay()[2]", "def __init__(self, struct_time):\r\n\t\tself.struct_time = struct_time\r\n\t\tself.year = struct_time[0]\r\n\t\tself.mon = self.set_month(struct_time[1])\r\n\t\tself.day = struct_time[2]\r\n\t\tself.hour = struct_time[3]\r\n\t\tself.min = struct_time[4]\r\n\t\tself.wday = self.set_week_day(struct_time[6])\r\n\t\tself.day_or_night = self.set_day_state(struct_time[8])", "def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second", "def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])", "def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def svn_info_t_prop_time_set(svn_info_t_self, apr_time_t_prop_time): # real signature unknown; restored from __doc__\n pass", "def _convert_struct_time_to_dt(stime):\n return date.fromtimestamp(mktime(stime))", "def _marshal_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour=0,\n tm_min=0,\n tm_sec=0,\n tm_wday=-1,\n tm_yday=-1,\n tm_isdst=-1,\n ):\n _struct_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour,\n tm_min,\n tm_sec,\n tm_wday,\n tm_yday,\n tm_isdst,\n )", "async def put_date_time( # pylint: disable=inconsistent-return-statements\n self, complex_body: JSON, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "def test_update_dt(self):\n result = self.test_client.update_dt\n\n assert result == \"2020-02-18 01:54:13\"", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def modify_struct(self, struct, is_full_struct):\n return struct", "def mod_time(self, mod_time):\n\n self._mod_time = mod_time", "def mod_time(self, mod_time):\n\n self._mod_time = mod_time", "def directive_to_struct_time_item(directive, value):\n if directive == DIRECTIVES.YEAR:\n # Return YEAR as TM_YEAR.\n return STRUCT_TIME.TM_YEAR, value\n elif directive == DIRECTIVES.YEAR_NO_CENTURY:\n # Return YEAR_NO_CENTURY as TM_YEAR.\n # Assume that a two-digit year is relative to the year 2000.\n return STRUCT_TIME.TM_YEAR, value + 2000\n elif directive == DIRECTIVES.MONTH:\n # Return MONTH as TM_MON.\n return STRUCT_TIME.TM_MON, value\n elif directive == DIRECTIVES.ABBREV_MONTH_NAME:\n # Return ABBREV_MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, ABBREVIATED_MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.MONTH_NAME:\n # Return MONTH_NAME as TM_MON.\n 
return STRUCT_TIME.TM_MON, MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_MONTH:\n # Return DAY_OF_MONTH as TM_MDAY\n return STRUCT_TIME.TM_MDAY, value\n elif directive == DIRECTIVES.HOUR_24:\n # Return HOUR_24 as TM_HOUR\n return STRUCT_TIME.TM_HOUR, value\n elif directive == DIRECTIVES.HOUR_12:\n # Return HOUR_12 as 0-based TM_HOUR\n return STRUCT_TIME.TM_HOUR, 0 if value == 12 else value\n elif directive == DIRECTIVES.MINUTE:\n # Return MINUTE as TM_MIN\n return STRUCT_TIME.TM_MIN, value\n elif directive == DIRECTIVES.SECOND:\n # Return SECOND as TM_SEC\n return STRUCT_TIME.TM_SEC, value\n elif directive == DIRECTIVES.DAY_OF_WEEK:\n # Return DAY_OF_WEEK as TM_WDAY\n return STRUCT_TIME.TM_WDAY, value\n elif directive == DIRECTIVES.ABBREV_WEEKDAY_NAME:\n # Return ABBREV_WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, ABBREVIATED_WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.WEEKDAY_NAME:\n # Return WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_YEAR:\n # Return DAY_OF_YEAR as TM_YDAY\n return STRUCT_TIME.TM_YDAY, value\n elif directive == DIRECTIVES.TIME_ZONE:\n # Take no action for TIME_ZONE.\n return None\n elif directive == DIRECTIVES.TIME_ZONE_OFFSET:\n # Return TIME_ZONE_OFFSET as TM_MIN - to be subtracted from any\n # existing minute value to arrive at UTC.\n return STRUCT_TIME.TM_MIN, -value\n elif directive == DIRECTIVES.AM_PM:\n # Return AM_PM as TM_HOUR\n # If value = 'PM' return +12 to update hour value to 24-hour format.\n return STRUCT_TIME.TM_HOUR, 12 if value == 'PM' else 0\n elif directive == DIRECTIVES.PERCENT:\n # Take no action for PERCENT.\n return None\n else:\n raise NotImplementedError(\n 'struct_time conversion not defined for directive: {}'\n .format(directive)\n )", "def setSubmitTime(t):", "def update_time(self, update_time):\n\n self._update_time = update_time", "async def put_date_time( # pylint: disable=inconsistent-return-statements\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)", "def update(self, dt):\n for obj in self.objects:\n obj.update(dt)", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def update_time(self):\n pass # Do nothing", "def update_time_entry(self, entry):\n create = entry.id == 0\n\n xml = self._serialise_time_entry(entry)\n\n method = ['PUT','POST'][create]\n\n if create:\n url = \"%s/time_entries?%s\" % \\\n (self._get_base_url(), self._get_url_params())\n else:\n url = \"%s/time_entries/%s?%s\" % \\\n (self._get_base_url(), entry.id, self._get_url_params())\n\n headers = { \"Accept\":\"application/xml\",\n \"Content-Type\":\"application/xml\" }\n self.__conn.request(method, url, xml, headers) \n response = self.__conn.getresponse()\n\n data = response.read()\n\n if not response.status == 200:\n raise Exception(\"Could not update/create time entry.\"\\\n \" Response was [%s]: %s\" % (response.status, data))\n\n return self._parse_time_entry(ET.fromstring(data))", "def _convert_struct_time_to_dt(stime):\n\n dt = 
datetime.datetime.fromtimestamp(mktime(stime))\n\n return dt.date()", "def update_structure(self, course_key, structure):\n self._clear_cache(structure['_id'])\n bulk_write_record = self._get_bulk_ops_record(course_key)\n if bulk_write_record.active:\n bulk_write_record.structures[structure['_id']] = structure\n else:\n self.db_connection.insert_structure(structure, course_key)", "def do_time_step(t, dt, x, y, z, vx, vy, vz, m, B2, g, S0, omega):\n t = t + dt\n x = x + vx*dt\n y = y + vy*dt\n z = z + vz*dt\n\n vz = vz - S0*vx*omega/m*dt # update vz first to use current value of vx\n v = math.sqrt(vx*vx + vy*vy + vz*vz)\n vx = vx - B2/m*v*vx*dt\n vy = vy - g*dt - B2/m*v*vy*dt\n\n return t, x, y, z, vx, vy, vz", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]" ]
[ "0.8136957", "0.7983699", "0.5842707", "0.5501244", "0.5400125", "0.5110482", "0.5087801", "0.50174713", "0.5017022", "0.50135535", "0.49094248", "0.49081317", "0.48958427", "0.4876365", "0.48754254", "0.48754254", "0.4823265", "0.4822224", "0.48168054", "0.4796931", "0.47922343", "0.47645354", "0.47304565", "0.47304565", "0.47297326", "0.4725768", "0.47124398", "0.46880293", "0.4683345", "0.4671459" ]
0.888156
0
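For orientation, a self-contained sketch of how the delegating UpdateStructTime above plays out; SimpleDate and SimpleTime are illustrative stand-ins (not the library's classes), writing into a mutable struct_time-style list in the way the negatives suggest.

import time

class SimpleDate:
    def __init__(self, year, month, day):
        self.year, self.month, self.day = year, month, day

    def UpdateStructTime(self, t):
        # Fields 0-2 of a struct_time-style list: year, month, day.
        t[0], t[1], t[2] = self.year, self.month, self.day

class SimpleTime:
    def __init__(self, hour, minute, second):
        self.hour, self.minute, self.second = hour, minute, second

    def UpdateStructTime(self, t):
        # Fields 3-5: hour, minute, second.
        t[3], t[4], t[5] = self.hour, self.minute, self.second

class SimpleTimePoint:
    def __init__(self, date, time_):
        self.date, self.time = date, time_

    def UpdateStructTime(self, t):
        self.date.UpdateStructTime(t)
        self.time.UpdateStructTime(t)

t = list(time.gmtime(0))  # mutable stand-in for a struct_time
tp = SimpleTimePoint(SimpleDate(1969, 7, 20), SimpleTime(20, 17, 40))
tp.UpdateStructTime(t)
print(t[:6])  # [1969, 7, 20, 20, 17, 40]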
Constructs a TimePoint from a string representation. Truncated forms are parsed with reference to base.
def from_str(cls, src, base=None, tDesignators="T"):
    if type(src) in StringTypes:
        p = ISO8601Parser(src)
        tp, f = p.ParseTimePointFormat(base, tDesignators)
        return tp
    else:
        raise TypeError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_str(cls, timestamp_str):\n units = timestamp_str.split(\":\")\n seconds_ms = units[-1].split(\".\")\n hours = int(units[0])\n minutes = int(units[1])\n seconds = int(seconds_ms[0])\n milliseconds = int(seconds_ms[1])\n return cls(hours, minutes, seconds, milliseconds)", "def parse(str):\n if len(str) != 16:\n raise ValueError(\"Invalid time length %d\" % len(str))\n if (str[-1]) == 'R':\n return parse_relative_time(str)\n return parse_absolute_time(str)", "def parse(s):\n\n rise = False\n set = False\n if s[-1:] == \"R\":\n rise = True\n s = s[:-1]\n elif s[-1:] == \"T\":\n set = True\n s = s[:-1]\n \n x = s.split(\":\")\n if len(x) == 1:\n x.append(\"0\")\n if len(x) == 2:\n x.append(\"0\")\n \n return Time(int(x[0]), int(x[1]), int(x[2]), after_sunrise=rise,\n after_sunset=set)", "def from_str(cls, string):", "def from_str(cls, s):\n raise NotImplementedError", "def parse_input(s):\n if isinstance(s, six.integer_types):\n s = str(s)\n elif not isinstance(s, six.string_types):\n raise ValueError(s)\n\n original = s\n\n if s[-1:] == 'L':\n s = s[:-1]\n\n sign = {'-': -1, '=': 0, '+': 1}.get(s[0], None)\n if sign is not None:\n s = s[1:]\n\n ts = 0\n for unit in _SORTED_UNITS:\n pos = s.find(unit[0])\n if pos == 0:\n raise ValueError(original)\n elif pos > 0:\n # If we find a unit letter, we're dealing with an offset. Default\n # to positive offset if a sign wasn't specified.\n if sign is None:\n sign = 1\n ts += int(s[:pos]) * __timedelta_millis(unit[1])\n s = s[min(len(s), pos + 1):]\n\n if s:\n ts += int(s)\n\n return date_from_utc_ts(ts) if not sign else \\\n utc() + sign * delta(milliseconds=ts)", "def parse(s):\n\n t = AbsoluteTimer()\n t.id = s.get(\"id\", None)\n t.name = s.get(\"name\", None)\n \n if s.has_key(\"abstime\"):\n\n parts = s[\"abstime\"].split(\" \")\n\n if len(parts) != 2:\n raise RuntimeError, \"Invalid date format\"\n\n dateparts = parts[0].split(\"-\")\n timeparts = parts[1].split(\":\")\n \n if len(dateparts) != 3:\n raise RuntimeError, \"Invalid date format\"\n if len(timeparts) != 3:\n raise RuntimeError, \"Invalid date format\"\n\n t.year = int(dateparts[0])\n t.month = int(dateparts[1])\n t.date = int(dateparts[2])\n t.hours = int(timeparts[0])\n t.minutes = int(timeparts[1])\n t.seconds = int(timeparts[2])\n\n return t", "def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)", "def parse(timestring):\n for parser in _PARSERS:\n match = parser['pattern'].match(timestring)\n if match:\n groups = match.groups()\n ints = tuple(map(int, groups))\n time = parser['factory'](ints)\n return time\n\n raise TimeError('Unsupported time format {}'.format(timestring))", "def parse_influxdb_time(t_str):\n try:\n return datetime.datetime.strptime(t_str[:26].rstrip('Z'), '%Y-%m-%dT%H:%M:%S.%f')\n except ValueError:\n return datetime.datetime.strptime(t_str[:19], '%Y-%m-%dT%H:%M:%S')", "def fromString(cls, string):\n raise NotImplementedError(\n 'fromString is not implemented on %r' % (cls.__name__,))", "def from_string(cls, string):\n normalised = cls.normalise_string(string)\n return cls.from_normalised_string(normalised)", "def parse(text):\n parts = [int(part) for part in text.strip().split(',')]\n point = Point(*parts)\n actual = \"{},{},{},{}\".format(point.x, point.y, point.z, point.t)\n assert actual == text, diff(actual, text)\n return point", "def parse(s):\n\n if s[\"type\"] == 1:\n return IntervalTimer.parse(s)\n\n if s[\"type\"] == 2:\n return DayOfWeekTimer.parse(s)\n\n if s[\"type\"] == 3:\n return 
DayOfMonthTimer.parse(s)\n\n if s[\"type\"] == 4:\n return AbsoluteTimer.parse(s)\n\n raise RuntimeError, \"Parsing timer not implemented.\"", "def deserialize(self, str):\n try:\n if self.t_start is None:\n self.t_start = genpy.Time()\n if self.duration is None:\n self.duration = genpy.Duration()\n end = 0\n _x = self\n start = end\n end += 65\n (_x.x, _x.y, _x.z, _x.yaw, _x.v_des, _x.a_des, _x.t_start.secs, _x.t_start.nsecs, _x.duration.secs, _x.duration.nsecs, _x.relative,) = _get_struct_6d2I2iB().unpack(str[start:end])\n self.relative = bool(self.relative)\n self.t_start.canon()\n self.duration.canon()\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def parse_instant_string(cls, inst_str):\n #return dt.datetime.strptime(inst_str, '%Y-%m-%d %H:%M:%S')\n if inst_str is None or inst_str.__eq__('LATEST'):\n return None\n else:\n try:\n return Instant.parse(inst_str)\n except TypeError, e:\n raise TypeError(e)", "def fromstring(cls, string):\n string = cls.normalize_puzzle_string(string)\n size = int(sqrt(len(string)))\n square_size = int(sqrt(size))\n if size**2 != len(string) or square_size**2 != size:\n raise ValueError(\"Invalid input string length: %d\" % len(string))\n # TODO: remove this constraint for larger puzzles:\n if square_size != 3:\n raise ValueError(\"Code currently only supports 9x9 puzzles\")\n\n self = cls()\n # Fill in the cells at the places that are specified in the string\n for coords, char in zip(self.cells(), string):\n if char != '.':\n self.assign_value(coords, int(char))\n\n return self", "def __init__(self, timeString: str):\r\n #Example of input String and format\r\n #Jan 11, 2015, 11:21:12 PM EDT\r\n #\"%b %-d, %Y, %-I:%M:%S %p %Z\"\r\n dateList = timeString.split(',')\r\n self._month = dateList[0][0:3]\r\n self._day = int(dateList[0][-2:])\r\n self._year = int(dateList[1])\r\n\r\n dateList = dateList[2].split(':')\r\n self._minute = int(dateList[1])\r\n self._second = int(dateList[2][0:2])\r\n meridiem = dateList[2][3:5]\r\n self._hour = TimeStamp._adjustHour(int(dateList[0]), meridiem)", "def parse(s):\n t = IntervalTimer()\n t.id = s.get(\"id\", None)\n t.name = s.get(\"name\", None)\n if s.has_key(\"interval\"):\n ival = s[\"interval\"]\n if ival[-1:] == \"s\":\n t.seconds = int(ival[:-1])\n if ival[-1:] == \"m\":\n t.minutes = int(ival[:-1])\n if ival[-1:] == \"h\":\n t.hours = int(ival[:-1])\n if ival[-1:] == \"d\":\n t.days = int(ival[:-1])\n return t", "def from_str(cls, line):\n match = cls._re.search(line)\n if not match:\n return cls(None, None)\n groups = [int(d) for d in match.groups()]\n ymdhm1 = groups[:5]\n ymdhm2 = groups[5:10]\n hm3 = groups[10:]\n return cls(\n datetime.datetime(*ymdhm1),\n datetime.datetime(*ymdhm2),\n hm3[0] * 60 + hm3[1],\n )", "def fromStr(cls, s):\n assert isinstance(s, str), 'incorrect type of arg s: should be type str, is type {}'.format(type(s))\n s = [ int(n) for n in s.split('.') ]\n return cls(*s)", "def _parse_interval_str(cls, s):\n\n start, stop = s.split(':')\n if start == '':\n start = 0\n else:\n start = int(start)\n if stop == '':\n stop = None\n else:\n stop = int(stop)\n return slice(start, stop)", "def decode_timestamp(self, string):\n\n if isinstance(string, str):\n return datetime.strptime(string, self.timestamp_format)\n else:\n return string", "def set_at_from_string(self, string):\n\n self.at = iso8601.parse_date(string)", "def fromisoformat(cls, time_string):\n if not isinstance(time_string, str):\n raise TypeError(\"fromisoformat: 
argument must be str\")\n\n try:\n return cls(*_parse_isoformat_time(time_string))\n except Exception:\n raise ValueError(f\"Invalid isoformat string\")", "def _parse_time_str(self, time_str):\n time_fmt = \"%I:%M%p\"\n time_str = re.sub(\n r\":+\",\n \":\",\n re.sub(r\"\\s+\", \"\", re.sub(r\"to|from|\\.\", \"\", time_str.lower())).replace(\n \"o\", \"0\"\n ),\n )\n if \":\" not in time_str:\n time_fmt = \"%I%p\"\n elif len(time_str) < 6:\n time_fmt = \"%I%p\"\n time_str = time_str.replace(\":\", \"\")\n return datetime.strptime(time_str, time_fmt).time()", "def _str_to_dt(self):\n if isinstance(self.timepoint, str) and self._valid:\n timepoint = time.strftime('%Y-%m-% %H:%M:%S', self.timepoint)\n else:\n raise ValueError('Invalid Time Point Type')\n return timepoint", "def _parse_time(time_string: str) -> datetime:\n\n # Strings with timezone (+01:00) in v2 are not easily parsed. But time\n # zones are not important here, so we just omit them.\n time_string = time_string.rsplit('+')[0]\n\n time_formats = [\n '%Y-%m-%dT%H:%M:%S.%fZ', # Default\n '%Y-%m-%dT%H:%M:%SZ', # Imported UNCCD data\n '%Y-%m-%dT%H:%M:%S.%f', # Stripped timezone format (v2)\n ]\n for t_format in time_formats:\n try:\n return datetime.strptime(time_string, t_format)\n except ValueError:\n continue", "def from_string(representation: str) -> datetime:\n return parse(representation).replace(tzinfo=timezone.utc)", "def parseString(self, s):\n pass" ]
[ "0.6324874", "0.605796", "0.5917742", "0.5796603", "0.5753408", "0.57105887", "0.56728697", "0.5617218", "0.5584315", "0.5575762", "0.5562696", "0.5560282", "0.55567455", "0.5513849", "0.54915214", "0.54298156", "0.540465", "0.5389208", "0.53866446", "0.53139895", "0.53127724", "0.5296687", "0.5263024", "0.5257324", "0.52424645", "0.5238295", "0.52048975", "0.5185488", "0.51570153", "0.5146357" ]
0.7465002
0
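The document above delegates to the library's ISO8601Parser, which is not shown here; as a hedged illustration of the same contract (string in, time point out, TypeError otherwise), this sketch substitutes datetime.fromisoformat and does not model truncated forms or the base argument.

from datetime import datetime

def from_str(src):
    # Sketch only: accepts complete ISO 8601 strings such as
    # "1969-07-20T20:17:40"; the real method also handles truncated
    # forms parsed relative to `base`, which is not modelled here.
    if not isinstance(src, str):
        raise TypeError("from_str expects a string")
    return datetime.fromisoformat(src)

print(from_str("1969-07-20T20:17:40"))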
Formats this TimePoint using ordinal form, for example 1969-201T20:17:40. basic: True/False, selects basic form, e.g., 1969201T201740; default is False. truncation
def GetOrdinalString(self, basic=0, truncation=0, ndp=0,
                     zonePrecision=Precision.Complete, dp=",",
                     tDesignator="T"):
    return self.date.GetOrdinalString(basic, truncation) + tDesignator + \
        self.time.GetString(basic, NoTruncation, ndp, zonePrecision, dp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_data_value(self, value):\n if isinstance(value, bool):\n value = \"true\" if value else \"false\"\n elif isinstance(value, datetime.datetime):\n value = value.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return \"%s\" % value", "def only_ordinal(number):\n _ordinal = lambda n: \"%s\" % (\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4]) # some black magic.\n return _ordinal(number)", "def ordinal(number):\n _ordinal = lambda n: \"%d%s\" % (n,\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4]) # some black magic.\n return _ordinal(number)", "def GetOrdinalString(self, basic=False, truncation=NoTruncation):\n century, year, ordinalDay = self.GetOrdinalDay()\n if ordinalDay is None:\n # same as for calendar strings\n return self.GetCalendarString(basic, truncation)\n else:\n if truncation == NoTruncation:\n if basic:\n return \"%02i%02i%03i\" % (century, year, ordinalDay)\n else:\n return \"%02i%02i-%03i\" % (century, year, ordinalDay)\n elif truncation == Truncation.Century:\n if basic:\n return \"%02i%03i\" % (year, ordinalDay)\n else:\n return \"%02i-%03i\" % (year, ordinalDay)\n elif truncation == Truncation.Year:\n return \"-%03i\" % ordinalDay\n else:\n raise ValueError", "def ordinal_filter(value):\n digit = value % 10\n if 10 < value < 20:\n o = 'th'\n elif digit is 1:\n o = 'st'\n elif digit is 2:\n o = 'nd'\n elif digit is 3:\n o = 'rd'\n else:\n o = 'th'\n return '%d%s' % (value, o)", "def is_ordinal(self):\n return self._type == 'ordinal'", "def ordinal_label(n):\n n = int(n)\n return \"%d%s\" % (n,\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4])", "def ordinal(n):\n if 11 <= n <= 19:\n return str(n) + \"th\"\n s = str(n)\n last = int(s[-1])\n if 1 <= last <= 3:\n return s + (\"st\", \"nd\", \"rd\")[last-1]\n return s + \"th\"", "def short_time(self):\n return \"%s%02d\" % (util.SHORT_MONTH[self.month_num], self.year - 2000)", "def __formatDate(self, num):\n if len(num) < 2:\n num = '0'+num\n return num", "def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()", "def ordinal(number: int):\n if type(number) != int:\n raise TypeError\n else:\n if 11 <= number <= 20 or number % 10 == 0:\n return str(number) + 'th'\n elif number % 10 == 1:\n return str(number) + 'st'\n elif number % 10 == 2:\n return str(number) + 'nd'\n elif number % 10 == 3:\n return str(number) + 'rd'\n else:\n return str(number) + 'th'", "def isoformat(self):\n s = '{0:04}'.format(self._year)\n if self._month:\n s += '-{0:02}'.format(self._month)\n if self._day:\n s += '-{0:02}'.format(self._day)\n return s", "def format_book_time(dt):\n return datetime.strftime(dt, \"%Y-%m-%dT%H:%M:%S%z\")", "def ordinal(num):\n if num > 9:\n secondToLastDigit = str(num)[-2]\n if secondToLastDigit == '1':\n return 'th'\n lastDigit = num % 10\n if (lastDigit == 1):\n return 'st'\n elif (lastDigit == 2):\n return 'nd'\n elif (lastDigit == 3):\n return 'rd'\n else:\n return 'th'", "def __str__(self):\n return f\"DatePosition: year({self.y}), week({self.x}), date({self.date}) at point {super().__repr__()}\"", "def __str__(self):\n return '{y}-{m:0>2}-{d:0>2}'.format(y=self.year, m=self.month, d=self.day)", "def ordinal(value):\r\n try:\r\n value = int(value)\r\n except (TypeError, ValueError):\r\n return value\r\n t = (P_('0', 'th'),\r\n P_('1', 'st'),\r\n P_('2', 'nd'),\r\n P_('3', 'rd'),\r\n P_('4', 'th'),\r\n P_('5', 'th'),\r\n P_('6', 'th'),\r\n P_('7', 'th'),\r\n P_('8', 'th'),\r\n P_('9', 'th'))\r\n if value % 100 in (11, 12, 13): # special case\r\n return 
\"%d%s\" % (value, t[0])\r\n return '%d%s' % (value, t[value % 10])", "def format(self, dt, force_date=False):\n if isinstance(dt, datetime.datetime) and not force_date:\n return dt.strftime('%y/%m/%d %H:%M')\n else:\n return self.format_date(dt)", "def __str__(self) -> str:\n sign = '-' if self.negative else '+'\n return f'UTC{sign}{self.hours}:{self.minutes:>02}'", "def date2trost(datetime_date):\n # '0>2' means: add leading zero(s) if the int is less than two digits long\n return '{0}-{1:0>2}-{2:0>2}'.format(datetime_date.year, datetime_date.month, datetime_date.day)", "def humanize_day(day_num):\n if 11 <= day_num <= 13:\n suffix = 'th'\n else:\n r = day_num % 10\n if r == 1:\n suffix = 'st'\n elif r == 2:\n suffix = 'nd'\n elif r == 3:\n suffix = 'rd'\n else:\n suffix = 'th'\n return str(day_num) + suffix", "def format_datetime(self, datetime):\n return datetime.isoformat()", "def __str__(self):\n y, m, d = self._year, self._month, self._day\n h, mn, s, t = self._hour, self._minute, self._second, self._tz\n if s == int(s):\n # A whole number of seconds -- suppress milliseconds.\n return '%4.4d/%2.2d/%2.2d %2.2d:%2.2d:%2.2d %s' % (\n y, m, d, h, mn, s, t)\n else:\n # s is already rounded to the nearest microsecond, and\n # it's not a whole number of seconds. Be sure to print\n # 2 digits before the decimal point.\n return '%4.4d/%2.2d/%2.2d %2.2d:%2.2d:%06.6f %s' % (\n y, m, d, h, mn, s, t)", "def format_time_sortkey(self, data):\n return self.input['start_time'].time().strftime('%H%M').lstrip('0')", "def format_date(time=False):\n\n return arrow.get(time).format('DD-MM-YYYY')", "def get_nicedate(self):\n if self.valid is None:\n return \"(unknown issuance time)\"\n localts = self.valid\n fmt = \"%b %-d, %H:%M UTC\"\n if self.tz is not None:\n localts = self.valid.astimezone(self.tz)\n # A bit of complexity as offices may not implement daylight saving\n if self.z.endswith(\"ST\") and localts.dst():\n localts -= datetime.timedelta(hours=1)\n fmt = \"%b %-d, %-I:%M %p \" + self.z\n return localts.strftime(fmt)", "def _external_time_format(int_time):\n simple_iso_time = True\n if simple_iso_time:\n ext_time = int_time.replace(tzinfo=SimpleUtc()).isoformat()\n else:\n ext_time = int_time.isoformat() + \"Z\"\n return ext_time", "def ordinal(self, day):\n teen_numbers = [11, 12, 13, 14, 15, 16, 17, 18, 19]\n output = ['th','st', 'nd', 'rd', 'th', 'th',\n 'th', 'th', 'th', 'th', 'th']\n if int(day) in teen_numbers:\n return (day + 'th')\n else:\n return (day + output[int(day[-1])])", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()" ]
[ "0.5434197", "0.5268163", "0.51410455", "0.50320715", "0.50300586", "0.50202215", "0.4939225", "0.49258694", "0.49249548", "0.4912397", "0.48663875", "0.48466504", "0.48272082", "0.48239732", "0.48088363", "0.4807816", "0.47984192", "0.47326642", "0.47113737", "0.47053987", "0.46987113", "0.46722093", "0.4663283", "0.4616798", "0.46120238", "0.45995462", "0.45917428", "0.45860073", "0.45816165", "0.4577133" ]
0.60907304
0
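A small sketch of the ordinal form itself, assuming a complete date and time: strftime's %j supplies the zero-padded ordinal day needed by both the extended and basic renderings named in the query.

from datetime import datetime

def ordinal_string(dt, basic=False, t_designator="T"):
    # Extended form: 1969-201T20:17:40; basic form: 1969201T201740.
    if basic:
        return dt.strftime("%Y%j") + t_designator + dt.strftime("%H%M%S")
    return dt.strftime("%Y-%j") + t_designator + dt.strftime("%H:%M:%S")

moon = datetime(1969, 7, 20, 20, 17, 40)
print(ordinal_string(moon))              # 1969-201T20:17:40
print(ordinal_string(moon, basic=True))  # 1969201T201740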
Constructs a TimePoint from unixTime, the number of seconds since the time origin. The resulting time is in UTC. This method uses Python's gmtime(0) to obtain the Unix origin time.
def FromUnixTime(cls, unixTime):
    utcTuple = pytime.gmtime(0)
    t, overflow = Time.FromStructTime(utcTuple).Offset(seconds=unixTime)
    d = Date.FromStructTime(utcTuple).Offset(days=overflow)
    return cls(date=d, time=t.WithZone(zDirection=0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unixtime(self):\n if not self.Complete():\n raise DateTimeError(\"get_unixtime requires complete timepoint\")\n zoffset = self.time.GetZoneOffset()\n if zoffset is None:\n raise DateTimeError(\"get_unixtime requires timezone\")\n elif zoffset == 0:\n zt = self\n else:\n zt = self.ShiftZone(zDirection=0)\n days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()\n seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()\n return 86400 * days + seconds", "def unixtime(self):\n return time.mktime(\n (self.year or 0, self.month or 0, self.day or 0, self.hour or 0, self.minute or 0, self.second or 0,\n self.dow or 0, self.doy or 0)\n )", "def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)", "def getUnixTimeStamp():\n return calendar.timegm(datetime.utcnow().utctimetuple())", "def unixtime(self,current_datetime):\n unixtime = time.mktime(current_datetime.timetuple())\n return unixtime", "def getUnixTime(utc_time):\n\ttemp=time.strptime(utc_time, \"%a %b %d %H:%M:%S +0000 %Y\")\n\treturn calendar.timegm(temp)", "def getUnixTime(pool=\"time.apple.com\"):\n time_offset = ntplib.NTPClient().request(pool).offset\n return float(time.time()+time_offset)", "def utcfromtimestamp(unix_epoch_timestamp):\n (y, m, d, hour, min, sec) = time.gmtime(unix_epoch_timestamp)[:6]\n return datetime.datetime(y, m, d, hour, min, sec, 0, UTC_TZ)", "def _create_timestamp():\n return (datetime.utcnow() - datetime(1970,1,1)).total_seconds()", "def get_current_unix_time():\n # https://timanovsky.wordpress.com/2009/04/09/get-unix-timestamp-in-java-python-erlang/\n current_time = time.time()\n timestamp = int(current_time *1000)\n return timestamp", "def timestampfromutc(utc):\n return (utc - datetime(1970, 1, 1)).total_seconds()", "def unix_timestamp():\n return int(time.time())", "def utcTime():\r\n return calendar.timegm(time.gmtime())", "def unix_timestamp_date(unix=1459141485):\n return datetime.datetime.fromtimestamp(int(unix))", "def unix_to_localtime(t, tz=\"US/Eastern\"):\n\n from datetime import datetime\n from pytz import timezone\n import pytz\n\n utc = pytz.utc\n tz = timezone(tz)\n\n timestamp = datetime.utcfromtimestamp(t)\n\n return(utc.localize(timestamp).astimezone(tz).strftime(\"%H:%M:%S\"))", "def unix_to_timestamp(unix):\n return int(round(unix * 1e6))", "def utcnow(cls):\n t = _time.time()\n return cls.utcfromtimestamp(t)", "def unixTimeMs(dateAndTime):\n dateAndTime = dateAndTime + datetime.timedelta(hours=HOUR_ADJUSTMENT)\n return int((dateAndTime - EPOCH).total_seconds() * 1000.0)", "def getUnix(gpsTime):\n\tyr = int(gpsTime[0:4])\n\tmon = int(gpsTime[5:7])\n\tmday = int(gpsTime[8:10])\n\thour = int(gpsTime[11:13])+D.tzOffset+D.dst\t# account for offset due to timezones\n\tmin = int(gpsTime[14:16])\n\tsec = int(gpsTime[17:19])\n\twday = -1\t\t# weekday: -1 for don't know.\n\tyday = -1\t\t# day of the year: -1 for don't know\n\tisdst = 0 \t\t# daylight savings. doesn't seem to have effect. 
Add in hour\n\tunix = time.mktime((yr,mon,mday,hour,min,sec,wday,yday,isdst))\n\t# print time.localtime(unix)\n\treturn unix", "def timestamp(self):\n if self._tzinfo is None:\n s = self._mktime()\n return s + self.microsecond / 1e6\n else:\n return (self - _EPOCH).total_seconds()", "def from_unix(cls, seconds, milliseconds=0):\n return datetime.datetime.fromtimestamp(seconds + milliseconds * .001)", "def datetime2UnixTime(dt):\n\n # UTC unix timestamp\n unix_timestamp = (dt - datetime(1970, 1, 1)).total_seconds()\n\n return unix_timestamp", "def iso_from_unix_time(unix_time: float, precision: int = 9) -> ISOTimestamp:\n frac_part, int_part = math.modf(unix_time)\n\n seconds = int(int_part)\n\n if frac_part < 0:\n seconds -= 1\n frac_part += 1\n\n decimals = f\"{{0:.{precision}f}}\".format(frac_part)[1:].rstrip('0.') # noqa\n\n return _from_unix_time(seconds, decimals)", "def unixTime2Date(ts, tu, dt_obj=False):\n\n # Convert the UNIX timestamp to datetime object\n dt = datetime.utcfromtimestamp(float(ts) + float(tu)/1000000)\n\n\n if dt_obj:\n return dt\n\n else:\n return dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, float(tu)/1000", "def datetime_utc_epoch_start() -> datetime:\n return timestamp_to_datetime(0)", "def Now():\n ut = (datetime.datetime.utcnow() - _EPOCH).total_seconds() / 86400.0\n return Time(ut)", "def set_timepoints(self):\n unixtime = self.created.timestamp() # float\n self.timepoints = unixtime + self.points # TODO: calc a sort value!", "def utctime(self) -> datetime:\n return datetime.utcfromtimestamp(float(self.ns_since_epoch) / 1e9)", "def orig_time(self) -> float:\n return ntp_to_system_time(self.orig_timestamp)", "def utcfromtimestamp(cls, t):\n return cls._fromtimestamp(t, True, None)" ]
[ "0.67560035", "0.6652053", "0.66441214", "0.6569114", "0.6303727", "0.6195301", "0.6144796", "0.6135113", "0.60965097", "0.60522974", "0.6036487", "0.6026946", "0.5911802", "0.5905643", "0.58966154", "0.5851556", "0.58437127", "0.57818604", "0.57163656", "0.57139033", "0.5700102", "0.567968", "0.56419563", "0.5600038", "0.5597317", "0.5587332", "0.55830824", "0.5568611", "0.55670047", "0.55640614" ]
0.706338
0
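A hedged standard-library equivalent of FromUnixTime above, for orientation only; it reproduces the "resulting time is in UTC" contract without the library's Date/Time offset machinery.

from datetime import datetime, timezone

def from_unix_time(unix_time):
    # Seconds since the epoch -> an aware UTC datetime.
    return datetime.fromtimestamp(unix_time, tz=timezone.utc)

print(from_unix_time(0))           # 1970-01-01 00:00:00+00:00
print(from_unix_time(86400 + 90))  # 1970-01-02 00:01:30+00:00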
Returns a unix time value representing this time point.
def get_unixtime(self):
    if not self.Complete():
        raise DateTimeError("get_unixtime requires complete timepoint")
    zoffset = self.time.GetZoneOffset()
    if zoffset is None:
        raise DateTimeError("get_unixtime requires timezone")
    elif zoffset == 0:
        zt = self
    else:
        zt = self.ShiftZone(zDirection=0)
    days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()
    seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()
    return 86400 * days + seconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unixtime(self):\n return time.mktime(\n (self.year or 0, self.month or 0, self.day or 0, self.hour or 0, self.minute or 0, self.second or 0,\n self.dow or 0, self.doy or 0)\n )", "def getUnixTimeStamp():\n return calendar.timegm(datetime.utcnow().utctimetuple())", "def timestamp(self):\n if self._tzinfo is None:\n s = self._mktime()\n return s + self.microsecond / 1e6\n else:\n return (self - _EPOCH).total_seconds()", "def timestamp(self):\n # this only returns second precision, which is why we don't use it\n #now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())\n\n # this returns microsecond precision\n # http://bugs.python.org/msg180110\n epoch = datetime.datetime(1970, 1, 1)\n return (self - epoch).total_seconds()", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def __get_timeval():\n return convert_timeval(time.time())", "def get_current_unix_time():\n # https://timanovsky.wordpress.com/2009/04/09/get-unix-timestamp-in-java-python-erlang/\n current_time = time.time()\n timestamp = int(current_time *1000)\n return timestamp", "def unixtime(self,current_datetime):\n unixtime = time.mktime(current_datetime.timetuple())\n return unixtime", "def unix_timestamp():\n return int(time.time())", "def get_timeval():\n return convert_timeval(time.time())", "def timestamp_normalized(self):\r\n if not self.timestamp:\r\n return None\r\n\r\n if isinstance(self.timestamp, (int, long)):\r\n return self.timestamp\r\n\r\n if isinstance(self.timestamp, timedelta):\r\n tmp = datetime.now() + self.timestamp\r\n else:\r\n tmp = self.timestamp\r\n\r\n return long(time.mktime(tmp.timetuple()) * 1e+6 + tmp.microsecond)", "def time(self):\n return parse_time(self['timestamp'])", "def getTime(self) -> float:\n return self.t", "def timestamp(self, t):\n if isinstance(t, datetime):\n t = time.mktime(t.timetuple())\n return t - 631065600", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def get_time(self) -> float:\n raise NotImplementedError()", "def getUnixTime(pool=\"time.apple.com\"):\n time_offset = ntplib.NTPClient().request(pool).offset\n return float(time.time()+time_offset)", "def get_timestamp():\n\n # Convert timestamp to int after multiply by 1000 to get millisecond timestamp in int.\n return int(time.time() * 1000)", "def timestamp(self) -> datetime.datetime.timestamp:\n timestamp = datetime.datetime.utcfromtimestamp(int(self._timestamp) / 1000)\n return timestamp", "def gettime(self):\n return self.t", "def get_time(self):\n return self._current_time_sec", "def get_current_unix_timestamp_ms():\r\n return int(datetime.timestamp(datetime.now())) * 1000", "def _create_timestamp():\n return (datetime.utcnow() - datetime(1970,1,1)).total_seconds()", "def timeTime(self):\n return self._micros / 1000000.0", "def get_time(self):\n return self.time", "def _get_current_epoch_time() -> float:\n return time.time()", "def get_time(self):\n return self._ticks", "def __get_unix_now(self):\n ms_now = int(time.time() * 10 ** 5)\n return hex(ms_now)[2:]", "def time(self) -> int:\n return self.raw[\"time\"]", "def get_time(self):\n return self.__time" ]
[ "0.79627705", "0.7222582", "0.72103274", "0.7181688", "0.71629786", "0.7128564", "0.7111882", "0.71087086", "0.70909053", "0.70893455", "0.6969811", "0.6939169", "0.69360566", "0.68968153", "0.68921065", "0.6858193", "0.68465495", "0.68135035", "0.6807116", "0.6783193", "0.6773907", "0.67221785", "0.6710371", "0.6687749", "0.66661084", "0.66647327", "0.6652643", "0.66470003", "0.6639208", "0.66307306" ]
0.76794666
1
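The inverse direction, sketched with the standard library; like the document above it rejects values without a zone, since a timezone is required to pin the point against the epoch.

from datetime import datetime, timezone

def get_unixtime(dt):
    # Refuse naive datetimes, mirroring the "requires timezone" check.
    if dt.tzinfo is None:
        raise ValueError("get_unixtime requires a timezone-aware value")
    return (dt - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds()

print(get_unixtime(datetime(1970, 1, 2, tzinfo=timezone.utc)))  # 86400.0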
Constructs a TimePoint from the current local date and time.
def FromNow(cls):
    t = pytime.time()
    localTime = pytime.localtime(t)
    return cls.FromStructTime(localTime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def now(cls):\n return DateTime(*time.localtime())", "def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)", "def now(cls, tz=None):\n t = _time.time()\n return cls.fromtimestamp(t, tz)", "def local_time(self, local_time: SmartNvmeLocalTime):\n\n self._local_time = local_time", "def local_time(self) -> SmartNvmeLocalTime:\n return self._local_time", "def timepoints(self, first_date=None, last_date=None, today=None):\n self._data.timepoints(first_date=first_date, last_date=last_date, today=today)\n self._init_trackers()", "def createFromPoint(cls, point, **kwargs):\n return cls(point.x, point.y, **kwargs)", "def fromtimestamp(cls, t):\n y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)\n return cls(y, m, d)", "def setTimepoint(self, tp):\n\t\tpass", "def __init__(self, hour, minute=0, second=0, microsecond=0, tzinfo=None):", "def getPointTrajectory(self, localPt: Vector3) -> Trajectory:\n return Trajectory(self.times,[so3.apply(m,localPt) for m in self.milestones])", "def local_time():\n return datetime.datetime.now().isoformat()[:len('2017-01-24T10:44:00')]", "def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):", "def now(cls, tz=None):\n return datetime()", "def start(self):\n if self.start_time is None:\n time = datetime.time(hour=19, tzinfo=CET)\n else:\n time = self.start_time.replace(tzinfo=CET)\n return datetime.datetime.combine(self.date, time)", "def earliestTime(self):\n return self.__class__(\n self._year, self._month, self._day, 0, 0, 0, self._tz)", "def Now():\n ut = (datetime.datetime.utcnow() - _EPOCH).total_seconds() / 86400.0\n return Time(ut)", "def fromPoint(cls, p: Point):\n return cls(p.coords)", "def setup_datetime(self):\n \n current_date_time = datetime.now()\n timezone_diference = timedelta(hours=-3)\n return timezone(timezone_diference), current_date_time", "def make_current():\n current = datetime.datetime.now()\n hour = '{:02d}'.format(current.hour)\n minute = '{:02d}'.format(current.minute)\n second = '{:02d}'.format(current.second)\n current_time = hour + minute + second\n return current_time", "def timetz(self):\n return time(\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self._tzinfo,\n fold=self.fold,\n )", "def now(cls):\n return cls(2017, 4, 27,\n 13, 25, 46, 629282)", "def timestampfromlocal(local):\n return local.timestamp()", "def get_local_date(self):\n tz = pytz.timezone(self.timezone)\n utc = pytz.utc\n utc_now = datetime.datetime.now(utc)\n return utc_now.astimezone(tz).date()", "def _get_time(self):\n # get the current time in UTC (make sure we are timezone aware)\n now_utc = datetime.datetime.now(pytz.UTC)\n \n # convert to our local timezone\n timenow = now_utc.astimezone(self.timezone)\n \n # save the data to our data\n self.data['year'][0] = timenow.year\n self.data['month'][0] = timenow.month\n self.data['day'][0] = timenow.day\n self.data['hour'][0] = timenow.hour\n self.data['minute'][0] = timenow.minute\n self.data['second'][0] = timenow.second\n \n return", "def today(cls):\n t = _time.time()\n return cls.fromtimestamp(t)", "def start_time(self):\n start_time = self.cache.get('start_time')\n if start_time is not None:\n return DatePoint.unfreeze(start_time)", "def today(cls):\n timestamp = time.localtime()\n return Date(timestamp[0], timestamp[1], timestamp[3], timestamp[6], timestamp[7])", "def get_point_at(self, t):\n segment = self.get_segment_for_time(t)\n return segment.point_at(t)", 
"def get_datetime(ts, local_time=True):\n tsf = float(ts) / 1000\n timev = time.localtime(tsf) if local_time else time.gmtime(tsf)\n dt = datetime.datetime.fromtimestamp(time.mktime(timev))\n return dt" ]
[ "0.6122709", "0.5958908", "0.5932133", "0.5771543", "0.5771211", "0.5714865", "0.568802", "0.5671255", "0.56375754", "0.5602565", "0.5585798", "0.55640185", "0.555307", "0.5513492", "0.55076015", "0.5497981", "0.5484358", "0.5479155", "0.54750097", "0.54229313", "0.5403783", "0.54000324", "0.5399649", "0.5381697", "0.53686625", "0.5354513", "0.5347574", "0.5336979", "0.53189003", "0.531755" ]
0.6744919
0
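A sketch of FromNow above using only the standard library; note that struct_time carries whole-second resolution, so the constructed value is truncated to seconds just as in the document.

import time
from datetime import datetime

def from_now():
    # time.localtime() yields a struct_time in local time with
    # whole-second resolution, like the document above.
    return datetime(*time.localtime()[:6])

print(from_now())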
Constructs a TimePoint from the current UTC date and time.
def FromNowUTC(cls):
    t = pytime.time()
    utcTime = pytime.gmtime(t)
    return cls.FromStructTime(utcTime).WithZone(zDirection=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def utcnow(cls):\n t = _time.time()\n return cls.utcfromtimestamp(t)", "def FromNow(cls):\n t = pytime.time()\n localTime = pytime.localtime(t)\n return cls.FromStructTime(localTime)", "def utcfromtimestamp(cls, t):\n return cls._fromtimestamp(t, True, None)", "def now(cls, tz=None):\n t = _time.time()\n return cls.fromtimestamp(t, tz)", "def now(cls):\n return DateTime(*time.localtime())", "def __init__(self, hour, minute=0, second=0, microsecond=0, tzinfo=None):", "def fromutc(self, dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self)\n return super(UTC, self).fromutc(dt)", "def datetime_utc_now() -> datetime:\n return datetime.now(timezone.utc)", "def setup_datetime(self):\n \n current_date_time = datetime.now()\n timezone_diference = timedelta(hours=-3)\n return timezone(timezone_diference), current_date_time", "def nowUTC():\n return datetime.datetime.now(pytz.utc)", "def now_utc() -> datetime:\n return datetime.now(timezone.utc)", "def fromutc(self, dt):\n if not isinstance(dt, real_datetime):\n raise TypeError(\"fromutc() requires a datetime argument\")\n if dt.tzinfo is not self:\n raise ValueError(\"dt.tzinfo is not self\")\n\n dtoff = dt.utcoffset()\n if dtoff is None:\n raise ValueError(\"fromutc() requires a non-None utcoffset() \" \"result\")\n\n # See the long comment block at the end of this file for an\n # explanation of this algorithm.\n dtdst = dt.dst()\n if dtdst is None:\n raise ValueError(\"fromutc() requires a non-None dst() result\")\n delta = dtoff - dtdst\n if delta:\n dt += delta\n dtdst = dt.dst()\n if dtdst is None:\n raise ValueError(\n \"fromutc(): dt.dst gave inconsistent \" \"results; cannot convert\"\n )\n return dt + dtdst", "def now(cls, tz=None):\n return datetime()", "def datetime_utc_epoch_start() -> datetime:\n return timestamp_to_datetime(0)", "def createFromPoint(cls, point, **kwargs):\n return cls(point.x, point.y, **kwargs)", "def start(self):\n if self.start_time is None:\n time = datetime.time(hour=19, tzinfo=CET)\n else:\n time = self.start_time.replace(tzinfo=CET)\n return datetime.datetime.combine(self.date, time)", "def now_dt(tz='UTC'):\n if tz != 'UTC':\n raise NotImplementedError()\n return datetime.datetime.utcnow().replace(tzinfo = pytz.utc)", "def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):", "def utcdatetime(self):\n utc = self.toZone('UTC')\n second = int(utc._second)\n microsec = utc.micros() % 1000000\n dt = datetime(utc._year, utc._month, utc._day, utc._hour,\n utc._minute, second, microsec)\n return dt", "def timetz(self):\n return time(\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self._tzinfo,\n fold=self.fold,\n )", "def make_naive_utc(date_time: datetime.datetime) -> datetime.datetime:\n utc_timezone = datetime.timezone(datetime.timedelta(seconds=0))\n return date_time.astimezone(utc_timezone).replace(tzinfo=None)", "def datetime_utcnow() -> datetime:\n return datetime.now(tz=pytz.timezone('UTC'))", "def utc():\n return date_from_utc(dt.utcnow())", "def utctimetuple(self):\n offset = self.utcoffset()\n if offset:\n self -= offset\n y, m, d = self.year, self.month, self.day\n hh, mm, ss = self.hour, self.minute, self.second\n return _build_struct_time(y, m, d, hh, mm, ss, 0)", "def earliestTime(self):\n return self.__class__(\n self._year, self._month, self._day, 0, 0, 0, self._tz)", "def dt(*args, **kwargs):\n \n if 'tz' in kwargs:\n tzinfo = kwargs.pop('tz')\n else:\n tzinfo = kwargs.pop('tzinfo', None)\n \n offset_s = kwargs.pop('offset_s', None) 
\n trustme = kwargs.pop('trustme', False)\n \n if kwargs:\n raise Exception('Unhandled arg: \"{}\".'.format(kwargs))\n \n if (tzinfo is None):\n # Force UTC if None\n timezone = timezonize('UTC')\n \n else:\n timezone = timezonize(tzinfo)\n \n if offset_s:\n # Special case for the offset\n from dateutil.tz import tzoffset\n if not tzoffset:\n raise Exception('For ISO date with offset please install dateutil')\n time_dt = datetime.datetime(*args, tzinfo=tzoffset(None, offset_s))\n else:\n # Standard timezone\n time_dt = timezone.localize(datetime.datetime(*args))\n\n # Check consistency \n if not trustme and timezone != pytz.UTC:\n if not check_dt_consistency(time_dt):\n raise ValueError('Sorry, time {} does not exists on timezone {}'.format(time_dt, timezone))\n\n return time_dt", "def setTimepoint(self, tp):\n\t\tpass", "def now(cls):\n return cls(2017, 4, 27,\n 13, 25, 46, 629282)", "def utc_now():\n return datetime.now(tz=timezone.utc)", "def utcnow() -> datetime.datetime:\n return datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)" ]
[ "0.61733437", "0.60582864", "0.60350686", "0.589435", "0.583186", "0.5800294", "0.57941496", "0.57737136", "0.5772167", "0.5748418", "0.5730196", "0.56757534", "0.56599766", "0.5637395", "0.56094176", "0.56089145", "0.5587311", "0.5582854", "0.55539644", "0.5523434", "0.5472167", "0.54487014", "0.54452366", "0.5424531", "0.5405308", "0.5397882", "0.5396879", "0.5389317", "0.5378078", "0.5362543" ]
0.7068866
0
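The UTC variant, sketched the same way: gmtime() replaces localtime() and the result is tagged with a zero-offset zone, echoing WithZone(zDirection=0) in the document above.

import time
from datetime import datetime, timezone

def from_now_utc():
    return datetime(*time.gmtime()[:6], tzinfo=timezone.utc)

print(from_now_utc())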
Returns a tuple of (value, formatString) or (None, None). formatString is one of "n", "n.n" or "n,n". If allowFraction is False then a fractional format raises an error.
def ParseDurationValue(self, allowFraction=True):
    value = self.ParseDIGITRepeat()
    if value is None:
        return None, None
    if self.the_char in ".,":
        if not allowFraction:
            raise DateTimeError(
                "fractional component in duration must have lowest order")
        format = "n" + self.the_char + "n"
        value = value + self.ParseFraction()
    else:
        format = "n"
    return value, format
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frac(num_str, obj_mode=False):\n if obj_mode == False:\n cast_frac = lambda inp: float(Fraction(inp))\n elif obj_mode == True:\n cast_frac = Fraction\n try:\n return cast_frac(num_str)\n except ValueError:\n if num_str == '':\n return cast_frac('0/1')\n elif num_str == '?':\n return np.nan", "def _get_compact_format(\n number: float | decimal.Decimal | str,\n compact_format: LocaleDataDict,\n locale: Locale,\n fraction_digits: int,\n) -> tuple[decimal.Decimal, NumberPattern | None]:\n if not isinstance(number, decimal.Decimal):\n number = decimal.Decimal(str(number))\n if number.is_nan() or number.is_infinite():\n return number, None\n format = None\n for magnitude in sorted([int(m) for m in compact_format[\"other\"]], reverse=True):\n if abs(number) >= magnitude:\n # check the pattern using \"other\" as the amount\n format = compact_format[\"other\"][str(magnitude)]\n pattern = parse_pattern(format).pattern\n # if the pattern is \"0\", we do not divide the number\n if pattern == \"0\":\n break\n # otherwise, we need to divide the number by the magnitude but remove zeros\n # equal to the number of 0's in the pattern minus 1\n number = cast(decimal.Decimal, number / (magnitude // (10 ** (pattern.count(\"0\") - 1))))\n # round to the number of fraction digits requested\n rounded = round(number, fraction_digits)\n # if the remaining number is singular, use the singular format\n plural_form = locale.plural_form(abs(number))\n if plural_form not in compact_format:\n plural_form = \"other\"\n if number == 1 and \"1\" in compact_format:\n plural_form = \"1\"\n format = compact_format[plural_form][str(magnitude)]\n number = rounded\n break\n return number, format", "def frac(amount, limit=100):\n frac = Fraction(amount).limit_denominator(limit)\n frac_double = frac.numerator / frac.denominator\n\n try:\n frac_diff = frac_double - amount\n except TypeError: # amount is a string\n amt = float(amount)\n frac_diff = frac_double - amt\n relative_diff = frac_diff / amt\n else:\n relative_diff = frac_diff / amount\n\n return (frac, relative_diff)", "def test_prepare_value_string(self):\n field = DecimalFractionField()\n result = field.prepare_value(\"1/4\")\n self.assertEqual(\"1/4\", result)\n\n result = field.prepare_value(\"1 1/4\")\n self.assertEqual(\"1 1/4\", result)", "def parseFraction(f):\n p = f.find(\"/\")\n if p < 1:\n return None\n s1 = f[:p]\n s2 = f[p+1:]\n try:\n v1 = int(s1)\n v2 = int(s2)\n except ValueError:\n return None\n if v2:\n return 1.0 * v1 / v2\n else:\n return None", "def _format(val, valtype, floatfmt, intfmt, missingval=\"\", has_invisible=True): # noqa\n if val is None:\n return missingval\n\n if valtype is str:\n return f\"{val}\"\n elif valtype is int:\n return format(val, intfmt)\n elif valtype is bytes:\n try:\n return str(val, \"ascii\")\n except (TypeError, UnicodeDecodeError):\n return str(val)\n elif valtype is float:\n is_a_colored_number = has_invisible and isinstance(val, (str, bytes))\n if is_a_colored_number:\n raw_val = _strip_ansi(val)\n formatted_val = format(float(raw_val), floatfmt)\n return val.replace(raw_val, formatted_val)\n else:\n return format(float(val), floatfmt)\n else:\n return f\"{val}\"", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def test_prepare_value_string(self):\n field = FractionField()\n result = field.prepare_value(\"1/4\")\n self.assertEqual(\"1/4\", result)\n\n result = field.prepare_value(\"1 1/4\")\n self.assertEqual(\"1 
1/4\", result)", "def testnegfrac_vs_frac ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, fracTup2 in self.knownNegfrac_vs_fracValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) )\r\n\t\t\tfrac2 = eval ( r.sub ( 'frac.frac', fracTup2 ) )\r\n\t\t\tself.assertEqual ( frac1.toString (), frac2.toString () )", "def format_result(wf, converted_rate, decimal_places=4):\n fmt_val = locale.format('%%.%if' % decimal_places, converted_rate, True, True)\n\n # User divisor\n divisor = wf.settings.get(SETTINGS_DEFAULT_NUMBER_DIVISOR, '.')\n\n try:\n locale_divisor = locale.localeconv().get('decimal_point')\n except:\n # Numero de casas decimais pra pegar o divisor\n locale_divisor = fmt_val[-decimal_places]\n\n # when there are no decimal places, i don't format the number\n return fmt_val if decimal_places == 0 else fmt_number(fmt_val, divisor, locale_divisor)", "def pretty_float(v: Union[float, int], n_sigfigs: Optional[int] = 5) -> str:\n\t\tv0 = v.__class__(v)\n\t\t# first, handle NaN and infinities\n\t\tif np.isneginf(v):\n\t\t\treturn Chars.minus + Chars.inf\n\t\telif np.isposinf(v):\n\t\t\treturn '+' + Chars.inf\n\t\telif np.isnan(v):\n\t\t\treturn Chars.null\n\t\t# sweet. it's a regular float or int.\n\t\tisint = isinstance(v, int)\n\t\tif isint:\n\t\t\tv = int(round(v))\n\t\tif n_sigfigs is None:\n\t\t\ts = StringTools.strip_empty_decimal(str(v))\n\t\telse:\n\t\t\ts = str(float(str(('%.{}g'.format(n_sigfigs)) % v)))\n\t\t# remove the .0 if the precision doesn't support it\n\t\t# if v >= 1 and n_sigfigs<2, it couldn't have a decimal\n\t\t# and if n_sigfigs<1, it definitely can't\n\t\tif isint or n_sigfigs is not None and v0>=1 and n_sigfigs<2 or n_sigfigs is not None and n_sigfigs < 1:\n\t\t\ts = StringTools.strip_empty_decimal(s)\n\t\t# and ... 
%g does this.\n\t\tif s.startswith('.'):\n\t\t\ts = '0' + s\n\t\t# prepend + or -\n\t\tif s.startswith('-'):\n\t\t\treturn Chars.minus + s[1:]\n\t\telse:\n\t\t\treturn '+' + s", "def _get_format(value, quote_mode='always'):\n\n formats = {'always': '{key}=\"{value}\"\\n', 'auto': '{key}={value}\\n'}\n\n if quote_mode not in formats.keys():\n return KeyError(f'quote_mode {quote_mode} is invalid')\n\n _mode = quote_mode\n if quote_mode == 'auto' and ' ' in value:\n _mode = 'always'\n return formats.get(_mode)", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def float_format(self):\n ...", "def test_prepare_value_limit_denominator(self):\n field = DecimalFractionField(limit_denominator=3)\n result = field.prepare_value(Decimal(1 / 3.0))\n self.assertEqual(\"1/3\", result)", "def _format_field_val(\n self,\n field: str,\n field_type: str,\n value: Any,\n ) -> str | int | bool | list | None:\n\n # If the field is empty, no need to format.\n if value is None:\n return None\n\n # TODO(DanielRyanSmith): Write checks to ensure enum values are valid.\n if field_type == 'emails' or field_type == 'split_str':\n list_val = self._split_list_input(field, field_type, value, ',')\n if field == 'blink_components' and len(value) == 0:\n return [settings.DEFAULT_COMPONENT]\n return list_val\n elif field_type == 'link':\n return self._extract_link(value)\n elif field_type == 'links':\n list_val = self._split_list_input(field, field_type, value)\n # Filter out any URLs that do not conform to the proper pattern.\n return [self._extract_link(link)\n for link in list_val if link]\n elif field_type == 'int':\n # Int fields can be unset by giving null or nothing in the input field.\n if value == '' or value is None:\n return None\n try:\n return int(value)\n except ValueError:\n self._abort_invalid_data_type(field, field_type, value)\n elif field_type == 'bool':\n return bool(value)\n return str(value)", "def format_decimal(value, mask):\n if mask is None:\n return str(value)\n return \"{:d}/{:d}\".format(value, mask)", "def __format__(self, format_spec):\n if format_spec == \"polite\":\n return self.polite\n elif format_spec == \"casual\":\n return self.casual\n else:\n # Using string addition here to avoid triggering flake8-sfs\n # while still giving a meaningful self-contained example:\n raise ValueError(format_spec + \" not a format defined by Client object\")", "def extension (formatStr):\n assert False, \"TODO:\"", "def get_format(fstr):\n fstr = fstr.lower() # support uppercase letters\n if os.sep in fstr:\n fstr = fstr.split(os.sep)[-1]\n try:\n fname, ext = fstr.split(\".\", 1)\n except:\n fname, ext = (\"\", \"\")\n\n if ext.startswith(\"bm\"):\n return FORMAT_BMP\n elif ext == \"txt\":\n return FORMAT_CAR\n elif ext == \"ta.csv\":\n return FORMAT_TA_CSV\n elif ext == \"fin\":\n return FORMAT_FIN\n elif ext == \"hul\":\n return FORMAT_HUL\n elif ext in [\"ncp\"]:\n return FORMAT_NCP\n elif ext in [\"prm\", \"m\"]:\n return FORMAT_PRM\n elif ext == \"rim\":\n return FORMAT_RIM\n elif ext == \"w\":\n return FORMAT_W\n else:\n return FORMAT_UNK", "def format_value(value):\n if value is None:\n return \"NaN\"\n else:\n return float(value)", "def pretty_float_formatter(field, ndigits=None):\n def _formatter(obj):\n value = obj[field] if isinstance(obj, dict) else getattr(obj, field)\n if type(value) in (int, float):\n if ndigits:\n return round(value, ndigits)\n return value\n return \"n/a\"\n return _formatter", "def get_format(self):\n pass", "def getfloat(self, 
fraction) -> float:\n self.numerator_a = fraction.numerator_a\n self.denominator_b = fraction.denominator_b\n self.fraction = str(self.numerator_a) + '/' + str(self.denominator_b)\n return super().__float__()", "def get_value ( self, object ):\n try:\n if self.format_func is not None:\n return self.format_func( self.get_raw_value( object ) )\n\n return self.format % ( self.get_raw_value( object ), )\n except:\n logger.exception( 'Error occurred trying to format a %s value' %\n self.__class__.__name__ )\n return 'Format!'", "def sensible_format_data(self, value):\n if abs(value) > 1e4 or abs(value)<1e-3:\n s = '%1.4e' % value\n return self._formatSciNotation(s)\n else:\n return '%4.3f' % value", "def get_format(self):\n return self._format[0]", "def format(value, arg):\n try:\n if value is not None:\n # return (str(arg)) % value\n return (str(value)) % arg\n else:\n return \"\"\n except (ValueError, TypeError):\n return \"\"", "def get(self, get_params, block):\n value = f\"{{{self.key}}}\"\n try:\n value = value_ = get_params(self.key)\n if self.format.startswith(\":\"):\n # if a parameter has been set to be formatted as a numeric\n # type then we see if we can coerce it to be. This allows\n # the user to format types that normally would not be\n # allowed eg '123' it also allows {:d} to be used as a\n # shorthand for {:.0f}. Use {:g} to remove insignificant\n # trailing zeroes and the decimal point too if there are\n # no remaining digits following it. If the parameter cannot\n # be successfully converted then the format will be removed.\n try:\n if \"escape\" in self.format:\n value = escape(value)\n if \"ceil\" in self.format:\n value = ceil(float(value))\n if \"f\" in self.format:\n value = float(value)\n if \"g\" in self.format:\n value = float(value)\n if \"d\" in self.format:\n value = int(float(value))\n output = f\"{{[{self.key}]{self.format}}}\"\n value = output.format({self.key: value})\n value_ = float(value)\n except ValueError:\n pass\n elif self.format.startswith(\"!\"):\n output = f\"{{{self.key}{self.format}}}\"\n value = value_ = output.format(**{self.key: value})\n\n if block.commands.not_zero:\n valid = value_ not in [\"\", None, False, \"0\", \"0.0\", 0, 0.0]\n else:\n # '', None, and False are ignored\n # numbers like 0 and 0.0 are not.\n valid = not (value_ in [\"\", None] or value_ is False)\n enough = False\n except: # noqa e722\n # Exception raised when we don't have the param\n enough = True\n valid = False\n\n return valid, value, enough", "def test_prepare_value_limit_denominator(self):\n field = FractionField(limit_denominator=3)\n result = field.prepare_value(Decimal(1 / 3.0))\n self.assertEqual(\"1/3\", result)" ]
[ "0.5516464", "0.52817243", "0.5185193", "0.51794326", "0.5139217", "0.50429684", "0.5020873", "0.4991828", "0.4948119", "0.49453548", "0.49012697", "0.48802477", "0.48762563", "0.4845734", "0.48277193", "0.48180923", "0.4814494", "0.4786338", "0.4773011", "0.4745563", "0.47429416", "0.4737173", "0.47254056", "0.47184917", "0.47157893", "0.47150928", "0.47141185", "0.47113279", "0.47000697", "0.4682086" ]
0.57598084
0
This function splits camel case into separate words
def camelCaseSplit(text): matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) return [m.group(0) for m in matches]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_uppercase(word):\r\n final_word = ''\r\n for i in word:\r\n final_word += ' %s' % i if i.isupper() else i\r\n\r\n return final_word.strip()", "def split_capital(phrase):\n format_capital = []\n #words = [phrase]\n #format_capital.append(words[0].isupper())\n \n #words = phrase.split()\n re_split_result = re.split('(\\W)', phrase)\n words = list(filter(None, re_split_result))\n \n for k_item in range(len(words)-1, 0, -1):\n while len(words[k_item])>0 and words[k_item][0] == ',':\n words[k_item-1] += ','\n words[k_item] = words[k_item][1:]\n for k_item in range(len(words)-1, 0, -1):\n while len(words[k_item])>0 and words[k_item][0] == ' ':\n words[k_item-1] += ' '\n words[k_item] = words[k_item][1:]\n \n remove_empty(words)\n \n for k_item in range(len(words)-1, 0, -1):\n if words[k_item][0] != ' ' and words[k_item-1][-1] != ' ':\n words[k_item-1] = ''.join([words[k_item-1], words[k_item]])\n del(words[k_item])\n \n flag_prev_word_capital = True\n for k in range(len(words)):\n if words[k].isupper() and flag_prev_word_capital:\n if k>0 and (words[k-1].rstrip(' ')[-1] != ',' and words[k-1].rstrip(' ')[-1] != ')'):\n format_capital.append(False)\n flag_prev_word_capital = False\n else:\n format_capital.append(True)\n \n else:\n format_capital.append(False)\n flag_prev_word_capital = False\n return words, format_capital", "def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText", "def normalize_camel(var_name):\n return [word.lower() for word in re.split(r'([A-Z][a-z]*)', var_name) if len(word) > 0]", "def un_camel_case(text, separator='_'):\n split = re.findall(r'(?:[A-Z][a-z0-9]*|[a-z0.9]+)', text)\n split = map(str.lower, split)\n split = list(split)\n\n words = []\n\n while len(split) > 0:\n word = split[0]\n split = split[1:]\n\n if len(word) == 1:\n while (len(split) > 0) and (len(split[0]) == 1):\n word += split[0]\n split = split[1:]\n\n words.append(word)\n\n return separator.join(words)", "def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output", "def split_name(name):\n space_split = name.split(\" \")\n upper_case = \"\"\n lower_case = \"\"\n for s in space_split:\n first_letter = s[0]\n if first_letter.isupper():\n upper_case = upper_case.strip() + \" \" + s\n else:\n lower_case = lower_case.strip() + \" \" + s\n return (upper_case, lower_case)", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def titlecase(original: str, delimiter: str = \" \", small_words: list = None) -> str:\n _small_words = [\"of\", \"in\", \"at\", \"to\", \"the\", \"on\", \"an\", \"a\"]\n if small_words:\n _small_words = list(set(_small_words + small_words))\n\n original_splitted = original.split(delimiter)\n result = []\n\n for word in original_splitted:\n word = word.lower()\n if word in _small_words:\n result.append(word)\n else:\n result.append(word.capitalize())\n\n return delimiter.join(result)", "def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = 
''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)", "def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()", "def filter_words(st):\n\n filtered = \" \".join(st.capitalize().split())\n return filtered", "def correctCasing(words):\n strings = words.split(' ')\n strings = [s[0].upper()+s[1:].lower() for s in strings if s]\n return ' '.join(strings)", "def separate_words(text):\n splitter = re.compile('\\\\W*')\n return [s.lower() for s in splitter.split(text) if s != '']", "def word_split_by_char(s):\n old_words = []\n old_words.append(s)\n result = []\n while len(old_words) > 0:\n new_words = []\n for s in old_words:\n if '-' in s: # Case: ab-cd-ef\n new_words+=s.split('-')\n elif '.' in s: # Case: ab.cd.ef\n new_words+=s.split('.')\n elif '_' in s: # Case: ab_cd_ef\n new_words+=s.split('_')\n elif '/' in s: # Case: ab/cd/ef\n new_words+=s.split('/')\n elif '\\\\' in s: # Case: ab\\cd\\ef\n new_words+=s.split('\\\\')\n else:\n t = camel_case_split(s)\n if len(t) > 1:\n new_words += t\n result.append(s)\n old_words = new_words\n return result", "def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])", "def camel_case_split(identifier):\r\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z0-9])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)\r\n return [m.group(0) for m in matches]", "def camel_case_split(identifier):\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)\n return [m.group(0) for m in matches]", "def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)", "def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))", "def split_stem(sentence):\n sentence = re.sub('([a-z])([A-Z])', u'\\\\1 \\\\2', sentence)\n return sentence.split()", "def _camel_case_split(identifier: str) -> List[str]:\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)\n return [m.group(0) for m in matches]", "def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]", "def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])", "def split_by_case(text, case):\n\n if case == 'underscore':\n return filter(bool, text.split('_'))\n elif case == 'camel':\n text = text.replace('|', '||')\n text = re.sub(r'([a-z0-9])([A-Z])', r'\\1|\\2', text)\n text = re.sub(r'([A-Z]+)([A-Z])', r'\\1|\\2', text)\n return map(methodcaller('replace', '||', '|'), text.split('|'))\n return [text]", "def UnCamelCase(text, separator='_'):\n split = re.findall(r'[A-Z][a-z0-9]*', text)\n split = map(str.lower, split)\n split = list(split)\n\n words = []\n\n while len(split) > 0:\n word = split[0]\n split = split[1:]\n\n if len(word) == 1:\n while 
(len(split) > 0) and (len(split[0]) == 1):\n word += split[0]\n split = split[1:]\n\n words.append(word)\n\n return separator.join(words)", "def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)", "def break_words(stuff):\r\n words = stuff.split(' ')\r\n return words" ]
[ "0.7561148", "0.74302274", "0.7386459", "0.7364253", "0.7306393", "0.70641774", "0.70035785", "0.69703233", "0.69154686", "0.69137603", "0.69135076", "0.6885954", "0.6855707", "0.6846678", "0.68344367", "0.68254817", "0.68216276", "0.67984384", "0.679571", "0.6779888", "0.6772004", "0.6751485", "0.6737912", "0.67027766", "0.6661405", "0.665373", "0.66441226", "0.6630193", "0.66196054", "0.6611795" ]
0.74493116
1
List models in a project.
def list(self, project_id): endpoint = "/project/{}/model".format(project_id) return self._get(endpoint, _ModelSchema(many=True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def models_list(request):\n projects = Project.objects.filter(models=1)\n return render(request, 'screenshower/app/models_list.html', {'projects': projects})", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def list_(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, backend=backend)\n projects = sorted(projects, key=lambda project: project.name.lower())\n ctx.obj['view'].search_results(projects)", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def list(self, request):\n projects = Project.objects.all()\n\n serializer = ProjectSerializer(projects, many=True, context={'request': request}) # convert to json\n return Response(serializer.data)", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def list_models(\n architecture: Optional[str] = typer.Option(None, '-n', '--name', help='Model architecture name'),\n framework: Optional[Framework] = typer.Option(None, '-fw', '--framework', case_sensitive=False,\n help='Framework'),\n engine: Optional[Engine] = typer.Option(None, '-e', '--engine', case_sensitive=False, help='Serving engine'),\n version: Optional[int] = typer.Option(None, '-v', '--version', help='Version'),\n list_all: Optional[bool] = typer.Option(\n False,\n '-a', '--all', is_flag=True,\n help='Display queried models. 
otherwise, only partial result will be shown.'\n ),\n):\n\n payload = remove_dict_null(\n {'architecture': architecture, 'framework': framework, 'engine': engine, 'version': version}\n )\n with requests.get(f'{app_settings.api_v1_prefix}/model', params=payload) as r:\n model_list = r.json()\n model_view([MLModel.parse_obj(model) for model in model_list], list_all=list_all)", "def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def get_projects():\n return Project.query.all()", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def ListModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list_models(self, sort: bool = True, limit: int | None = None) -> Iterator[ExecutableModelSpace]:\n return self._strategy.list_models(sort=sort, limit=limit)", "def generate_model_list():\n\t\n\tmodels = [\n\t\tapi.v1.models.job.Job,\n\t]\n\treturn models", "async def list_models(\n list_models_request: ListModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/list_models endpoint\")\n logging.debug(f\"Request: {list_models_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().list_model_controller(\n request=list_models_request\n )\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/list_models endpoint: {error}\")\n raise error", "def list_models(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['search_pattern']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Get the list of models based on the search pattern\n search_pattern = self.request_df.loc[0, 'search_pattern']\n \n # If the search pattern is empty default to all models\n if not search_pattern.strip():\n search_pattern = '*'\n \n # Get the list of models as a string\n models = \"\\n\".join([str(p).split(\"\\\\\")[-1] for p in list(pathlib.Path(self.path).glob(search_pattern))])\n \n # Prepare the output\n self.response = pd.Series(models)\n \n # Finally send the response\n return self.response", "def list_dashdb_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='DashDB In-database Model', fields=fields)\n\t\treturn models", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n 
LOG.exception(\"Error occurred: %s\" % e)", "def get_project_list(token):\n session = requests.Session()\n session.headers.update({'Authorization': f'Token {token}'})\n url = get_project_list_url()\n r = session.get(url=url)\n return r", "def models(self):\n return self.config.models()", "def test_list_project(self):\n pass" ]
[ "0.79152936", "0.7396299", "0.71830803", "0.6951597", "0.6945092", "0.6849994", "0.67448294", "0.6668643", "0.6567901", "0.6498854", "0.64851725", "0.6481699", "0.64800274", "0.6413323", "0.64063305", "0.63729674", "0.63483864", "0.6324699", "0.6307377", "0.62619567", "0.62619567", "0.62423015", "0.6240684", "0.6228457", "0.61994624", "0.61492", "0.6147043", "0.61247945", "0.6119573", "0.6098322" ]
0.7942028
0
List the versions of a model in the registry.
def list_versions(self, project_id, model_id): endpoint = "/project/{}/model/{}/version".format(project_id, model_id) return self._get(endpoint, _ModelVersionSchema(many=True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_list_versions(**kwargs):\n mle = MLEngineHook()\n model_name = kwargs['dag_run'].conf.get('model_name')\n model_versions = mle.list_versions(PROJECT, model_name)\n kwargs['ti'].xcom_push(key='model_versions', value=model_versions)", "def ListVersions(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def ListModelVersions(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list_versions(self):\n version_url = self._get_base_version_url()\n\n resp, body = self.raw_request(version_url, 'GET')\n # NOTE: We need a raw_request() here instead of request() call because\n # \"list API versions\" API doesn't require an authentication and we can\n # skip it with raw_request() call.\n self._error_checker(resp, body)\n\n body = json.loads(body)\n self.validate_response(schema.list_versions, resp, body)\n return rest_client.ResponseBody(resp, body)", "def get_versions(self):\n raise NotImplementedError", "def versions(self):\n return self._versions", "def getVersions(self):\n logger.debug(\"Func: getVersions\")\n\n try:\n return self._currentSceneInfo[\"Versions\"]\n except:\n return []", "def index(self, request):\n versions = []\n for key, data in VERSIONS.items():\n v = BaseVersion(\n data[\"id\"],\n data[\"status\"],\n request.application_url,\n data[\"updated\"])\n versions.append(v)\n return wsgi.Result(VersionsDataView(versions))", "def all(self):\r\n if self._versions is None or \\\r\n len(self._versions) == 0:\r\n url = \"%s/versions\" % self._url\r\n params = {'f':'json'}\r\n res = self._con.get(url, params)\r\n self._versions = []\r\n if 'versions' in res:\r\n for v in res['versions']:\r\n guid = v['versionGuid'][1:-1]\r\n vurl = \"%s/versions/%s\" % (self._url, guid)\r\n self._versions.append(Version(url=vurl,\r\n flc=self._flc,\r\n gis=self._gis))\r\n return self._versions\r\n return self._versions", "def versions(self) -> pulumi.Output[List['outputs.RegionInstanceGroupManagerVersion']]:\n return pulumi.get(self, \"versions\")", "def test_list_versions(self):\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n\n res = self.metadata.get_list_entity_versions(\n entity=Dashboard, entity_id=res_name.id.__root__\n )\n assert res", "def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)", "def versions(self) -> List['RadsProjectVersion']:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\")\n return [RadsProjectVersion(self, RadsVersion(l)) for l in listing.splitlines()]", "def versions():\n result = timeline.versions()\n if result:\n click.echo('\\n'.join(result))", "def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()", "def select_versions(self):\n return []", "def get_all_versions(self, headers=None, **params):\r\n return self._get_all([('Version', self.key_class),\r\n ('CommonPrefixes', Prefix),\r\n ('DeleteMarker', DeleteMarker)],\r\n 'versions', headers, **params)", "def revision_list():\n for rev in orm.DataRevision.select():\n click.echo(rev.name)", "def ListVersions(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n 
raise NotImplementedError('Method not implemented!')", "def available_versions(self, **kwargs):\n return self.raw_version_data(**kwargs)", "def versions(self, stored=False) -> List['RadsSolutionVersion']:\n\n if stored:\n fspath = self.storage.fspath(self.path)\n if not os.path.isdir(fspath):\n return [] # solution not in storage\n listing = []\n for path in os.listdir(fspath):\n if not os.path.isdir(os.path.join(fspath, path)):\n continue\n listing.append(path)\n else:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\").splitlines()\n return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)", "def versions(self, name):\n if not len(self):\n self.update()\n return [version for version in self if os.path.basename(version) == name]", "def ListVersions(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def list_versions(self):\n if not USE_GCLOUD:\n return self.run_appcfg(['list_versions'])\n data = self.run_gcloud(['app', 'versions', 'list'])\n per_module = collections.defaultdict(list)\n for deployment in data:\n service = deployment['service'].encode('utf-8')\n version_id = deployment['id'].encode('utf-8')\n per_module[service].append(version_id)\n return dict(per_module)", "def list_versions(quartus_versions):\n for key in quartus_versions.keys():\n print(key)", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def list_object_versions(Bucket=None, Delimiter=None, EncodingType=None, KeyMarker=None, MaxKeys=None, Prefix=None, VersionIdMarker=None):\n pass", "def get_github_chandra_models_version_info():\n with urlopen('https://api.github.com/repos/sot/chandra_models/tags') as url:\n response = url.read()\n tags = json.loads(response.decode('utf-8'))\n\n with urlopen('https://api.github.com/repos/sot/chandra_models/branches') as url:\n response = url.read()\n branches = json.loads(response.decode('utf-8'))\n\n all_versions_info = {t[\"name\"]: t for t in tags}\n all_versions_info.update({b[\"name\"]: b for b in branches})\n return all_versions_info", "def get_model_versions(cause_id, age_start, age_end, model_version_type_id):\n call = \"\"\"\n SELECT model_version_id FROM cod.model_version\n WHERE cause_id = {c}\n AND age_start = {a_start} AND age_end = {a_end}\n AND model_version_type_id = {mvt}\n AND gbd_round_id > 5 AND status = 1\n \"\"\".format(c=cause_id, a_start=age_start, a_end=age_end,\n mvt=model_version_type_id)\n model_versions = query(call, conn_def='codem')['model_version_id'].tolist()\n return model_versions", "def get_revision_list(self):\n response = self._get_request(\n DeckhandClient.get_path(DeckhandPaths.REVISION_LIST)\n )\n self._handle_bad_response(response)\n revisions = yaml.safe_load(response.text)\n return revisions.get('results', [])" ]
[ "0.7476473", "0.6968806", "0.6945814", "0.6910091", "0.68492013", "0.67806196", "0.6705519", "0.66787547", "0.66321546", "0.6515155", "0.65119636", "0.6478975", "0.6458911", "0.6431183", "0.6391382", "0.63797146", "0.62804574", "0.61845875", "0.6180273", "0.6165664", "0.6153509", "0.61462086", "0.6107622", "0.609609", "0.60841864", "0.6029641", "0.6024058", "0.5979283", "0.5969203", "0.59603363" ]
0.7576148
0
Simple CLI for a Number Guessing Game (NGG CLI).
def main(): log("NGG CLI", color="green", figlet=True) log("Welcome to NGG CLI!", "yellow")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ng(l, b, i):\n def c(str):\n b.l_say(str, i)\n\n def p(str):\n b.l_say(str, i, 0)\n\n def help(l, b, i):\n p('%s=== Number Game Help Guide ===' % ORANGE)\n p('\\tTo start a new game, use %s.ng new%s. You will then be ' % (CYAN, RESET))\n p('\\tgiven an interval, everyone in the chat will be able to')\n p('\\tguess the number. Close guesses will result in ')\n p('\\tcumulative points, until the max amounts of points have')\n p('\\tbeen awarded. Use %s.ng guess <number> %sto guess, OR ' % (CYAN, RESET))\n p('\\tuse %s.ng play%s to enter a mode in which everything ' % (CYAN, RESET))\n p('\\tyou type in chat will be treated as a guess. Exit this ')\n p('\\tmode with %s.ng stop%s.' % (CYAN, RESET))\n p('%s=== End help ===' % ORANGE)\n return True\n\n def new(l, b, i):\n global GAME\n if GAME is not None:\n b.l_say('%sThere\\'s already a Number Game running!' % BOLD, i, 0)\n return True\n\n min = 0\n max = 1000\n dec = False\n if i.args is not None:\n for arg in i.args:\n if arg.startswith('-r='):\n min, max = arg.replace('-r=', '').split(',')\n min = int(min)\n max = int(max)\n\n elif arg.startswith('-d='):\n if 't' in arg.lower():\n dec = True\n str_dec = 'OFF'\n if dec:\n str_dec = 'ON'\n\n p('Creating new Number Game, with')\n p('range (%s%d%s, %s%d%s) and decimals %s%s%s.' \\\n % (CYAN, min, RESET, CYAN, max, RESET,\n CYAN, str_dec, RESET))\n GAME = Game(b, min, max, dec)\n\n def cancel(l, b, i):\n global GAME\n GAME = None\n p('Game stopped.')\n\n def guess(l, b, i):\n global GAME\n if GAME == None:\n p('There is no Number Game running at the moment.')\n return True\n\n if len(i.args) > 1:\n if '.' in i.args[1]:\n guess = float(i.args[1])\n else:\n guess = int(i.args[1])\n\n if not (isinstance(guess, float) or isinstance(guess, int)):\n p('You must guess a number, silly!')\n return True\n\n success = GAME.guess(i.nick, guess)\n if success == 'win':\n new_points = b.get_user(i.nick).get_points()\n p('New score: %s' % format(new_points, ',d'))\n GAME = None\n\n elif success == 'kill':\n GAME = None\n\n elif success == 'reward':\n new_points = b.get_user(i.nick).get_points()\n p('New score: %s' % format(new_points, ',d'))\n GAME = None\n\n else:\n p('You must guess something, silly!')\n\n def play(l, b, i):\n global PLAYERS\n PLAYERS.append(i.nick)\n p('You are now in %sPLAY%s mode.' % (BOLD, RESET))\n return True\n\n def stop(l, b, i):\n global PLAYERS\n PLAYERS.remove(i.nick)\n p('You are now %snot%s in %sPLAY%s mode.' % (BOLD, RESET, BOLD, RESET))\n return True\n\n\n\n\n #====================================================================#\n try:\n exec ('%s(l, b, i)' % i.args[0]) in globals(), locals()\n except Exception, e:\n traceback.print_exc()\n b.l_say('Usage: %s.ng new|guess|play|stop' % CYAN, i, 0)\n return True\n #====================================================================#", "def main():\n word = random_word()\n old_ans = dashed(word)\n print('You have ' + str(N_TURNS) + ' guesses left.')\n guess(word, old_ans)", "def cli() -> None:", "def cli() -> None:", "async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. 
I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.6939428", "0.6246258", "0.60306823", "0.60306823", "0.5827886", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928", "0.5826928" ]
0.637351
1
test a shift by dx,dy using pan method
def test_pan(): _a = io.load_vec(os.path.join(path, f1)) _c = _a.piv.pan(1.0, -1.0) # note the use of .piv. assert np.allclose(_c.coords["x"][0], 1.312480) assert np.allclose(_c.coords["y"][0], -1.31248)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pan():\n _c = _a.copy()\n _c = _c.piv.pan(1.0, -1.0) # note the use of .piv.\n assert np.allclose(_c.coords[\"x\"][0], 1.312480)\n assert np.allclose(_c.coords[\"y\"][0], -1.31248)", "def pan(self, dx, dy):\n d = self.getDistance()\n vr = self.getViewRight()\n vr *= dx*d\n GL.glTranslate(vr[0], vr[1], vr[2])\n vu = self.getViewUp()\n vu *= dy*d\n GL.glTranslate(vu[0], vu[1], vu[2])", "def pan(self, dx, dy):\n d = self.getDistance()\n vr = self.getViewRight()\n vr *= dx*d\n GL.glTranslatef(vr[0], vr[1], vr[2])\n vu = self.getViewUp()\n vu *= dy*d\n GL.glTranslatef(vu[0], vu[1], vu[2])", "def pan(self,dx=0.0,dy=0.0):\n self._obj['x'] += dx\n self._obj['y'] += dy\n return self._obj", "def do_pan_view(self, dx, dy):\n auto = self.autoReplot()\n self.setAutoReplot(False)\n axes_to_update = self.get_axes_to_update(dx, dy)\n axis_ids_horizontal = (self.get_axis_id(\"bottom\"), self.get_axis_id(\"top\"))\n axis_ids_vertical = (self.get_axis_id(\"left\"), self.get_axis_id(\"right\"))\n\n for (x1, x0, _start, _width), axis_id in axes_to_update:\n lbound, hbound = self.get_axis_limits(axis_id)\n i_lbound = self.transform(axis_id, lbound)\n i_hbound = self.transform(axis_id, hbound)\n delta = x1 - x0\n vmin = self.invTransform(axis_id, i_lbound - delta)\n vmax = self.invTransform(axis_id, i_hbound - delta)\n # patch for not \"panning out\"\n if axis_id in axis_ids_horizontal:\n vmin = max(vmin, self.peakmap_range[0])\n vmax = min(vmax, self.peakmap_range[1])\n elif axis_id in axis_ids_vertical:\n vmin = max(vmin, self.peakmap_range[2])\n vmax = min(vmax, self.peakmap_range[3])\n self.set_axis_limits(axis_id, vmin, vmax)\n\n self.setAutoReplot(auto)\n # the signal MUST be emitted after replot, otherwise\n # we receiver won't see the new bounds (don't know why?)\n self.replot()\n self.emit(SIG_PLOT_AXIS_CHANGED, self)", "def mouseMoveEvent(self, ev):\n shift = ev.modifiers() & QtCore.Qt.ShiftModifier\n ctrl = ev.modifiers() & QtCore.Qt.ControlModifier\n if shift:\n y = ev.pos().y()\n if not hasattr(self, '_prev_zoom_pos') or not self._prev_zoom_pos:\n self._prev_zoom_pos = y\n return\n dy = y - self._prev_zoom_pos\n def delta():\n return -dy * 5\n ev.delta = delta\n self._prev_zoom_pos = y\n self.wheelEvent(ev)\n elif ctrl:\n pos = ev.pos().x(), ev.pos().y()\n if not hasattr(self, '_prev_pan_pos') or not self._prev_pan_pos:\n self._prev_pan_pos = pos\n return\n dx = pos[0] - self._prev_pan_pos[0]\n dy = pos[1] - self._prev_pan_pos[1]\n self.pan(dx, dy, 0, relative=True)\n self._prev_pan_pos = pos\n else:\n super(PlotObject, self).mouseMoveEvent(ev)", "def OnMove(self, event): # ANDY PAN\n if event.ShiftDown():\n event.Skip()\n return\n\n # for windows, set focus onto pyslip window\n # linux seems to do this automatically\n if sys.platform == \"win32\" and self.FindFocus() != self:\n self.SetFocus()\n\n # get current mouse position\n (x, y) = event.GetPosition()\n # from common.architecture_support import whoscalling2\n # dbg(whoscalling2())\n\n # self.RaiseMousePositionEvent((x, y))\n\n if event.Dragging() and event.LeftIsDown():\n # are we doing box select?\n if not self.last_drag_x is None:\n # no, just a map drag\n self.was_dragging = True\n dx = self.last_drag_x - x\n dy = self.last_drag_y - y\n\n # dx /= 20\n # dy /= 20\n # dbg(dx)\n # dbg(dy)\n\n # print \"PAN %d %d\" % (dx, dy)\n # print self.GetViewStart()\n currx, curry = self.GetViewStart()\n self.Scroll(\n currx + dx, curry + dy\n ) # Note The positions are in scroll units, not pixels, so to convert to pixels you will have to 
multiply by the number of pixels per scroll increment. If either parameter is -1, that position will be ignored (no change in that direction).\n # print \"Scroll pan %d %d\" % (currx+dx, curry+dy)\n\n # adjust remembered X,Y\n self.last_drag_x = x\n self.last_drag_y = y\n\n # redraw client area\n self.Update()", "def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)", "def mouse_motion(self, dx, dy):\n dx /= 8; dy /= 8\n self.rot[0] += dy; self.rot[1] -= dx\n if self.rot[0] > 90: self.rot[0] = 90\n elif self.rot[0] < -90: self.rot[0] = -90", "def panZoom(*args, absolute: bool=True, downDistance: float=0.0, leftDistance: float=0.0,\n relative: bool=True, rightDistance: float=0.0, upDistance: float=0.0, zoomRatio:\n float=0.0, **kwargs)->None:\n pass", "def pan(self, parameter):\n self.tx += parameter[0] / self.sx\n self.ty += parameter[1] / self.sy", "def test_pose_shifter(self):\n self.dyn_client.update_configuration({\"linear_offset_x\":0.1, \"linear_offset_y\":0.0, \"linear_offset_z\":0.05})\n pose_in = geometry_msgs.msg.PoseStamped()\n expected = geometry_msgs.msg.PoseStamped()\n pose_in.header.frame_id = \"base_link\"\n expected.header.frame_id = \"base_link\"\n\n pose_in.pose.position.x = 1.0\n pose_in.pose.position.y = 2.0\n pose_in.pose.position.z = 3.0\n pose_in.pose.orientation.x = 0.0\n pose_in.pose.orientation.y = 0.0\n pose_in.pose.orientation.z = 0.0\n pose_in.pose.orientation.w = 1.0\n\n # shift of 10 cm in X and 5 cm in Z\n expected.pose.position.x = 1.1\n expected.pose.position.y = 2.0\n expected.pose.position.z = 3.05\n expected.pose.orientation.x = 0.0\n expected.pose.orientation.y = 0.0\n expected.pose.orientation.z = 0.0\n expected.pose.orientation.w = 1.0\n\n self.pose_in_pub.publish(pose_in)\n\n while not self.wait_for_result:\n self.event_out.publish('e_start')\n\n self.assertEqual(self.result.header.frame_id, expected.header.frame_id)\n self.assertEqual(self.result.pose, expected.pose)", "def panTiltCallback(msg):\n global robot\n cam_pose = msg.data\n if (cam_pose[0] != 0):\n pan_sign = -1 if cam_pose[0]>0 else 1\n robot.camera.set_pan(robot.camera.get_pan() + pan_sign * 0.1)\n rospy.loginfo(\"pan set\")\n if (cam_pose[1] != 0):\n tilt_sign = -1 if cam_pose[1]>0 else 1\n robot.camera.set_tilt(robot.camera.get_tilt() + tilt_sign * 0.1)\n rospy.loginfo(\"tilt set\")", "def panCameraDownTask(self, pos):\n z = camera.getZ()\n if z <= pos:\n self.cameraMoving = 0\n if self.enableMouseCamControl == 1:\n self.game.app.enableMouseCamControl()\n return Task.done\n else:\n camera.setZ(z-self.panSpeed)\n self.cameraMoving = 1\n return Task.cont", "def panTo(self, p=None):\n if p == None:\n p = self.focus\n MV = self.MV\n vr = self.getViewRight()\n vu = self.getViewUp()\n p = -p\n x = np.dot(p, vr) # dot product\n y = np.dot(p, vu)\n MV[3, :2] = x, y # set first two entries of 4th row to x, y\n self.MV = MV", "def panTo(self, p=None):\n if p == None:\n p = self.focus\n MV = self.MV\n vr = self.getViewRight()\n vu = self.getViewUp()\n p = -p\n x = np.dot(p, vr) # dot product\n y = np.dot(p, vu)\n MV[3, :2] = x, y # set first two entries of 4th row to x, y\n self.MV = MV", "def pan(self):\n return self._pan", "def panCameraRightTask(self, pos):\n x = camera.getX()\n if x >= pos:\n self.cameraMoving = 0\n if self.enableMouseCamControl == 1:\n self.game.app.enableMouseCamControl()\n return 
Task.done\n else:\n camera.setX(x+self.panSpeed)\n self.cameraMoving = 1\n return Task.cont", "def find_shift(ref, img):\n im0 = prepare(ref)\n im1 = prepare(img)\n shift, error, diffphase = register_translation(im0, im1, 100)\n\n return shift", "def setPlotShift(x,y):\n dislin.trfshf(x,y)", "def test_pos_1024() -> None:\n assert sw.walk_to(1024).distance == 31", "def pan(self):\n index = self._ordered_input_names.index('pan')\n return self._inputs[index]", "def do_move(self, dx, dy):\n self.rect.move_ip(dx, dy)", "def drag(self, x, y, btn):\n if self._doPan:\n return self._pan.drag(x, y, btn)\n else:\n return super(PanAndSelect, self).drag(x, y, btn)", "def test_stokes_drag():\n assert DM.stokes_drag(fluid_velocity=1.0, particle_velocity=0.0,\n diameter=1.0, rho=1.0, fluid_viscosity=1.0) == 18.0", "def drag(self, (x0, y0), (x1, y1), duration, steps=1, orientation=-1):\n if orientation == -1:\n orientation = self.getOrientation()\n (x0, y0) = self.__transformPointByOrientation((x0, y0), orientation, self.getOrientation())\n (x1, y1) = self.__transformPointByOrientation((x1, y1), orientation, self.getOrientation())\n\n version = self.device.get_sdk_version()\n if version <= 15:\n self.logger.error(\"drag: API <= 15 not supported (version=%d)\" % version)\n elif version <= 17:\n self.shell(\"input swipe %d %d %d %d\" % (x0, y0, x1, y1))\n else:\n self.shell(\"input touchscreen swipe %d %d %d %d %d\" % (x0, y0, x1, y1, duration))", "def test_shift_returns_value(new_dll):\n assert new_dll.shift() == 5", "def move_point(p, direction, d=1):\n direction_guard(direction)\n x, y = p\n dx, dy = directions[direction]\n return (x + dx * d, y + dy * d)", "def test_revolute_from_dh(self):\n x_offset = 1\n z_offset = 2\n # Rotate around the z axis\n r = Joint.revolute_from_dh(0, 0, x_offset, z_offset)\n t_mat = r(np.array([np.pi / 2]))\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], z_offset))\n # x was rotated 90 degrees, and is now y\n self.assertTrue(np.allclose(t_mat[1, 3], x_offset))", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)" ]
[ "0.690588", "0.62539476", "0.6154423", "0.60330296", "0.59153146", "0.5658531", "0.5642843", "0.55909723", "0.5532715", "0.5478135", "0.5469953", "0.5446774", "0.5398293", "0.537031", "0.53141296", "0.53141296", "0.531386", "0.52334946", "0.51988685", "0.5184004", "0.5176783", "0.5165479", "0.51344836", "0.51316893", "0.5125932", "0.51203644", "0.5110411", "0.5104686", "0.5100209", "0.50926864" ]
0.6563625
1
tests setting the new dt
def test_set_get_dt(): data = io.create_sample_dataset() assert data.attrs["dt"] == 1.0 assert data.piv.dt == 1.0 data.piv.set_dt(2.0) assert data.attrs["dt"] == 2.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dt(self, _):\n raise NotImplementedError(\n \"We do not support setting dt/ time step except during setup\")", "def test_set_get_dt():\n data = io.create_sample_Dataset()\n assert data.attrs[\"delta_t\"] == 0.0\n\n data.piv.set_delta_t(2.0)\n assert data.attrs[\"delta_t\"] == 2.0", "def test_dt_preservation(self):\n dt = 0.6767335\n test_rec = rt.Recording([[[0, 1, 2]]], dt=dt)\n self.assertEqual(\n test_rec.dt,\n dt,\n 'Assigned `dt` not equal to attribute `dt`; test code probably '\n 'broken.',\n )\n self.assertEqual(\n test_rec[..., 1:].dt,\n dt,\n '`Recording.dt` attribute altered by slicing.',\n )\n self.assertEqual(\n test_rec[..., 1].dt,\n dt,\n '`dt` attribute altered by retrieving single element of `Recording`.',\n )", "def test_update_dt(self):\n result = self.test_client.update_dt\n\n assert result == \"2020-02-18 01:54:13\"", "def update_dt(self, data_code, dt):\n self.data_code = data_code\n self.apply_data_code()\n msg = 'update_dt not implemented in the %s class' % self.__class__.__name__\n raise RuntimeError(msg)\n #assert dt>=0.\n #print \"updating dt...dt=%s\" %(dt)\n if dt is not None:\n self.dt = dt\n self.add_new_transient()", "def update(self, dt):\n pass", "def update(self, dt):\n\t\tpass", "def test_datetime_update(self):\r\n now = datetime.now()\r\n vm = DateTime.value_manager(None, None, now)\r\n assert not vm.changed\r\n vm.value = now + timedelta(days=1)\r\n assert vm.changed", "def _checkDT(self):\r\n dt = np.diff(self.tsec)\r\n \r\n dt_unique = np.unique(dt)\r\n \r\n if np.size(dt_unique) == 1:\r\n self.isequal = True\r\n else:\r\n self.isequal = False\r\n \r\n try:\r\n self.dt = dt[1]\r\n except:\r\n self.dt = 0.0", "def test_importtleUpdatesDataSourceUpdateDate(self):\n\n # Edit the DataSource to make sure the date must be updated\n datasource = DataSource.objects.get(system_name='tle_test')\n datasource.last_time_checked = timezone.make_aware(datetime(2000, 1, 1))\n datasource.save()\n\n timestamp = format(datasource.last_time_checked, 'U')\n\n try:\n call_command('importtle', 'tle_test', stdout=StringIO())\n except ConnectionError:\n pass\n\n datasource = DataSource.objects.get(system_name='tle_test')\n\n self.assertNotEquals(\n timestamp,\n format(datasource.last_time_checked, 'U')\n )", "def update(self, dt):", "def update(self, dt):", "def test_change_default_dt_static(self):\n ct.set_defaults('control', default_dt=0)\n assert ct.tf(1, 1).dt is None\n assert ct.ss([], [], [], 1).dt is None", "def update(self, dt):\n\n self.collecting(dt)", "def test_get_n_set_date(self):\n\n self.assertEqual(self.bmon_fn_2['timestamp'], self.timestamp_2)\n\n new_timestamp = datetime(2009, 1, 1, 12, 23, 33)\n self.bmon_fn_2['timestamp'] = new_timestamp\n\n self.assertEqual(self.bmon_fn_2['timestamp'], new_timestamp)", "def update(self, dt):\n for obj in self.objects:\n obj.update(dt)", "def test_change_default_dt(self, dt):\n ct.set_defaults('control', default_dt=dt)\n assert ct.ss(1, 0, 0, 1).dt == dt\n assert ct.tf(1, [1, 1]).dt == dt\n nlsys = ct.NonlinearIOSystem(\n lambda t, x, u: u * x * x,\n lambda t, x, u: x, inputs=1, outputs=1)\n assert nlsys.dt == dt", "def test_fill_data_with_days_in_dtes(self):\n date = pd.to_datetime('2009-01-15')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n self.assertTrue(len(df_iv))", "def test_fill_data_with_one_date(self):\n # date = pd.to_datetime('2015-06-30')\n date = 
pd.to_datetime('2011-05-09')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n\n # df_date = self.full_iv.df_all.query('date == %r' % date)\n # df_date = df_date[['date', 'dte', 'mark', 'strike', 'impl_vol']]\n # print df_date.sort_values(['dte', 'strike']).to_string(line_width=1000)\n\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n\n self.assertTrue(len(df_iv))", "def test_date_fields(self):\r\n sequential = self.get_item_from_modulestore(self.seq_usage_key)\r\n self.assertIsNone(sequential.due)\r\n self.client.ajax_post(\r\n self.seq_update_url,\r\n data={'metadata': {'due': '2010-11-22T04:00Z'}}\r\n )\r\n sequential = self.get_item_from_modulestore(self.seq_usage_key)\r\n self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))\r\n self.client.ajax_post(\r\n self.seq_update_url,\r\n data={'metadata': {'start': '2010-09-12T14:00Z'}}\r\n )\r\n sequential = self.get_item_from_modulestore(self.seq_usage_key)\r\n self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))\r\n self.assertEqual(sequential.start, datetime(2010, 9, 12, 14, 0, tzinfo=UTC))", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def _setVals(self, datetime=0):\n self.datetime = datetime", "def dt_changed(self):\n self.dateTimeEdit_2.setMinimumDateTime(self.dateTimeEdit.dateTime())", "def dst(self, dt):", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def updateTimeStep(self, newDt):\n self.timeStep = newDt", "def test_date(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_date')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_date' \\\n '( value DATE NOT NULL )')\n cursor.execute(query)\n data = []\n rr = random.randrange\n query = 'INSERT INTO test_date VALUES (%s)'\n for i in range(100):\n item = datetime.date(rr(1,10000), rr(1,13), rr(1,29))\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_date'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, datetime.date)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_date')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_date')\n cursor.execute(query)\n conn.commit()" ]
[ "0.7056691", "0.6960428", "0.6936673", "0.6897115", "0.6577245", "0.6459469", "0.64490354", "0.62812555", "0.6240081", "0.6226313", "0.62065417", "0.62065417", "0.61387527", "0.61046886", "0.60846764", "0.59929615", "0.5981789", "0.5883374", "0.5859649", "0.5851787", "0.5834196", "0.5834196", "0.58099455", "0.5785883", "0.57564706", "0.5750733", "0.5750733", "0.5750733", "0.57477474", "0.5609705" ]
0.72287667
0
tests curl that is also vorticity
def test_curl(): _a = io.load_vec(os.path.join(path, f1)) _a.piv.vec2scal(property="curl") assert _a.attrs["variables"][-1] == "vorticity"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_curl():\n _c = _a.copy()\n _c.piv.vec2scal(flow_property=\"curl\")\n\n assert _c[\"w\"].attrs[\"standard_name\"] == \"vorticity\"", "def test_requests():\n resp = requests.get('http://www.google.com')\n return True if resp.status_code == 200 else False", "def test_urls_work(url):\n with requests.get(url) as r:\n assert r.status_code == 200", "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200", "def test_vcmp(self):\r\n if self.flask_app.config.get('VMCP_KEY'):\r\n self.flask_app.config.pop('VMCP_KEY')\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err", "def test_1():\n\tassert api_call().status_code == 200", "def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1", "def test_vouching(self, mock_requests_get):\n\n cases = (\n # Valid vouched user\n (self.vouched_user, 200, None, True),\n # Valid unvouched user\n (self.unvouched_user, 200, None, False),\n # Unknown user at mozillians\n (self.unknown_user, 200, json.dumps(dict(objects=[])), False),\n # Bad JSON response from API\n (self.vouched_user, 200, 'BADRESPONSE', False),\n # Forbidden resonse from API\n (self.vouched_user, 401, 'FORBIDDEN', False),\n )\n \n # Set up to simulate API requests / responses\n inputs = dict(status_code=200, headers={}, content='')\n outputs = {}\n def my_requests_get(url, headers=None, timeout=None):\n outputs['url'] = url\n outputs['headers'] = headers\n if inputs['content'] is not None:\n content = inputs['content']\n else:\n content = json.dumps(dict(objects=[\n {\"email\": inputs['email'],\n \"is_vouched\": inputs['vouched']}\n ]))\n return FakeResponse(status_code=200, headers={}, content=content)\n mock_requests_get.side_effect = my_requests_get\n\n # Iterate through test cases\n for user, status_code, content, expected in cases:\n cache.clear()\n inputs.update(dict(status_code=status_code, content=content,\n email=user.email, vouched=expected))\n \n # Run the vouching method, ensure expected\n result = user.get_profile().is_vouched_mozillian()\n eq_(expected, result)\n\n # ensure the URL requested matches expected values\n parsed = urlparse.urlparse(outputs['url'])\n params = urlparse.parse_qs(parsed.query)\n eq_(user.email, params['email'][0])\n eq_('testappname', params['app_name'][0])\n eq_('8675309', params['app_key'][0])", "def test_GET5(self):\n r = requests.get(self.address + \"/carca\")\n self.assertEqual(r.status_code, 400)", "def _api_call(self, url, response_checker):\n self.request_compare(url)", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def url_check_tester(client, url, status_code):\n response = client.get(url)\n 
assert response.status_code == status_code, \\\n f'Unexpected status code for {url}'\n assert response.data == b''", "def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)", "def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)", "def test_request():\n response = requests.get('http://jsonplaceholder.typicode.com/todos')\n assert response.ok", "async def test_method_lower(http2_serv):\n url = http2_serv\n async with aiosonic.HTTPClient() as client:\n res = await client.request(url, method=\"get\", verify=False)\n assert res.status_code == 200\n assert \"Hello World\" == await res.text()", "def test_fetch_url_ok():\n with patch(\"cheddar.index.remote.get\") as mocked:\n mocked.return_value = MagicMock()\n mocked.return_value.status_code = codes.ok\n response = fetch_url(\"http://example.com\", TIMEOUT, getLogger())\n eq_(codes.ok, response.status_code)", "def test_public(client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 200", "def test_response_200_on_get(self):\n pass", "def test_GET_fetcher():\n params = {\n 'key1':'value1',\n 'arg2':'value2'\n }\n\n ## test that request goes ok\n resp = wf_utils.fetch_GET_request(\n GET_ECHO_ENDPOINT,\n params=params\n )\n\n ## test that response json can be parsed\n payload = resp.json()\n\n ## test that response contains expected echo\n assert payload['args'] == params\n assert payload['headers']['user-agent'] == wf_utils.USER_AGENT", "def test_vcr():\n http_manager = urllib3.PoolManager()\n response = http_manager.request(\n \"GET\", \"https://developer.xero.com/documentation/oauth2/auth-flow\"\n )\n assert response.status == 200\n assert \"Xero is a multi-tenanted platform.\" in response.data.decode(\"utf-8\")", "def test_GET4(self):\n r = requests.get(self.address + \"/carcar/23\")\n self.assertEqual(r.status_code, 400)", "def test_link(link):\n r = requests.get(link)\n if (r.status_code != 200):\n return False\n else:\n return True", "def test_links(get_good_response):\n response = get_good_response(\"/links\")\n\n assert \"data\" in response", "def check_post(self, url, info):\r\n \r\n test = requests.get(url, headers = self.headers).json()['results']\r\n if info == test:\r\n return True\r\n return False", "def test_good_get_url(self):\n result = self._search('Love Story', just_results=True)\n get_url = result[0]['get_url']\n resp = self.app.get(get_url)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('url', resp.data)\n self.assertIn('/d?', resp.data)", "def test_GET_call_api_and_return_200Ok(client):\n\n url = '/api/v1/calls/'\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK", "def simple_test_open_url(url):\n try:\n return requests.get(url, headers={\"User-Agent\": random.choice(useragents.useragents())}).status_code\n except Exception as _:\n return False", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)" ]
[ "0.6593276", "0.65311223", "0.6425243", "0.6285262", "0.6247957", "0.6231689", "0.6167364", "0.61448026", "0.61036676", "0.6090551", "0.6047839", "0.6033975", "0.60323995", "0.6023737", "0.6006356", "0.599038", "0.59793335", "0.59708685", "0.5966935", "0.5953404", "0.594355", "0.5933717", "0.59293365", "0.590611", "0.5902825", "0.5895938", "0.5895738", "0.5886683", "0.58603865", "0.58603865" ]
0.69430053
0
Test case for christiandoctrines_id_get Get a single ChristianDoctrine by its id
def test_christiandoctrines_id_get(self): headers = { 'Accept': 'application/json', } response = self.client.open( '/v0.0.1/christiandoctrines/{id}'.format(id='id_example'), method='GET', headers=headers) self.assert200(response, 'Response body is : ' + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_solareclipses_id_get(self):\n pass", "def test_metrostations_id_get(self):\n pass", "def test_cyclingleagues_id_get(self):\n pass", "def test_get_id(self):\n\n self.metadata.create_or_update(data=self.create)\n\n # First pick up by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res = self.metadata.get_by_id(entity=Dashboard, entity_id=res_name.id)\n\n self.assertEqual(res_name.id, res.id)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_find_entity_by_id_action(self):\n pass", "def test_comicscreators_id_get(self):\n pass", "def get(self, _id):", "def test_get_case_by_id(self):\n pass", "def test_drugs_id_get(self):\n pass", "def test_brains_id_get(self):\n pass", "def test_get_by_id(self):\n\n user = CustomUser.get_by_id(2)\n expected_user = CustomUser.objects.get(id=2)\n self.assertEqual(user, expected_user)", "def test_get_article_by_id():\n article = Article(\n author = '[email protected]',\n title = 'New Article',\n content = 'Super extra awesome article'\n ).save()\n\n query = GetArticleByIDQuery(\n id = article.id\n )\n\n assert query.execute().id == article.id", "def get_object(id):", "def test_get_by_id(db):\n thing = Thing(id=1, name=\"Thing A\")\n db.session.query(Thing).delete()\n db.session.commit()\n\n db.session.add(thing)\n db.session.commit()\n\n retrieved = Thing.query.get(thing.id)\n assert retrieved == thing\n assert repr(retrieved) == \"<Thing 'Thing A'>\"", "def test_get_chain_by_id(self):\n pass", "def test_medicians_id_get(self):\n pass", "def test_get_by_id_wrong_type(self):\n assert ExampleUserModel.get_by_id(\"xyz\") is None", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def test_get_comment_information_by_id():\n get_comment_information_by_id('g99c7c0')" ]
[ "0.6760872", "0.65164095", "0.645328", "0.64164436", "0.63837415", "0.637381", "0.6357469", "0.6348666", "0.6338639", "0.6293266", "0.6275537", "0.6274834", "0.6265995", "0.6264964", "0.61286145", "0.6107147", "0.6094154", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.6090681", "0.60257167" ]
0.66512054
1
Unit test for doublecompress.c Uses Oct2Py to call the octave version double_compressxv.m
def unit_doublecompress(Verbose=False):

    # Assumes module-level imports: numpy as np, oct2py, rand (numpy.random),
    # and the SWIG-generated accelerator wrapper module "acc".
    from get_configuration import Get_SWIG_Simulation

    # Get Simulation Object, including Pointers to C structures
    test_config_files = ["source/configfiles/unit_tests/doublecompress_test.json"]
    sim = Get_SWIG_Simulation(test_config_files, Verbose)

    # Calculate the length of the Linac
    Nlinac = len(sim.linac_list)

    # Load parameters from Octave sav file with stored configuration
    oct_config_file = "./source/configfiles/unit_tests/"
    oct2py.octave.addpath(oct_config_file)
    loadout = oct2py.octave.load("double_compress_params_octave.sav")

    # Extract Octave data structure containing Linacs' configuration
    params = loadout.pdc

    # Allocate C Arrays for doublecompress inputs
    dphivr = acc.double_Array(Nlinac)
    dV_Vvr = acc.double_Array(Nlinac)

    # Outputs
    dc_noise_srcs = acc.Noise_Srcs()
    dcs = acc.Doublecompress_State()
    acc.Doublecompress_State_Allocate(dcs, Nlinac)

    # Pointers to outputs
    Ipk = acc.double_Array_frompointer(dcs.Ipk)
    sz = acc.double_Array_frompointer(dcs.sz)
    dE_E = acc.double_Array_frompointer(dcs.dE_E)
    sd = acc.double_Array_frompointer(dcs.sd)
    dt = acc.double_Array_frompointer(dcs.dt)
    sdsgn = acc.double_Array_frompointer(dcs.sdsgn)
    k = acc.double_Array_frompointer(dcs.k)
    Eloss = acc.double_Array_frompointer(dcs.Eloss)
    dE_Ei = acc.double_Array_frompointer(dcs.dE_Ei)
    dE_Ei2 = acc.double_Array_frompointer(dcs.dE_Ei2)
    cor = acc.double_Array_frompointer(dcs.cor)

    # Number of Simulation runs
    testnum = 30

    # Store Maximum error after each run (initialize at 0)
    maxerr = 0.0

    # Run testnum Simulation runs
    for cnt in range(testnum):

        np.random.seed(1301+cnt)

        # Find random values for Octave Linac configuration
        for l in range(Nlinac):
            params.lamv[0][l] = rand()+.2
            params.Lv[0][l] = 100*rand()+10
            params.av[0][l] = (20*rand())+15.0
            params.R56v[0][l] = .002*(rand()-.5)*2.0
            params.T566v[0][l] = .002*(rand()-.5)*2.0
            params.phiv[0][l] = 360.0*rand()
            params.s0v[0][l] = 3.0*rand()+.5

            # Copy and do unit conversion for C version
            sim.linac_list[l].C_Pointer.lam = params.lamv[0][l]
            sim.linac_list[l].C_Pointer.L = params.Lv[0][l]
            sim.linac_list[l].C_Pointer.a = params.av[0][l]/1000.0
            sim.linac_list[l].C_Pointer.R56 = params.R56v[0][l]
            sim.linac_list[l].C_Pointer.T566 = params.T566v[0][l]
            sim.linac_list[l].C_Pointer.phi = params.phiv[0][l]*np.pi/180.0
            sim.linac_list[l].C_Pointer.s0 = params.s0v[0][l]/1000.0

        # Introduce some pseudo-random numbers into noise source inputs
        dN_N = 100*(rand()-.5)*2
        dtg = 500*(rand()-.5)*2             # [picoseconds]
        dEg = .001*(rand()-.5)*2            # [GeV]
        dsig_z = params.sz0*(rand()-.5)*2   # [mm]
        dsig_E = 100*(rand()-.5)*2          # [%]
        chirp = .00001*(rand()-0)*2*0       # [m]

        # Vectors for Octave code
        dphiv_oct, dV_Vv_oct = np.zeros((2, Nlinac), dtype=float)
        for i in range(Nlinac):
            dphiv_oct[i] = 180*(rand()-.5)*2   # [deg]
            dV_Vv_oct[i] = 100*(rand()-.5)*2   # [%]

        # Copy and do unit conversion for C version
        dc_noise_srcs.dQ_Q = dN_N/100
        dc_noise_srcs.dtg = dtg/1e12
        dc_noise_srcs.dE_ing = dEg*1e9
        dc_noise_srcs.dsig_z = dsig_z/1000
        dc_noise_srcs.dsig_E = dsig_E/100
        dc_noise_srcs.dchirpt = chirp

        # Copy and do unit conversion for C version
        for i in range(Nlinac):
            # Inputs
            dphivr[i] = dphiv_oct[i]*np.pi/180
            dV_Vvr[i] = dV_Vv_oct[i]/100

        # Call the C routine via SWIG
        acc.Doublecompress(sim.gun.C_Pointer, sim.C_Pointer.linac_net, Nlinac,
                           dc_noise_srcs, dphivr, dV_Vvr, dcs)

        # Call the Octave routine using Oct2py
        (Ipk_o, sz_o, dE_E_o, sd_o, dt_o, sdsgn_o, k_o, Eloss_o,
         dE_Ei_o, dE_Ei2_o, cor1_o) = oct2py.octave.double_compressxv(
            params, dN_N, dtg, dEg, dsig_z, dsig_E, chirp,
            dphiv_oct, dV_Vv_oct, sim.gun.C_Pointer.Q, verbose=False, nout=11)

        # Subroutine for subtracting and scaling outputs
        # for error calculation
        def diffoctcarr(inputoct, inputcarr, scale=1.0):
            N = len(inputoct[0])
            out = np.zeros(N, dtype=float)
            for k in range(N):
                out[k] = (inputoct[0][k]-inputcarr[k]*scale)
            return out

        # Get the errors for the different outputs
        errIpk = abs(diffoctcarr(Ipk_o, Ipk))
        errsz = abs(diffoctcarr(sz_o, sz, 1.0e3))
        errdE_E = abs(diffoctcarr(dE_E_o, dE_E, 100.0))
        errsd = abs(diffoctcarr(sd_o, sd, 100.0))
        errdt = abs(diffoctcarr(dt_o, dt, 1.0e12))
        errsdsgn = abs(diffoctcarr(sdsgn_o, sdsgn, 100.0))
        errk = abs(diffoctcarr(k_o, k))
        errEloss = abs(diffoctcarr(Eloss_o, Eloss, 1.0e-9))
        errdE_Ei = abs(diffoctcarr(dE_Ei_o, dE_Ei, 100.0))
        errdE_Ei2 = abs(diffoctcarr(dE_Ei2_o, dE_Ei2, 100.0))
        errcor = abs(diffoctcarr([cor1_o[0, 1:]], cor))

        # Find maximum error for this run
        temp = np.max([errIpk, errsz, errdE_E, errsd, errdt, errsdsgn,
                       errk, errEloss, errdE_Ei, errdE_Ei2, errcor])

        # Compare to maximum of previous runs and keep highest error
        maxerr = max(temp, maxerr)

    print " After {0} runs with random inputs the".format(testnum)
    print " Maximum difference is {0}\n".format(maxerr)

    # Error threshold
    tol = 1e-8

    # Unit passes if maximum error is lower than the error threshold
    unit_pass = maxerr < tol

    return unit_pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_tests():\n print \"\\n****\\nTesting Doublecompress...\\n\"\n dc_pass = unit_doublecompress()\n if (dc_pass):\n result = 'PASS'\n else:\n result = 'FAIL'\n print \">>> \" + result\n\n return dc_pass", "def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))", "def test_compress_works(self):\n tau = 45.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n # make sure we normally go outside the range\n self.assertGreater(np.sum(M1.out < mrate), 0)\n self.assertGreater(np.sum(M1.out > Mrate), 0)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertEqual(np.sum(M2.out < mrate), 0)\n self.assertEqual(np.sum(M2.out > Mrate), 0)", "def test_compress_cmd():\n # GIVEN a cli runner\n runner = CliRunner()\n # WHEN running the compress command with dry_run\n result = runner.invoke(compress, obj={})\n # THEN assert the command was succesful even without a valid api\n assert result.exit_code == 0", "def testSC(self):\n\n obt_np = compression.decByteOffet_numpy(compression.compByteOffet_numpy(self.ds))\n self.assertEqual(abs(self.ds - obt_np).max(), 0.0, \"numpy algo\")\n obt_cy = compression.decByteOffet_cython(compression.compByteOffet_numpy(self.ds))\n self.assertEqual(abs(self.ds - obt_cy).max(), 0.0, \"cython algo\")\n obt_cy2 = compression.decByteOffet_cython(compression.compByteOffet_numpy(self.ds), self.ds.size)\n self.assertEqual(abs(self.ds - obt_cy2).max(), 0.0, \"cython algo_orig\")\n obt_we = compression.decByteOffet_weave(compression.compByteOffet_numpy(self.ds), self.ds.size)\n self.assertEqual(abs(self.ds - obt_we).max(), 0.0, \"weave algo\")", "def test01(self):\n a = np.arange(2e5)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b uncompressed-->\", b.nbytes\n # print \"size b compressed -->\", b.cbytes\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")", "def test02(self):\n a = np.arange(111)\n b = bcolz.carray(a)\n # print \"size b uncompressed-->\", b.nbytes\n # print \"size b compressed -->\", b.cbytes\n self.assertTrue(sys.getsizeof(b) > b.nbytes,\n \"carray compress too much??\")", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def test02a(self):\n np.random.seed(10)\n a = np.cumsum(np.random.random_sample(100*1000)-0.5) # random walk\n if common.verbose:\n print(\"Checking quantize filter\")\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n cparams = 
bcolz.cparams(quantize=0)\n b = bcolz.carray(a, cparams=cparams, rootdir=self.rootdir)\n b_cbytes = b.cbytes\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # print \"size b compressed -->\", b_cbytes\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)\n cparams = bcolz.cparams(quantize=3)\n c = bcolz.carray(a, cparams=cparams, rootdir=self.rootdir)\n # print \"size c compressed -->\", c.cbytes\n self.assertTrue(c.cbytes < 0.7 * b_cbytes,\n \"quantize does not seem to improve compression \"\n \"significantly\")\n assert_array_almost_equal(a, c[:], 3, \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test_compressed(self):\n try:\n import zlib\n except ImportError:\n self.skipTest('zlib is missing')\n\n ba = amf3.ByteArray()\n\n self.assertFalse(ba.compressed)\n\n z = zlib.compress(b'b' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)\n\n z = zlib.compress(b'\\x00' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)", "def test01c(self):\n a = np.arange(1e5)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n with bcolz.defaults_ctx(bcolz.cparams(clevel=9, cname=cname)):\n self.assertTrue(bcolz.defaults.cparams['cname'] == cname)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test00(self):\n a = np.arange(20)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n b = bcolz.carray(a, rootdir=self.rootdir,\n cparams=bcolz.cparams(clevel=9, cname=cname))\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) > b.nbytes,\n \"compression does not seem to have any overhead\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test01a(self):\n a = np.arange(1e5)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n bcolz.cparams.setdefaults(clevel=9, cname=cname)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test_jit_example(self):\n source = io.StringIO(\"\"\"\n int mega_complex_stuff(int* a, int* b, int count) {\n int sum = 0;\n int i;\n for (i=0; i < count; i++)\n sum += a[i] * b[i];\n return sum;\n }\n \"\"\")\n arch = get_current_arch()\n html_filename = make_filename(self.id()) + '.html'\n with html_reporter(html_filename) as reporter:\n obj = cc(source, 
arch, debug=True, reporter=reporter)\n m = load_obj(obj)\n # print(m.x.argtypes)\n count = 6\n T = ctypes.c_int * count\n a = T()\n a[:] = 1, 0, 2, 0, 3, 0\n b = T()\n b[:] = 5, 0, 4, 0, 9, 0\n y = m.mega_complex_stuff(a, b, count)\n self.assertEqual(40, y)", "def test_compress_2(self):\n text = 'abcdefdeabc'\n actual = LZ77.compress(text)\n expected = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n self.assertEqual(actual, expected)", "def testDecompress(self):\n decompressor = xz_decompressor.XZDecompressor()\n\n compressed_data = (\n b'\\xfd7zXZ\\x00\\x00\\x01i\"\\xde6\\x02\\xc0\\x13\\x0f!\\x01\\x16\\x00\\xc0\\xb7\\xdc'\n b'\\xe9\\x01\\x00\\x0eThis is a test.\\x00\\x00]\\xc9\\xc3\\xc6\\x00\\x01#\\x0f\\xdb'\n b'\\xdf\\x90\\x0e\\x90B\\x99\\r\\x01\\x00\\x00\\x00\\x00\\x01YZ')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n # Test to trigger xz raising EOFError.\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')\n\n # Test to trigger xz raising IOError.\n decompressor = xz_decompressor.XZDecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')", "def test01b(self):\n a = np.arange(1e5)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n bcolz.defaults.cparams = {\n 'clevel': 9, 'shuffle': bcolz.SHUFFLE, 'cname': cname,\n 'quantize': 0}\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test_decompress_2(self):\n b_array = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n actual = LZ77.decompress(b_array)\n expected = 'abcdefdeabc'\n self.assertEqual(actual, expected)", "def test_decompress_1(self):\n b_array = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 49])\n actual = LZ77.decompress(b_array)\n expected = 'abcdabc'\n self.assertEqual(actual, expected)", "def test_compress(self):\n self.logger.info(\"STEP: Create the workspace directory to be compressed.\")\n workspace = Workspace(Mock)\n directory = Path.cwd().joinpath(\"workspace\")\n directory.mkdir()\n workspace.workspace = directory\n\n # Create a file to verify compression.\n directory.joinpath(\"file.txt\").touch()\n\n test_folder = Path.cwd().joinpath(\"testfolder\")\n test_folder.mkdir()\n self.items.append(test_folder)\n\n self.logger.info(\"STEP: Compress the directory.\")\n workspace.compress()\n\n self.logger.info(\n \"STEP: Verify that the directory was compressed using the gztar format.\"\n )\n self.items.append(test_folder)\n compressed_workspace = Path.cwd().joinpath(\"workspace.tar.gz\")\n unpack_archive(compressed_workspace, test_folder, format=\"gztar\")\n compressed_file = test_folder.joinpath(\"workspace/file.txt\")\n self.assertTrue(compressed_file.exists() and compressed_file.is_file())", "def decompression_inversion():\n dna_seq, bin_seq, comp_seq, file_comp = binary_to_seq()\n \n #bwt reconstruction\n table = [\"\"] * len(dna_seq)\n\n 
for i in range(0,len(dna_seq),1):\n table = [dna_seq[i] + table[i] for i in range(0,len(dna_seq))]\n table = sorted(table)\n \n original_seq = None \n for row in table : \n if row.endswith(\"$\"):\n original_seq = row\n\n inverse_bwt = original_seq.rstrip(\"$\") \n \n \n #write the original sequence in a new created file \n file_path = os.path.splitext(file_comp)[0]\n file_inv = open(file_path + \"_decompressed_original.txt\", \"w\") \n file_inv.write(inverse_bwt) \n file_inv.close()\n \n messagebox.showinfo(\"Information\", \"Your decompressed and bwt reconstruction has been saved in \" \\\n +file_path +\"_decompressed_original.txt file.\")\n \n return dna_seq, comp_seq, inverse_bwt", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def test01c(self):\n a = np.arange(1e5)\n filters = bcolz.filters.keys()\n if common.verbose:\n print(\"Checking filters:\", filters)\n # print(\"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize)\n for filter_ in filters:\n with bcolz.defaults_ctx(bcolz.cparams(clevel=9, shuffle=filter_)):\n self.assertTrue(bcolz.defaults.cparams['shuffle'] == filter_)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print(\"size b compressed -->\", b.cbytes, \"with '%s'\" % filter_)\n if filter_ > 0:\n self.assertTrue(bcolz.defaults.cparams['shuffle'] == bcolz.SHUFFLE)\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def decompress_lzx(input_filename, output_filename):\n mspack = None\n try:\n if platform.architecture() == ('64bit', 'WindowsPE'):\n mspack = CDLL(\".\\mspack.x64.dll\")\n elif platform.architecture() == ('32bit', 'WindowsPE'):\n mspack = CDLL(\".\\mspack.x86.dll\")\n else:\n mspack = CDLL(\"libmspack.so\") # Requires libmspack built for *nix - e.g. `sudo apt-get install libmspack0`\n except:\n exit(\"Unable to locate libmspack for your operating system. Can you install it, e.g. 
sudo apt-get install libmspack0\")\n\n\n class msoab_decompressor (Structure):\n pass # the struct is self referencing, so need to declare the class THEN set _fields_\n\n\n msoab_decompressor._fields_ = [\n ('decompress', CFUNCTYPE(c_int, POINTER(msoab_decompressor), c_char_p, c_char_p)),\n ('decompress_incremental', CFUNCTYPE(c_int, POINTER(msoab_decompressor), c_char_p, c_char_p, c_char_p)),\n ('set_param', CFUNCTYPE(c_int, POINTER(msoab_decompressor), c_int, c_int))\n ]\n\n version = mspack.mspack_version(14) # 14 = MSPACK_VER_MSOABD\n assert (version > 1)\n\n mspack.mspack_create_oab_decompressor.restype = POINTER(msoab_decompressor)\n decompressor = mspack.mspack_create_oab_decompressor(None)\n\n err = decompressor.contents.decompress(decompressor,\n input_filename.encode(\"us-ascii\"),\n output_filename.encode(\"us-ascii\"))\n\n mspack.mspack_destroy_oab_decompressor(decompressor)", "def test01b(self):\n a = np.arange(1e5)\n filters = bcolz.filters.keys()\n if common.verbose:\n print(\"Checking filters:\", filters)\n #print(\"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize)\n for filter_ in filters:\n bcolz.defaults.cparams = {\n 'clevel': 9, 'shuffle': filter_, 'cname': \"blosclz\",\n 'quantize': 0}\n b = bcolz.carray(a, rootdir=self.rootdir)\n #print(\"size b compressed -->\", b.cbytes, \"with '%s'\" % filter_)\n if filter_ > 0:\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)\n # Restore defaults\n bcolz.defaults.cparams = {\n 'clevel': 5, 'shuffle': bcolz.SHUFFLE, 'cname': 'blosclz',\n 'quantize': 0}", "def test_double(self):\n from random import randint\n from ctypes import byref, c_double\n # back up array.\n b_orig = self.b.flatten()\n # run FORTRAN subroutine.\n tval = float(randint(0,10000000))\n self.args[1] = byref(c_double(tval))\n self.lib_c_ctypes.ctypes_test(*self.args)\n # revert in Python and test.\n self.b[1:,:] -= tval\n b = self.b.flatten()\n for i in range(len(b)):\n self.assertEqual(b[i], b_orig[i])", "def compress_file(compression, pretty, src, dst):\n str_tail = \"sed 1d\"\n str_cleanup = \";exit\"\n if pretty:\n str_tail = \"tail -n+2\"\n str_cleanup = \";rm ~;exit\"\n if \"lzma\" == compression:\n command = [\"xz\", \"--format=lzma\", \"--lzma1=preset=9e,lc=1,lp=0,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"raw\" == compression:\n command = [\"xz\", \"-9\", \"--extreme\", \"--format=raw\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"xz\" == compression:\n command = [\"xz\", \"--format=xz\", \"--lzma2=preset=9e,lc=1,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n else:\n raise RuntimeError(\"unknown compression format '%s'\" % compression)\n (compressed, se) = run_command(command + [src], False)\n wfd = open(dst, \"wb\")\n wfd.write((header + \"\\n\").encode())\n wfd.write(compressed)\n wfd.close()\n make_executable(dst)\n print(\"Wrote '%s': %i bytes\" % (dst, os.path.getsize(dst)))", "def test01a(self):\n a = np.arange(1e5)\n filters = bcolz.filters.keys()\n if common.verbose:\n print(\"Checking filters:\", filters)\n # print(\"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize)\n for filter_ in filters:\n bcolz.cparams.setdefaults(clevel=9, 
shuffle=filter_)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print(\"size b compressed -->\", b.cbytes, \"with '%s'\" % filter_)\n if filter_ > 0:\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)" ]
[ "0.6566176", "0.60859174", "0.5888738", "0.58693385", "0.57645285", "0.55487853", "0.55210376", "0.5506815", "0.5506815", "0.5505801", "0.54477817", "0.5363019", "0.53435475", "0.53034425", "0.525868", "0.52342796", "0.5219277", "0.5198363", "0.5187116", "0.5185466", "0.51466125", "0.5117568", "0.51161003", "0.51161003", "0.5106582", "0.5069728", "0.50689113", "0.50500107", "0.50497806", "0.5024951" ]
0.75801855
0
Perform all unit tests for doublecompress.c/h and return a PASS/FAIL boolean.
def perform_tests():
    print "\n****\nTesting Doublecompress...\n"
    dc_pass = unit_doublecompress()
    if (dc_pass):
        result = 'PASS'
    else:
        result = 'FAIL'
    print ">>> " + result

    return dc_pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_doublecompress(Verbose=False):\n\n from get_configuration import Get_SWIG_Simulation\n\n # Get Simulation Object, including Pointers to C structures\n test_config_files = [\"source/configfiles/unit_tests/doublecompress_test.json\"]\n sim = Get_SWIG_Simulation(test_config_files, Verbose)\n\n # Calculate the length of the Linac\n Nlinac = len(sim.linac_list)\n\n # Load parameters from Octave sav file with stored configuration\n oct_config_file = \"./source/configfiles/unit_tests/\"\n oct2py.octave.addpath(oct_config_file)\n loadout = oct2py.octave.load(\"double_compress_params_octave.sav\")\n\n # Extract Octave data structure containing Linacs' configuration\n params = loadout.pdc\n\n # Allocate C Arrays for doublecompress inputs\n dphivr = acc.double_Array(Nlinac)\n dV_Vvr = acc.double_Array(Nlinac)\n\n # Outputs\n dc_noise_srcs = acc.Noise_Srcs()\n dcs = acc.Doublecompress_State()\n acc.Doublecompress_State_Allocate(dcs, Nlinac)\n\n # Pointers to outputs\n Ipk = acc.double_Array_frompointer(dcs.Ipk)\n sz = acc.double_Array_frompointer(dcs.sz)\n dE_E = acc.double_Array_frompointer(dcs.dE_E)\n sd = acc.double_Array_frompointer(dcs.sd)\n dt = acc.double_Array_frompointer(dcs.dt)\n sdsgn = acc.double_Array_frompointer(dcs.sdsgn)\n k = acc.double_Array_frompointer(dcs.k)\n Eloss = acc.double_Array_frompointer(dcs.Eloss)\n dE_Ei = acc.double_Array_frompointer(dcs.dE_Ei)\n dE_Ei2 = acc.double_Array_frompointer(dcs.dE_Ei2)\n cor = acc.double_Array_frompointer(dcs.cor)\n\n # Number of Simulation runs\n testnum = 30\n\n # Store Maximum error after each run (initialize at 0)\n maxerr = 0.0\n\n # Run testnum Simulation runs\n for cnt in range(testnum):\n\n np.random.seed(1301+cnt)\n\n # Find random values for Octave Linac configuration\n for l in range(Nlinac):\n params.lamv[0][l] = rand()+.2\n params.Lv[0][l] = 100*rand()+10\n params.av[0][l] = (20*rand())+15.0\n params.R56v[0][l] = .002*(rand()-.5)*2.0\n params.T566v[0][l] = .002*(rand()-.5)*2.0\n params.phiv[0][l] = 360.0*rand()\n params.s0v[0][l] = 3.0*rand()+.5\n\n # Copy and do unit conversion for C version\n sim.linac_list[l].C_Pointer.lam = params.lamv[0][l]\n sim.linac_list[l].C_Pointer.L = params.Lv[0][l]\n sim.linac_list[l].C_Pointer.a = params.av[0][l]/1000.0\n sim.linac_list[l].C_Pointer.R56 = params.R56v[0][l]\n sim.linac_list[l].C_Pointer.T566 = params.T566v[0][l]\n sim.linac_list[l].C_Pointer.phi = params.phiv[0][l]*np.pi/180.0\n sim.linac_list[l].C_Pointer.s0 = params.s0v[0][l]/1000.0\n\n # Introduce some pseudo-random numbers into noise source inputs\n dN_N = 100*(rand()-.5)*2\n dtg = 500*(rand()-.5)*2 # [picoseconds]\n dEg = .001*(rand()-.5)*2 # [Gev]\n dsig_z = params.sz0*(rand()-.5)*2 # [mm]\n dsig_E = 100*(rand()-.5)*2 # [%]\n chirp = .00001*(rand()-0)*2*0 # [m]\n\n # Vectors for Octave code\n dphiv_oct, dV_Vv_oct = np.zeros((2, Nlinac), dtype=float)\n for i in range(Nlinac):\n dphiv_oct[i] = 180*(rand()-.5)*2 # [deg]\n dV_Vv_oct[i] = 100*(rand()-.5)*2 # [%]\n\n # Copy and do unit conversion for C version\n dc_noise_srcs.dQ_Q = dN_N/100\n dc_noise_srcs.dtg = dtg/1e12\n dc_noise_srcs.dE_ing = dEg*1e9\n dc_noise_srcs.dsig_z = dsig_z/1000\n dc_noise_srcs.dsig_E = dsig_E/100\n dc_noise_srcs.dchirpt = chirp\n\n # Copy and do unit conversion for C version\n for i in range(Nlinac):\n # Inputs\n dphivr[i] = dphiv_oct[i]*np.pi/180\n dV_Vvr[i] = dV_Vv_oct[i]/100\n\n # Call the C routine via SWIG\n acc.Doublecompress(sim.gun.C_Pointer, sim.C_Pointer.linac_net, Nlinac,\n dc_noise_srcs, dphivr, dV_Vvr, dcs)\n\n # Call the Octave 
routine using Oct2py\n Ipk_o, sz_o, dE_E_o, sd_o, dt_o, sdsgn_o, k_o, Eloss_o\\\n , dE_Ei_o, dE_Ei2_o, cor1_o = oct2py.octave.double_compressxv(params,\n dN_N, dtg, dEg, dsig_z,\n dsig_E, chirp,\n dphiv_oct, dV_Vv_oct, sim.gun.C_Pointer.Q, verbose=False, nout=11)\n\n # Subroutine for subtracting and scaling outputs\n # for error calcualtion\n def diffoctcarr(inputoct, inputcarr, scale=1.0):\n N = len(inputoct[0])\n out = np.zeros(N, dtype=float)\n for k in range(N):\n out[k] = (inputoct[0][k]-inputcarr[k]*scale)\n return out\n\n # Get the errors for the different outputs\n errIpk = abs(diffoctcarr(Ipk_o, Ipk))\n errsz = abs(diffoctcarr(sz_o, sz, 1.0e3))\n errdE_E = abs(diffoctcarr(dE_E_o, dE_E, 100.0))\n errsd = abs(diffoctcarr(sd_o, sd, 100.0))\n errdt = abs(diffoctcarr(dt_o, dt, 1.0e12))\n errsdsgn = abs(diffoctcarr(sdsgn_o, sdsgn, 100.0))\n errk = abs(diffoctcarr(k_o, k))\n errEloss = abs(diffoctcarr(Eloss_o, Eloss, 1.0e-9))\n errdE_Ei = abs(diffoctcarr(dE_Ei_o, dE_Ei, 100.0))\n errdE_Ei2 = abs(diffoctcarr(dE_Ei2_o, dE_Ei2, 100.0))\n errcor = abs(diffoctcarr([cor1_o[0, 1:]], cor))\n\n # Find maximum error for this run\n temp = np.max([errIpk,\n errsz,\n errdE_E,\n errsd,\n errdt,\n errsdsgn,\n errk,\n errEloss,\n errdE_Ei,\n errdE_Ei2,\n errcor])\n\n # Compare to maximum of previous runs and keep highest error\n maxerr = max(temp, maxerr)\n\n print \" After {0} runs with random inputs the\".format(testnum)\n print \" Maximum difference is {0}\\n\".format(maxerr)\n\n # Error threshold\n tol = 1e-8\n\n # Unit passes if maximum error lower than error threshold\n unit_pass = maxerr < tol\n\n return unit_pass", "def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))", "def test00(self):\n a = np.arange(20)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n b = bcolz.carray(a, rootdir=self.rootdir,\n cparams=bcolz.cparams(clevel=9, cname=cname))\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) > b.nbytes,\n \"compression does not seem to have any overhead\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test_compressed(self):\n try:\n import zlib\n except ImportError:\n self.skipTest('zlib is missing')\n\n ba = amf3.ByteArray()\n\n self.assertFalse(ba.compressed)\n\n z = zlib.compress(b'b' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)\n\n z = zlib.compress(b'\\x00' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)", "def test_compress_cmd():\n # GIVEN a cli runner\n runner = CliRunner()\n # WHEN running the compress command with dry_run\n result = runner.invoke(compress, obj={})\n # THEN assert the command was succesful even without a valid api\n assert result.exit_code == 0", "def testSC(self):\n\n obt_np = compression.decByteOffet_numpy(compression.compByteOffet_numpy(self.ds))\n self.assertEqual(abs(self.ds - obt_np).max(), 0.0, \"numpy algo\")\n obt_cy = 
compression.decByteOffet_cython(compression.compByteOffet_numpy(self.ds))\n self.assertEqual(abs(self.ds - obt_cy).max(), 0.0, \"cython algo\")\n obt_cy2 = compression.decByteOffet_cython(compression.compByteOffet_numpy(self.ds), self.ds.size)\n self.assertEqual(abs(self.ds - obt_cy2).max(), 0.0, \"cython algo_orig\")\n obt_we = compression.decByteOffet_weave(compression.compByteOffet_numpy(self.ds), self.ds.size)\n self.assertEqual(abs(self.ds - obt_we).max(), 0.0, \"weave algo\")", "def check_zlib():\n\n try:\n import zlib\n zlib.compress('Compress this')\n return True\n except Exception as ex:\n LOG.error(str(ex))\n LOG.error('Failed to import zlib module.')\n return False", "def test_compress(self):\n self.logger.info(\"STEP: Create the workspace directory to be compressed.\")\n workspace = Workspace(Mock)\n directory = Path.cwd().joinpath(\"workspace\")\n directory.mkdir()\n workspace.workspace = directory\n\n # Create a file to verify compression.\n directory.joinpath(\"file.txt\").touch()\n\n test_folder = Path.cwd().joinpath(\"testfolder\")\n test_folder.mkdir()\n self.items.append(test_folder)\n\n self.logger.info(\"STEP: Compress the directory.\")\n workspace.compress()\n\n self.logger.info(\n \"STEP: Verify that the directory was compressed using the gztar format.\"\n )\n self.items.append(test_folder)\n compressed_workspace = Path.cwd().joinpath(\"workspace.tar.gz\")\n unpack_archive(compressed_workspace, test_folder, format=\"gztar\")\n compressed_file = test_folder.joinpath(\"workspace/file.txt\")\n self.assertTrue(compressed_file.exists() and compressed_file.is_file())", "def test_compress_works(self):\n tau = 45.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n # make sure we normally go outside the range\n self.assertGreater(np.sum(M1.out < mrate), 0)\n self.assertGreater(np.sum(M1.out > Mrate), 0)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertEqual(np.sum(M2.out < mrate), 0)\n self.assertEqual(np.sum(M2.out > Mrate), 0)", "def test_compress_2(self):\n text = 'abcdefdeabc'\n actual = LZ77.compress(text)\n expected = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n self.assertEqual(actual, expected)", "def test00(self):\n a = np.arange(20)\n filters = bcolz.filters.keys()\n if common.verbose:\n print(\"Checking filters:\", filters)\n # print(\"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize)\n for filter_ in filters:\n b = bcolz.carray(a, rootdir=self.rootdir,\n cparams=bcolz.cparams(clevel=9, shuffle=filter_))\n # print(\"size b compressed -->\", b.cbytes, \"with '%s'\"%cname)\n self.assertTrue(sys.getsizeof(b) > b.nbytes,\n \"compression does not seem to have any overhead\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test_decompress_1(self):\n b_array = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 49])\n actual = LZ77.decompress(b_array)\n expected = 
'abcdabc'\n self.assertEqual(actual, expected)", "def test_decompress_2(self):\n b_array = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n actual = LZ77.decompress(b_array)\n expected = 'abcdefdeabc'\n self.assertEqual(actual, expected)", "def testDecompress(self):\n decompressor = xz_decompressor.LZMADecompressor()\n\n compressed_data = (\n b']\\x00\\x00\\x80\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00*\\x1a\\t\\'d\\x1c'\n b'\\x87\\x8aO\\xcaL\\xf4\\xf8!\\xda\\x88\\xd8\\xff\\xff\\xeb\\xcc\\x00')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n # Test to trigger lzma raising EOFError.\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')\n\n # Test to trigger lzma raising IOError.\n decompressor = xz_decompressor.LZMADecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')", "def test01a(self):\n a = np.arange(1e5)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n bcolz.cparams.setdefaults(clevel=9, cname=cname)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def testDecompress(self):\n decompressor = xz_decompressor.XZDecompressor()\n\n compressed_data = (\n b'\\xfd7zXZ\\x00\\x00\\x01i\"\\xde6\\x02\\xc0\\x13\\x0f!\\x01\\x16\\x00\\xc0\\xb7\\xdc'\n b'\\xe9\\x01\\x00\\x0eThis is a test.\\x00\\x00]\\xc9\\xc3\\xc6\\x00\\x01#\\x0f\\xdb'\n b'\\xdf\\x90\\x0e\\x90B\\x99\\r\\x01\\x00\\x00\\x00\\x00\\x01YZ')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n # Test to trigger xz raising EOFError.\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')\n\n # Test to trigger xz raising IOError.\n decompressor = xz_decompressor.XZDecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')", "def test01c(self):\n a = np.arange(1e5)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n with bcolz.defaults_ctx(bcolz.cparams(clevel=9, cname=cname)):\n self.assertTrue(bcolz.defaults.cparams['cname'] == cname)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test02a(self):\n np.random.seed(10)\n a = np.cumsum(np.random.random_sample(100*1000)-0.5) # random walk\n if common.verbose:\n print(\"Checking quantize filter\")\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n 
cparams = bcolz.cparams(quantize=0)\n b = bcolz.carray(a, cparams=cparams, rootdir=self.rootdir)\n b_cbytes = b.cbytes\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # print \"size b compressed -->\", b_cbytes\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)\n cparams = bcolz.cparams(quantize=3)\n c = bcolz.carray(a, cparams=cparams, rootdir=self.rootdir)\n # print \"size c compressed -->\", c.cbytes\n self.assertTrue(c.cbytes < 0.7 * b_cbytes,\n \"quantize does not seem to improve compression \"\n \"significantly\")\n assert_array_almost_equal(a, c[:], 3, \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test01(self):\n a = np.arange(2e5)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b uncompressed-->\", b.nbytes\n # print \"size b compressed -->\", b.cbytes\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")", "def test01b(self):\n a = np.arange(1e5)\n cnames = bcolz.blosc_compressor_list()\n if common.verbose:\n print(\"Checking compressors:\", cnames)\n # print \"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize\n for cname in cnames:\n bcolz.defaults.cparams = {\n 'clevel': 9, 'shuffle': bcolz.SHUFFLE, 'cname': cname,\n 'quantize': 0}\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print \"size b compressed -->\", b.cbytes, \"with '%s'\"%cname\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test_get_compressed(self):\n self.create_compressed(\"%s/one\" % (self.tests_path), \"some value\")\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n expected_output = \"b'some value'\\n\" if PYTHON3 else \"some value\\n\"\n self.assertEqual(expected_output, self.output.getvalue())", "def runTest(self):\n nc = Dataset(self.file)\n data = nc['vl'][-1]\n # check max error of compression\n err = np.abs(data - self.data)\n assert(err.max() < nc['vl'].scale_factor)\n # turn off auto-scaling\n nc.set_auto_maskandscale(False)\n data = nc['vl'][-1]\n assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))\n nc.close()", "def test_op_no_compression(self):\n assert OP_NO_COMPRESSION == 0x20000", "def test_compression_tanh(self):\n tau = 48.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n mavg = 0.5*(mrate + Mrate)\n mdiff = 0.5*(Mrate - mrate)\n\n expected = mavg + mdiff*np.tanh((M1.out - mavg)/mdiff)\n\n self.assertTrue(np.allclose(M2.out, expected), msg=\n \"mean(abs(out - expected))={}\".format(np.mean(np.abs(M2.out - expected))))", "def test02(self):\n a = np.arange(111)\n b = bcolz.carray(a)\n # print \"size b uncompressed-->\", b.nbytes\n # print \"size b compressed -->\", 
b.cbytes\n self.assertTrue(sys.getsizeof(b) > b.nbytes,\n \"carray compress too much??\")", "def test_auto_compression():\n with dask.config.set({\"test123\": \"auto\"}):\n try:\n import lz4 # noqa: F401\n\n assert get_compression_settings(\"test123\") == \"lz4\"\n return\n except ImportError:\n pass\n\n try:\n import snappy # noqa: F401\n\n assert get_compression_settings(\"test123\") == \"snappy\"\n except ImportError:\n assert get_compression_settings(\"test123\") is None", "def test01a(self):\n a = np.arange(1e5)\n filters = bcolz.filters.keys()\n if common.verbose:\n print(\"Checking filters:\", filters)\n # print(\"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize)\n for filter_ in filters:\n bcolz.cparams.setdefaults(clevel=9, shuffle=filter_)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print(\"size b compressed -->\", b.cbytes, \"with '%s'\" % filter_)\n if filter_ > 0:\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test01c(self):\n a = np.arange(1e5)\n filters = bcolz.filters.keys()\n if common.verbose:\n print(\"Checking filters:\", filters)\n # print(\"\\nsize b uncompressed-->\", a.size * a.dtype.itemsize)\n for filter_ in filters:\n with bcolz.defaults_ctx(bcolz.cparams(clevel=9, shuffle=filter_)):\n self.assertTrue(bcolz.defaults.cparams['shuffle'] == filter_)\n b = bcolz.carray(a, rootdir=self.rootdir)\n # print(\"size b compressed -->\", b.cbytes, \"with '%s'\" % filter_)\n if filter_ > 0:\n self.assertTrue(bcolz.defaults.cparams['shuffle'] == bcolz.SHUFFLE)\n self.assertTrue(sys.getsizeof(b) < b.nbytes,\n \"carray does not seem to compress at all\")\n assert_array_equal(a, b[:], \"Arrays are not equal\")\n # Remove the array on disk before trying with the next one\n if self.disk:\n common.remove_tree(self.rootdir)", "def test_uncompressed(mode, size, test_file):\n\n with Image.open(test_file) as im:\n assert im.format == \"DDS\"\n assert im.mode == mode\n assert im.size == size\n\n assert_image_equal_tofile(im, test_file.replace(\".dds\", \".png\"))", "def test_is_spring_decompression_needed_when_false(\n populated_compress_api_fastq_spring: CompressAPI,\n analysis_store_single_case: Store,\n case_id: str,\n):\n\n # GIVEN a populated prepare_fastq_api\n prepare_fastq_api = PrepareFastqAPI(\n store=analysis_store_single_case, compress_api=populated_compress_api_fastq_spring\n )\n # GIVEN a store with a case that has linked samples\n case_obj: Family = analysis_store_single_case.get_case_by_internal_id(internal_id=case_id)\n assert case_obj\n # GIVEN that the case has linked samples\n link_objects = [link_obj for link_obj in case_obj.links]\n assert link_objects\n\n # WHEN checking if spring decompression is needed\n res = prepare_fastq_api.is_spring_decompression_needed(case_id)\n\n # THEN assert that spring decompression is not needed since there are fastq files\n assert res is False" ]
[ "0.6980046", "0.6719932", "0.60160774", "0.5986958", "0.59624964", "0.5954615", "0.59477985", "0.5935714", "0.5780253", "0.5716293", "0.5647769", "0.5639771", "0.5574951", "0.5552292", "0.5536779", "0.5534533", "0.5488307", "0.5451174", "0.5443359", "0.5421884", "0.5395015", "0.5376116", "0.53424716", "0.5330844", "0.532989", "0.5329046", "0.5326714", "0.531421", "0.526491", "0.5263375" ]
0.86720395
0
Classifies an article over the whole tree, whose layout you can see in the Node structure. Takes the Node structure, the article, and diff_coef, which is mentioned above. Returns a dictionary, built recursively, of the categories, subcategories, and so on that this article is in.
def article_classification_tree(article: str, node=make_node_structure(), diff_coef=.1) -> dict:
    if not node.get_children_dict():
        return {}

    diction = {}
    for category in article_classification(article, node, diff_coef):
        diction[category] = article_classification_tree(article, node.get_children_dict()[category])

    return diction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def article_classification(article: str, node: Node, diff_coef=.1) -> list:\n\n x_test = node.get_vectorizer().transform([article])\n y_pred = node.get_model().predict_proba(x_test).reshape(-1)\n\n result = np.array(list(node.get_children_dict()))[y_pred > (y_pred.max() - diff_coef)]\n\n return result", "def flatten(orig):\n\n\t# Empty dictionary\n\tdata = {}\n\tfor c in orig['tree']['children']:\n\t\t# in operator\n\t\tif 'children' in c:\n\t\t\tfor c2 in c['children']:\n\t\t\t\tif 'children' in c2:\n\t\t\t\t\tfor c3 in c2['children']:\n\t\t\t\t\t\tif 'children' in c3:\n\t\t\t\t\t\t\tfor c4 in c3['children']:\n\t\t\t\t\t\t\t\tif (c4['category'] == 'personality'):\n\t\t\t\t\t\t\t\t\tdata[c4['id']] = c4['percentage']\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif (c3['category'] == 'personality'):\n\t\t\t\t\t\t\t\tdata[c3['id']] = c3['percentage']\n\n\treturn data", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n 
file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def get_structure():\n\n _articles = []\n _categories = []\n\n def get_article(article_filename, general_category, sep='|||'):\n \"\"\"\n Adds the given article to the \"articles, categories, general\" current structure\n \"\"\"\n category = []\n with open(article_filename) as _f:\n for _row in _f:\n if len(_row):\n if _row[0] == '=':\n # new category\n k = 0\n while _row[k] == '=':\n k += 1\n if k > 1:\n category = category[:k - 1]\n category += [clean_text(_row)]\n sub_category = []\n elif _row[0] == '#':\n # new entry\n _articles.append(clean_text(_row))\n k = 0\n while _row[k] == '#':\n k += 1\n sub_category = sub_category[:k - 1] + [clean_text(_row)]\n if category[0] == general_category:\n _categories.append(sep.join(category + sub_category[:-1]))\n else:\n _categories.append(sep.join([general_category] + category + sub_category[:-1]))\n\n categories_dict = get_categories('https://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Level/5')\n _general = {k: v.split('/')[5] for k, v in categories_dict.items()}\n filenames = list(categories_dict.keys())\n\n if not os.path.exists('wikivitals/data/mds/'):\n os.makedirs('wikivitals/data/mds/')\n\n for k, v in categories_dict.items(): # saves the category pages' text\n with open('wikivitals/data/mds/{}'.format(k), 'w', encoding='utf8') as f:\n url = \"https://en.wikipedia.org/w/index.php?title={}&action=edit\".format(v[6:])\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n f.write(soup.find('textarea').text)\n\n for filename in filenames:\n get_article('wikivitals/data/mds/' + filename, _general[filename])\n\n with open('wikivitals/data/en-categories.txt', 'w', encoding='utf8') as file:\n for cat in _categories:\n file.write(cat + \"\\n\")\n\n with open('wikivitals/data/en-articles.txt', 'w', encoding='utf8') as file:\n for name in _articles:\n file.write(name + \"\\n\")\n\n return _articles, _categories, _general", "def _classify(tree, x):\n # YOUR CODE HERE\n # begin answer\n feature_name=list(tree.keys())[0] #first element\n secondDict=tree[feature_name] \n key=x.loc[feature_name] #extract value from x\n for key_val in secondDict:\n feature_val=key_val[0]\n valueOfKey=secondDict[(feature_val, key>=feature_val)]\n if isinstance(valueOfKey,dict):\n label=_classify(valueOfKey,x)\n else:\n label=valueOfKey\n return label\n # end answer", "def decision_tree(df, dt_dict, curr_node,\r\n prev_attr = None, align_dir = None,\r\n depth = -1, no_data = False,\r\n ensemble = None):\r\n \r\n class_count = get_class_count(df)\r\n # get the class label counts for the given dataframe\r\n leaf_node_bool = check_leaf_node(df)\r\n # this function helps to check if we have a leaf node\r\n if leaf_node_bool:\r\n # if its leaf node\r\n curr_node[align_dir] = df['class'].values[0]\r\n # assign the leaf node value\r\n elif no_data:\r\n # if we are out of data points\r\n class_counts = df['class'].value_counts()\r\n # get the class counts\r\n curr_node[align_dir] = np.argmax(class_counts)\r\n # assign the majority class of prev node\r\n else:\r\n entropy_values_series = impurity.entropy_calc(df, ensemble = ensemble)\r\n # calculate the entropy values for each feature\r\n info_gain_dict = {}\r\n # empty dict for information gain\r\n for feature in entropy_values_series.index:\r\n # iterate over each features\r\n 
impurity.information_gain_calc(df, feature, info_gain_dict)\r\n # function call for information gain calculation\r\n for f in entropy_values_series.index:\r\n # iterate over each feature\r\n information_gain = entropy_values_series[f] - info_gain_dict[f][1]\r\n # calculation of information gain\r\n info_gain_dict[f] = (info_gain_dict[f][0], information_gain)\r\n # update the information gain dict\r\n best_feature = sorted(info_gain_dict, key = lambda x: info_gain_dict[x][1])[-1]\r\n # get the best feature on which to be splitted.\r\n #print(best_feature)\r\n node_value = (best_feature, info_gain_dict[best_feature], class_count[0],\r\n class_count[1])\r\n # get the node value\r\n \r\n if not leaf_node_bool and align_dir:\r\n # growing the tree\r\n if depth == 0:\r\n if node_value[2] > node_value[3]:\r\n node_value = 0\r\n else:\r\n node_value = 1\r\n curr_node[align_dir] = node_value\r\n return 0\r\n else:\r\n curr_node[align_dir] = {node_value:{}}\r\n curr_node = curr_node[align_dir][node_value]\r\n else:\r\n dt_dict[node_value] = {}\r\n curr_node = dt_dict[node_value]\r\n \r\n data_split(df, best_feature, info_gain_dict, \r\n dt_dict, curr_node, depth)\r\n # function call for data split\r", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def _get_graph_based_ic_dictionary(self):\n\n\t\t# TODO find the literature reference or presentation where this equation is from instead of just the presentation.\n\n\t\t#ic_dict = {}\n\t\t#num_terms_in_ontology = len(self)\n\t\t#for term in self.terms():\n\t\t#\tdepth = self._depth_dict[term.id]\n\t\t#\tnum_descendants = len(list(term.subclasses(with_self=False)))\n\t\t#\tic_value = float(depth)*(1-(math.log(num_descendants+1)/math.log(num_terms_in_ontology)))\n\t\t#\tic_dict[term.id] = ic_value\n\t\t#return(ic_dict)\n\n\n\t\t# Getting the information content of each term in the ontology based on graph structure.\n\t\tic_dict = {}\n\t\tnum_terms_in_ontology = len(self)\n\t\tfor term in self.terms():\n\t\t\tdepth = self._depth_dict[term.id]\n\t\t\tnum_descendants = len(list(term.subclasses(with_self=False)))\n\t\t\tic_value = float(depth)*(1-(math.log(num_descendants+1)/math.log(num_terms_in_ontology)))\n\t\t\tic_dict[term.id] = ic_value\n\n\n\t\t# Converting to weights based on information content rather than raw value.\n\t\tic_dict_as_weights = {}\n\t\tic_values = ic_dict.values()\n\t\tmin_ic = min(ic_values)\n\t\tmax_ic = max(ic_values)\n\t\tnew_max = 1.00\n\t\tnew_min = 0.00\n\t\tfor k,v in ic_dict.items():\n\t\t\told_range = max_ic-min_ic\n\t\t\tnew_range = new_max-new_min\n\t\t\tnew_value = (((v - min_ic) * new_range) / old_range) + new_min\n\t\t\tic_dict_as_weights[k] = new_value\n\n\t\treturn(ic_dict, ic_dict_as_weights)", "def classify(observations, tree, dataMissing=False):\n\n def classifyWithoutMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n #if v >= tree.value: branch = tree.trueBranch\n #else: branch = tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithoutMissingData(observations, branch)\n\n\n def classifyWithMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n if v == None:\n tr = classifyWithMissingData(observations, tree.trueBranch)\n fr = classifyWithMissingData(observations, tree.falseBranch)\n tcount = sum(tr.values())\n fcount = sum(fr.values())\n tw = float(tcount)/(tcount + fcount)\n fw = float(fcount)/(tcount + fcount)\n result = collections.defaultdict(int) # Problem 
description: http://blog.ludovf.net/python-collections-defaultdict/\n for k, v in tr.items(): result[k] += v*tw\n for k, v in fr.items(): result[k] += v*fw\n return dict(result)\n else:\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n # if v >= tree.value: branch = tree.trueBranch\n # else: branch = tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithMissingData(observations, branch)\n\n # function body\n if dataMissing:\n return classifyWithMissingData(observations, tree)\n else:\n return classifyWithoutMissingData(observations, tree)", "def improve_tree(tree, freq_dict):\n # todo", "def __init__(self, dataset: List[SEMData], file_name):\n\n tree = Tree(file_name)\n\n self.nodes = dict()\n self.n_nodes = 0\n\n\n self.nodes['N0'] = SEMTreeNode('node')\n self.nodes['N0'].add_dist('N1', 20.8)\n self.nodes['N0'].add_dist('N3', 20.8)\n\n self.nodes['N1'] = SEMTreeNode('node')\n self.nodes['N1'].add_dist('N0', 20.8)\n self.nodes['N1'].add_dist('N2', 33.7)\n self.nodes['N1'].add_dist('NKL', 112.3)\n self.nodes['N1'].add_dist('B19', 112.3)\n\n self.nodes['NKL'] = SEMTreeNode('leaf')\n self.nodes['NKL'].add_dist('N1', 112.3)\n\n self.nodes['B19'] = SEMTreeNode('leaf')\n self.nodes['B19'].add_dist('N1', 112.3)\n\n self.nodes['N2'] = SEMTreeNode('node')\n self.nodes['N2'].add_dist('N1', 33.7)\n self.nodes['N2'].add_dist('CD4', 78.6)\n self.nodes['N2'].add_dist('CD8', 78.6)\n\n self.nodes['CD4'] = SEMTreeNode('leaf')\n self.nodes['CD4'].add_dist('N2', 78.6)\n\n self.nodes['CD8'] = SEMTreeNode('leaf')\n self.nodes['CD8'].add_dist('N2', 78.6)\n\n self.nodes['N3'] = SEMTreeNode('node')\n self.nodes['N3'].add_dist('N0', 20.8)\n self.nodes['N3'].add_dist('MON', 41.8)\n self.nodes['N3'].add_dist('NEU', 112.3)\n\n self.nodes['NEU'] = SEMTreeNode('leaf')\n self.nodes['NEU'].add_dist('N3', 112.3)\n\n self.nodes['MON'] = SEMTreeNode('leaf')\n self.nodes['MON'].add_dist('N3', 41.8)\n self.nodes['MON'].add_dist('DEN', 70.5)\n self.nodes['MON'].add_dist('MRF', 70.5)\n\n self.nodes['MRF'] = SEMTreeNode('leaf')\n self.nodes['MRF'].add_dist('MON', 70.5)\n\n self.nodes['DEN'] = SEMTreeNode('leaf')\n self.nodes['DEN'].add_dist('MON', 70.5)\n\n\n # self.get_nodes(tree)\n print(tree)\n\n # # Compare names of datasets and leaves of the tree\n # data_names = [data.name for data in dataset]\n # print(data_names)\n # for name in data_names:\n # if name not in self.nodes.keys():\n # raise ValueError('Dataset and Tree do not match')", "def _parse(self, tree):\n date_el = self.get_etree().xpath(DATE_XP)[0]\n self.date = date_el.attrib['value']\n self.year, self.month, self.day = self.date.split('-')\n self.date_text = date_el.text\n\n def resolve_type(element):\n return element.attrib.get('type', '').lower().strip('. 
')\n\n def index_entity(nodes, model, article):\n for n in nodes:\n m = model(n, article)\n if m.ok:\n db.session.add(m)\n\n def get_html(article):\n return html.tostring(tei.build(etree.Element('article'), article))\n\n root = self.get_etree()\n for section in root.xpath('//div1'):\n section_type = resolve_type(section)\n if not section_type:\n continue\n for subsection in section.xpath('./div2'):\n subsection_type = resolve_type(subsection)\n if not subsection_type:\n continue\n for article in subsection.xpath('./div3'):\n article_type = resolve_type(article)\n if article_type == 'ad-blank':\n continue\n a = Article(issue_id=self.id,\n date=self.date,\n section_type=section_type,\n subsection_type=subsection_type,\n article_type=article_type,\n xpath=root.getpath(article),\n content=get_html(article))\n db.session.add(a)\n db.session.flush()\n index_entity(article.xpath('.//persName'), PersName, a)\n index_entity(article.xpath('.//placeName'), PlaceName, a)\n index_entity(article.xpath('.//orgName'), OrgName, a)\n index_entity(article.xpath('.//rs'), RefString, a)", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def make_node_structure() -> Node:\n\n agricult = Node('argicult', os.path.join(os.path.abspath(os.curdir),\n 'finance/agricult/agricult_classifier_RF.sav'),\n os.path.join(os.path.abspath(os.curdir),\n 'finance/agricult/agricult_classifier_CV.sav'),\n {'cattle': Node('cattle'), 'corn': Node('corn'),\n 'soybean': Node('soybean'), 'sugar': Node('sugar')})\n\n crypto = Node('crypto', os.path.join(os.path.abspath(os.curdir),\n 'finance/crypto/crypto_classifier_RF.sav'),\n os.path.join(os.path.abspath(os.curdir),\n 'finance/crypto/crypto_classifier_CV.sav'),\n {'bitcoin': Node('bitcoin'), 'dash': Node('dash'),\n 'ethereum': Node('ethereum'), 'litecoin': Node('litecoin'),\n 'monero': Node('monero'), 'ripple': Node('ripple'), 'zash': Node('zash')}) \n\n energy = Node('energy', os.path.join(os.path.abspath(os.curdir),\n 'finance/energy/energy_classifier_RF.sav'),\n os.path.join(os.path.abspath(os.curdir),\n 'finance/energy/energy_classifier_CV.sav'),\n {'brent crude': Node('brent crude'), 'coal': Node('coal'),\n 'crude oil': Node('crude oil'), 
'natural gas': Node('natural gas')})\n\n metals = Node('metals', os.path.join(os.path.abspath(os.curdir),\n 'finance/metals/metals_classifier_RF.sav'),\n os.path.join(os.path.abspath(os.curdir),\n 'finance/metals/metals_classifier_CV.sav'),\n {'gold': Node('gold'), 'iron': Node('iron'),\n 'platinum': Node('platinum'), 'silver': Node('silver')})\n\n finance = Node('finance', os.path.join(os.path.abspath(os.curdir),\n 'finance/finance_classifier_RF.sav'),\n os.path.join(os.path.abspath(os.curdir),\n 'finance/finance_classifier_CV.sav'),\n {'agricult': agricult, 'crypto': crypto,\n 'energy': energy, 'metals': metals})\n\n return finance", "def dependency_parse(nlp, article):\n if type(article) == str:\n parsed = nlp(article)\n return [[{\"sender\": {k[1:]: dep[0].__dict__[k] for k in dep[0].__dict__ if not k.startswith(\"_parent\")},\n \"edge\": dep[1],\n \"receiver\": {k[1:]: dep[2].__dict__[k] for k in dep[2].__dict__ if not k.startswith(\"_parent\")}}\n for dep in sentence.dependencies] for sentence in parsed.sentences]\n elif type(article) == list:\n parsed = [nlp(sentence) for sentence in article]\n return [[[{\"sender\": {k[1:]: dep[0].__dict__[k] for k in dep[0].__dict__ if not k.startswith(\"_parent\")},\n \"edge\": dep[1],\n \"receiver\": {k[1:]: dep[2].__dict__[k] for k in dep[2].__dict__ if not k.startswith(\"_parent\")}}\n for dep in sentence.dependencies] for sentence in p.sentences] for p in parsed]", "def classify(data_point, tree):\r\n current = tree\r\n while(current.is_leaf == False): #while we're not at a leaf\r\n q = tree.issue\r\n v = data_point.dat_votes[ord(q) - 97]\r\n if(current is None): pass\r\n current = current.get_classification(v)\r\n #we should now be at a Leaf\r\n if(current is None): print(\"FATAL\")\r\n c =current.get_classification(\"\")\r\n # print(\"classified: \" + str(data_point) + \" as \" + str(c))\r\n return c", "def create_nodes(self, articles, merges):\n # self.all_nodes = [RealNode(a) for a in articles]\n if merges is None:\n for article in articles:\n matching_layer = self.find_layer_by_name(article[\"year\"])\n matching_layer.create_node(article)\n else:\n for merge in merges:\n art1 = next(x for x in articles if x['key'] == merge['art1'])\n art2 = next(x for x in articles if x['key'] == merge['art2'])\n if 'art3' in merge:\n art3 = next(x for x in articles if x['key'] == merge['art3'])\n matching_layer = self.find_layer_by_name(art1['year'])\n merge_key = art1['key'] + art2['key']\n merge_dict = {'key': merge_key, 'year': matching_layer.name, 'art1': art1, 'art2': art2, 'merge': merge}\n if 'art3' in merge:\n merge_key += art3['key']\n merge_dict = {'key': merge_key, 'year': matching_layer.name, 'art1': art1, 'art2': art2, 'art3': art3, 'merge': merge}\n new_node = matching_layer.create_node(merge_dict)\n new_node.kind = \"Merge\"\n for article in articles:\n merge_art = False\n for merge in merges:\n if article['key'] == merge['art1'] or article['key'] == merge['art2'] or \\\n ('art3' in merge and article['key'] == merge['art3']):\n merge_art = True\n break\n if not merge_art:\n matching_layer = self.find_layer_by_name(article[\"year\"])\n matching_layer.create_node(article)", "def hierachy_nomenclature(a2_data):\n ret_dic = OrderedDict()\n ret_dic['X'] = OrderedDict()\n ret_dic['X']['name'] = a2_data['xs'].keys()\n ret_dic['X']['N'] = len(a2_data['xs'].keys())\n ret_dic['I'] = OrderedDict()\n ret_dic['I']['name'] = a2_data['xs']['1'].keys()\n ret_dic['I']['N'] = len(a2_data['xs']['1'].keys())\n ret_dic['R'] = OrderedDict()\n 
ret_dic['R']['name'] = a2_data['xs']['1']['U235'].keys()\n ret_dic['R']['N'] = len(a2_data['xs']['1']['U235'].keys())\n ret_dic['G'] = OrderedDict()\n ret_dic['G']['name'] = a2_data['xs']['1']['U235']['abso'].keys()\n ret_dic['G']['N'] = len(a2_data['xs']['1']['U235']['abso'].keys())\n return ret_dic", "def annotate(self):\n logger.debug(f\"found ckt:{self.hier_graph_dict}\")\n\n names = list(self.hier_graph_dict)\n\n for name in names:\n circuit_name= name\n G1 = self.hier_graph_dict[name][\"graph\"]\n self._group_block_const(G1,circuit_name)\n self._group_cap_const(G1,circuit_name)\n\n for circuit_name in list(self.hier_graph_dict.keys()):\n logger.debug(f\"START MATCHING in circuit: {circuit_name}\")\n circuit = self.hier_graph_dict[circuit_name]\n G1 = circuit[\"graph\"]\n # map and reduce graph to dictionary\n mapped_graph_list = self._mapped_graph_list(G1, circuit_name, self.pg )\n const_list = self.hier_graph_dict[circuit_name]['constraints']\n self.hier_graph_dict[circuit_name][\"graph\"] = self._reduce_graph(G1, circuit_name, mapped_graph_list, const_list)\n \n for const in list(const_list):\n self._check_const_length(self.hier_graph_dict[circuit_name].constraints,const)\n check_nodes(self.hier_graph_dict)\n logger.debug(f\"Grest ckt is {circuit['graph'].nodes(data=True)}\")\n if circuit_name not in self.no_array:\n symmetry_blocks = FindSymmetry(circuit[\"graph\"], circuit[\"ports\"], circuit[\"ports_weight\"], self.stop_points)\n for symm_blocks in symmetry_blocks.values():\n logger.debug(f\"generated constraints: {pprint.pformat(symm_blocks, indent=4)}\")\n if isinstance(symm_blocks, dict) and \"graph\" in symm_blocks.keys():\n logger.debug(f\"added new hierarchy: {symm_blocks['name']} {symm_blocks['graph'].nodes()}\")\n self.hier_graph_dict[symm_blocks['name']] = symm_blocks\n assert False, \"Don't understand what's being deleted here\"\n del self.hier_graph_dict[symm_blocks['name']]['name']\n\n self.lib_names = [lib_ele['name'] for lib_ele in self.lib]\n for ckt_name, circuit in self.hier_graph_dict.items():\n if 'id' in self.hier_graph_dict[ckt_name] and len(self.hier_graph_dict[ckt_name]['id']) > 1:\n copies = len(self.hier_graph_dict[ckt_name]['id'])\n self.lib_names += [ckt_name + '_type' + str(n) for n in range(copies)]\n return self.lib_names", "def classify(series, tree):\n feature = tree[0]\n subtree = tree[1]\n\n answer = series[feature]\n response = subtree[answer]\n\n if type(response) != list: #base case\n return subtree[answer]\n else:\n return classify(series, response) #recursive case", "def get_nodes_context(self, node_type_map):\n\n # first let's get the number of nodes of subgraphs rooted at each node, we make use of the special vertex indexing our representation has\n # by traversing in a decreasing order of vertex label, we can compute the number of leaves under each node in one pass\n # this is helpful if we want to compute a special kind of lis\n\n # num_nodes = {} # table of number of nodes of subgraphs rooted at the given node\n # n = self.graph.num_vertices()\n # for v_idx in reversed(range(n)):\n # v = self.graph.vertex(v_idx)\n # if v.out_degree() == 0: # leaf\n # num_nodes[v_idx] = 1\n # else:\n # print(v)\n # num_nodes[v_idx] = 1\n # for u in v.out_neighbors():\n # num_nodes[v_idx] += num_nodes[int(u)]\n\n # print(\"ratios:\")\n # for v in self.graph.vertices():\n # if v == 0: continue\n # print(\"{}:{}\".format(v, num_nodes[v] / (num_nodes[next(v.in_neighbors())] - 1)))\n\n # dump contexts in the form of:\n # [num_children, parent_type, 
child_1type, child_2type, ..., l_parent, l_child1, l_child2 ...]\n # where l_s are set to 1 for now\n get_type_idx = lambda _: node_type_map[self.graph.vp['type'][_]]\n contexts = []\n for v in self.graph.vertices():\n n = v.out_degree() # number of children\n if n != 0: # not a leaf\n contexts.append([n, get_type_idx(v)])\n for u in v.out_neighbors():\n contexts[-1].append(get_type_idx(u))\n contexts[-1].extend([1] * (n + 1))\n # print(contexts)\n return contexts", "def tree_report(tree, unknown_summary, debug=False):\n ##########################################################################\n def get_equation_html_from_id(equation_id):\n if list(tree)[0].split('_')[0] == 'a':\n return '<span class=\"param\" data-type=\\\"eq\\\" data-item=\\\"'+\\\n equation_id+\\\n '\\\">this equation</span>'\n else:\n name_of_eq = equation_id.split('_')[1]\n page_of_eq = eqbank[name_of_eq][\"group\"]\n return \\\n '<span class=\"param\" data-type=\\\"pallette-eq\\\" data-page=\\\"'\\\n +str(page_of_eq)\\\n +'\\\" data-item=\\\"'+\\\n str(name_of_eq)+\\\n '\\\">this equation</span>'\n \n def get_assoc_html(var_term):\n return '<span class=\"param\" data-type=\\\"var-assoc\\\" data-item=\\\"'+\\\n str(var_term)+\\\n '\\\">'+\\\n str(unknown_summary[list(unknown_summary[var_term])[0]]['value']['varDisplay'])+\\\n '</span>'\n\n def construct_phrase_desc_rec(node, parent):\n l_children = {\n child: len([_ for _ in tree[child] if _ != node])\n for child in tree[node]\n if child != parent\n }\n if debug:\n print(\"In tree_report\")\n print(l_children)\n \n # If the root node has any substitutions, address it accordingly\n # and only report it as such, no need for breakdowns.\n if \"equationlist\" in tree.nodes[node]:\n current_phrase = []\n \n subs_children = {\n eq: tree.nodes[node][\"equationlist\"][eq]['term'] \\\n for eq in tree.nodes[node][\"equationlist\"]\n if tree.nodes[node][\"equationlist\"][eq]['substituted'] == True\n }\n \n # if node has * \n # and children has -1, include negative\n # and if node has > 1 subs_children,\n # include product of\n if tree.nodes[node]['label'] == '*':\n if \"NegativeOne\" in [\n _.split('_')[2] \n for _ in nx.neighbors(tree, node) \n if _ !=tree.nodes[node]['pred']\n ]:\n if parent==None:\n # Top level product with a negative sign\n current_phrase.append(\"negative\")\n elif parent['label'] == '+':\n # The parent above node is +, so this must be a negative term\n current_phrase.append(\"subtracted by\")\n if len(subs_children)>1:\n current_phrase.append(\"product of\")\n \n # if node has + \n # and if node has > 1 subs_children,\n # include sum of\n if tree.nodes[node]['label'] == '+'\\\n and len(subs_children)>1:\n current_phrase.append(\"sum of\")\n \n #if len(subs_children) == 1:\n # host_eq = list(subs_children)[0]\n # var_term = subs_children[host_eq]\n # # only one substituted term, nothing fancy\n # current_phrase.append(\n # get_assoc_html(var_term)+' from '+get_equation_html_from_id(host_eq)\n # )\n #\n #elif len(subs_children) > 1:\n \n if len(subs_children) > 0:\n term_list = []\n for host_eq, var_term in subs_children.items():\n term_list.append(\n get_assoc_html(var_term)+' from '+get_equation_html_from_id(host_eq)\n ) \n current_phrase.append(\",\".join(term_list))\n \n if debug:\n print(current_phrase)\n return \" \".join(current_phrase)\n\n # If the node is a leaf node\n if len(l_children) == 0:\n # Substitute this with the \n # proper term related to each leaf node\n \n # Add utility function bits for comparison\n if is_symbol(tree, node):\n return 
get_parambox_html(node, tree)\n else:\n return tree.nodes[node]['label']\n \n child_phrases_list = {\n child: construct_phrase_desc_rec(child, node)\n for child in l_children\n }\n \n current_phrase = [];\n \n if tree.nodes[node]['label'] == '+':\n # Addition - \"sum of\" etc.\n # except: if only one child exists, just mention this child.\n # Separate out the negative and positive terms,\n # negative terms begin with \"subtracted by\"\n \n # if parent==None:\n # current_phrase.append(\"sum of\")\n # elif len(l_children) == 1\\\n # or len([_ for _ in l_children if l_children[_]==0])==1:\n # pass\n # else:\n # current_phrase.append(\"sum of\")\n \n texts = {\"single\":[], \"subtree\": []}\n for child in l_children:\n # Check if the children of node are leaves or not\n if l_children[child] == 0:\n if debug:\n print(f\"at {node}--> child:{child} phrase:{child_phrases_list[child]}\")\n texts[\"single\"].append(child_phrases_list[child])\n elif l_children[child] > 0:\n texts[\"subtree\"].append(child_phrases_list[child])\n \n if len(texts[\"single\"]) + len(texts[\"subtree\"]) > 1:\n current_phrase.append(\"sum of\")\n \n if len(texts[\"single\"]):\n current_phrase\\\n .append(\",\".join(texts[\"single\"]))\n \n if len(texts[\"single\"]) and len(texts[\"subtree\"]):\n current_phrase.append(\"and\")\n \n if len(texts[\"subtree\"]):\n current_phrase\\\n .append(\",\".join(texts[\"subtree\"]))\n \n elif tree.nodes[node]['label'] == '*':\n labels_children = [child_phrases_list[_] for _ in child_phrases_list]\n if \"-1\" in labels_children:\n if parent==None:\n # Top level product with a negative sign\n current_phrase.append(\"negative\")\n elif tree.nodes[parent]['label'] == '+':\n # The parent above node is +, so this must be a negative term\n current_phrase.append(\"subtracted by\")\n # removing -1 from future considerations, we've already accounted for this.\n l_children = {_:l_children[_] for _ in l_children if tree.nodes[_]['label'] != '-1'}\n \n # if parent==None:\n # current_phrase.append(\"product of\")\n # elif len(l_children) == 1\\\n # or len([_ for _ in l_children if l_children[_]==0])==1:\n # pass\n # else:\n # current_phrase.append('product of')\n \n # directly append the leaf node labels,\n # then separately append the results of the sum on the next level\n texts = {\"single\":[], \"subtree\": []}\n for child in l_children:\n # Check if the children of node are leaves or not\n if l_children[child] == 0:\n texts[\"single\"].append(child_phrases_list[child])\n elif l_children[child] > 0:\n texts[\"subtree\"].append(child_phrases_list[child])\n \n if len(texts[\"single\"]) + len(texts[\"subtree\"]) > 1:\n current_phrase.append(\"product of\")\n \n if len(texts[\"single\"]):\n current_phrase\\\n .append(\",\".join(texts[\"single\"]))\n \n if len(texts[\"single\"]) and len(texts[\"subtree\"]):\n current_phrase.append(\"and\")\n \n if len(texts[\"subtree\"]):\n current_phrase\\\n .append(\",\".join(texts[\"subtree\"]))\n \n elif tree.nodes[node]['label'] == '^':\n labels_children = [child_phrases_list[_] for _ in child_phrases_list]\n if \"-1\" in labels_children:\n if parent!=None and tree.nodes[parent]['label'] == '*':\n # The parent above node is +, so this must be a negative term\n current_phrase.append(\"divided by\")\n current_phrase.extend([\n child_phrases_list[_] \n for _ in l_children if tree.nodes[_]['label'] != '-1'\n ])\n if parent==None:\n # Top level product with a negative sign\n current_phrase.append(\"raised to power (-1)\")\n # Update:\n # If power is integer and <0\n # 
If -1, say divided by\n # If <-1, say divided by __ raised to the power\n # Otherwise, Say raised to power of (integer or expr)\n # Eg: MediumProblem\n \n return \" \".join(current_phrase)\n ##########################################################################\n \n return construct_phrase_desc_rec(find_root(tree), None)", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. 
classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def toDict(self,treeRoot,nodesList):\n risk = round(float(treeRoot[\"impact\"]) * self.probability, 4)\n prob = round(self.probability, 4)\n leafKeys = self.nodeKeys\n keyTextPairs = []\n for key in leafKeys:\n node = findNode(key,nodesList)\n keyTextPairs.append([key, node[\"text\"]])\n dit = {\n \"risk\" : risk,\n \"probability\" : prob,\n \"leafKeys\" : keyTextPairs\n }\n return dit", "def _get_term_depth_dictionary(self):\n\n\n\n\t\t# Find the root term(s) of the ontology.\n\t\troot_term_ids = []\n\t\tfor term in self.terms():\n\t\t\t# Check if this term has no inherited terms (is a root), discounting terms that are obsolete.\n\t\t\tinherited_terms = [t for t in term.superclasses(with_self=False)]\n\t\t\tif (len(inherited_terms)==0) and (term.name is not None) and (\"obsolete\" not in term.name):\n\t\t\t\troot_term_ids.append(term.id)\n\t\t\t\t\n\t\t# Find the depths of all terms in the ontology below those terms.\n\t\tdepths = {i:0 for i in root_term_ids}\n\t\tdepth = 1\n\t\tdone = False\n\t\twhile not done:\n\t\t\t\n\t\t\t# Add all the terms immediately below \n\t\t\tbefore = len(depths)\n\t\t\tnew_terms = []\n\t\t\tfor old_term_id in [i for i in depths.keys() if depths[i] == depth-1]:\n\t\t\t\tfor new_term_id in [t.id for t in self[old_term_id].subclasses(with_self=False,distance=1)]:\n\t\t\t\t\tif new_term_id not in depths:\n\t\t\t\t\t\tdepths[new_term_id] = depth\n\t\t\t\n\t\t\t# Increment the depth and see if any new terms were added to the distance dictionary during this pass.\n\t\t\tdepth = depth + 1\n\t\t\tafter = len(depths)\n\t\t\tif before == after:\n\t\t\t\tdone = True\n\t\t\t\t\n\t\t# Add any other remaining terms to the dictionary with a depth of 0 indicating minimal specificity.\n\t\tfor term in self.terms():\n\t\t\tif term.id not in depths:\n\t\t\t\tdepths[term.id] = 0\n\t\t\n\t\t# Return the dictionary mapping term IDs to their depth in the hierarchy.\n\t\treturn(depths)", "def _analyze(node: dict, depth=0, info=defaultdict(int)):\n info[\"depth\"] = max(info[\"depth\"], depth)\n for key in node.keys():\n if key == ITEMSKEY:\n info[\"georecord_containers\"] += 1\n info[\"georecord_items\"] += len(node[key])\n elif key == SUFFIXKEY:\n info[\"suffix_containers\"] += 1\n info[\"suffix_items\"] += len(node[key])\n else:\n info[\"prefix_nodes\"] += 1\n _analyze(node[key], depth + 1, info)\n return info", "def recluster(input_jet, alpha=None):\n\n def _rec(jet, parent, node_id, outers_list):\n \"\"\"\n Recursive function to get a list of the leaves\n \"\"\"\n if jet[\"tree\"][node_id, 0] == -1:\n outers_list.append(jet[\"content\"][node_id])\n else:\n _rec(jet, node_id, jet[\"tree\"][node_id, 0], outers_list)\n _rec(jet, node_id, jet[\"tree\"][node_id, 1], outers_list)\n\n return outers_list\n\n outers = []\n jet_const = np.asarray(_rec(input_jet, -1, input_jet[\"root_id\"], outers))\n\n raw_tree, \\\n idx, \\\n jet_content, \\\n root_node, \\\n Nconst, \\\n N_leaves_list, \\\n linkage_list = ktAntiktCA(jet_const, alpha=alpha)\n\n tree, \\\n content, \\\n node_id, \\\n tree_ancestors = _traverse(root_node,\n jet_content,\n tree_dic=raw_tree,\n root_idx=None,\n Nleaves=Nconst,\n )\n\n jet = {}\n jet[\"root_id\"] = 0\n jet[\"tree\"] = np.asarray(tree).reshape(-1, 2)\n jet[\"content\"] = np.asarray([np.asarray(c) for c in content]).reshape(-1, 2)\n\n # print(jet)\n\n # 
Save reclustered tree\n out_dir = 'data/'\n algo = str(jet_dic[\"name\"]) + '_' + str(alpha)\n out_filename = out_dir + str(algo) + '.pkl'\n print('out_filename=', out_filename)\n with open(out_filename, \"wb\") as f:\n pickle.dump(jet, f, protocol=2)\n\n return node_id, linkage_list, Nconst, tree_ancestors", "def linkage(self):\n self.tree = {}\n un_linked = []\n for i in range(len(self.leaves)):\n leaf = self.leaves[i]\n un_linked.append({\n 'id': i,\n 'x': 0,\n 'y': 0,\n 'value': 0,\n 'set': leaf,\n 'children': []\n })\n pass\n while len(un_linked) > 1:\n # for i in tqdm(range(len(un_linked))):\n # print(\"Linking... {} nodes left\".format(len(un_linked)))\n for node in un_linked:\n for d in node['set']:\n node['x'] += d['x']\n node['y'] += d['y']\n node['value'] += d['value']\n pass\n node['x'] /= len(node['set'])\n node['y'] /= len(node['set'])\n node['value'] /= len(node['set'])\n pass\n # min_dif = ((un_linked[1]['x'] - un_linked[0]['x']) ** 2 + (un_linked[1]['y'] - un_linked[0]['y']) ** 2) \\\n # * self._alpha + (un_linked[1]['value'] - un_linked[0]['value']) * (1 - self._alpha)\n min_dif = ((un_linked[1]['x'] - un_linked[0]['x']) ** 2 + (un_linked[1]['y'] - un_linked[0]['y']) ** 2)\n min_cp = [0, 1]\n for i in range(len(un_linked) - 1):\n for j in range(i + 1, len(un_linked)):\n # dif = self._alpha * ((un_linked[j]['x'] - un_linked[i]['x']) ** 2\n # + (un_linked[j]['x'] - un_linked[i]['x']) ** 2) \\\n # + (1 - self._alpha) * (un_linked[j]['value'] - un_linked[i]['value'])\n dif = ((un_linked[j]['x'] - un_linked[i]['x']) ** 2\n + (un_linked[j]['x'] - un_linked[i]['x']) ** 2)\n if dif < min_dif:\n min_dif = dif\n min_cp = [i, j]\n pass\n pass\n pass\n set_a = []\n for each in un_linked[min_cp[0]]['set']:\n set_a.append(each)\n pass\n for each in un_linked[min_cp[1]]['set']:\n set_a.append(each)\n pass\n next_un_linked = []\n new_children = []\n if len(un_linked[min_cp[0]]['children']) != 0:\n new_children.append({'children': un_linked[min_cp[0]]['children'],\n 'value': len(un_linked[min_cp[0]]['set'])})\n pass\n else:\n new_children.append({'id': un_linked[min_cp[0]]['id'],\n 'value': len(un_linked[min_cp[0]]['set'])})\n if len(un_linked[min_cp[1]]['children']) != 0:\n new_children.append({'children': un_linked[min_cp[1]]['children'],\n 'value': len(un_linked[min_cp[1]]['set'])})\n pass\n else:\n new_children.append({'id': un_linked[min_cp[1]]['id'],\n 'value': len(un_linked[min_cp[1]]['set'])})\n pass\n next_un_linked.append({\n 'x': 0,\n 'y': 0,\n 'value': 0,\n 'set': set_a,\n 'children': new_children\n })\n del un_linked[min_cp[0]]['set']\n del un_linked[min_cp[0]]['x']\n del un_linked[min_cp[0]]['y']\n # del un_linked[min_cp[0]]['value']\n del un_linked[min_cp[1]]['set']\n del un_linked[min_cp[1]]['x']\n del un_linked[min_cp[1]]['y']\n # del un_linked[min_cp[1]]['value']\n for s in range(len(un_linked)):\n if s not in min_cp:\n next_un_linked.append(un_linked[s])\n pass\n pass\n un_linked = next_un_linked\n pass\n del un_linked[0]['set']\n del un_linked[0]['x']\n del un_linked[0]['y']\n # del un_linked[0]['value']\n self.tree = un_linked[0]\n self._count = 0\n\n self.tree = self._resolve(self.tree)\n return", "def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])" ]
[ "0.67828876", "0.580559", "0.565238", "0.5516238", "0.54888904", "0.5372797", "0.5338573", "0.53100866", "0.5291282", "0.5277046", "0.52112967", "0.51765364", "0.5080656", "0.5072973", "0.5041243", "0.50386095", "0.5009236", "0.49799195", "0.4965747", "0.49480772", "0.49470168", "0.49305123", "0.4926746", "0.4919436", "0.49147004", "0.4905486", "0.49010056", "0.48972112", "0.4883595", "0.48776057" ]
0.8112684
0
Show all players with id in players.json, in alphabetical or ranking order according to the user's choice
def show_players(self) -> None: players_list = [] for player in PLAYERS: data_player = (( str(player.get("first_name")) + " " + str(player.get("last_name")) + " | " + str(player.get("birthday")) + " | " + str(player.get("genre")) + " | " + str(player.get("ranking")) )) players_list.append(data_player) utils.clear_terminal() print( "Do you want the list of players by alphabetical order or by ranking ? \n" "1 - Ranking players list \n" "2 - Alphabetical players list" ) choice = check.request_selection_with_number("ranking", "alphabetical", "None") if choice == "ranking": player_id = 0 players_list = sorted(players_list, key=lambda player: players_list[4]) utils.clear_terminal() print("==========================================") print("List of all Players in ranking order : ") print("==========================================") for player in players_list: player_id += 1 print(str(player_id) + " : " + player) elif choice == "alphabetical": player_id = 0 players_list.sort() utils.clear_terminal() print("============================================") print("List of all Players in alphabetical order : ") print("============================================") for player in players_list: player_id += 1 print(str(player_id) + " : " + player)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? \\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)", "def display_tournament_player_list(self):\r\n tournament_name = self.input_name(\"nom du tournoi\")\r\n tournament = tournaments_table.get(Query().Nom == tournament_name)\r\n player_list = list()\r\n for rated_player in tournament['Classement']:\r\n player_list.append(players_table.get(doc_id=rated_player[0]))\r\n user_choice = self.input_user_choice_sorting()\r\n print(\"Liste de tous les joueurs du tournoi de\", tournament_name, \": \")\r\n if user_choice == '1':\r\n player_list.sort(key=lambda x: x['Nom'])\r\n for player in player_list:\r\n print(player)\r\n elif user_choice == '2':\r\n player_list.sort(reverse=True, key=lambda x: x['ELO'])\r\n for player in player_list:\r\n print(player)", "def get_queryset(self):\n return Player.objects.order_by('player_name')", "def display_imported_players(players_id_list):\r\n for player_id in players_id_list:\r\n print(players_table.get(doc_id=player_id))", "def select_players():\n database = TinyDB('db.json')\n # recuperation de tous les joueurs de la base de données\n list_players = database.table('players').all()\n sorted(list_players, key=itemgetter('Classement'), reverse=True)\n dico_trie = sorted(list_players, key=itemgetter('Prenom', 'Nom'))\n return dico_trie", "def get_queryset(self):\n return Player.Player.objects.order_by('first_name')", "def player_list():\n page = request.args.get(\"page\", \"1\")\n count = request.args.get(\"count\", \"12\")\n team_id = request.args.get(\"team_id\")\n\n if not team_id:\n raise BadRequest(\"Nama team tidak boleh kosong\")\n\n # type conversion\n page = int(page)\n count = int(count)\n team_id = int(team_id)\n\n player = player_ctrl.get_list(page=page, count=count, team_id=team_id)\n\n response = {\n \"status\": 200 if player.items != [] else 204,\n \"has_next\": player.has_next,\n \"has_prev\": player.has_prev,\n \"total\": player.total,\n \"result\": _entity_player_list(player.items)\n }\n\n return jsonify(response)", "def display_actor_list(self):\r\n actor_list = list()\r\n for actor in players_table:\r\n actor_list.append(actor)\r\n user_choice = self.input_user_choice_sorting()\r\n print(\"Liste de tous les acteurs: \")\r\n if user_choice == '1':\r\n actor_list.sort(key=lambda x: x['Nom'])\r\n for player in actor_list:\r\n print(player)\r\n elif user_choice == '2':\r\n 
actor_list.sort(reverse=True, key=lambda x: x['ELO'])\r\n for player in actor_list:\r\n print(player)", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "def bestplayers():\n players = Player.query.filter_by(is_admin=False).all()\n players = list(reversed(sorted(players, key=lambda player: player.points)))\n for player in players:\n player.image = url_for('static', filename='images/players/{}'.format(player.image))\n player.team_name = player.team.name\n player.team_logo = url_for('static', filename='images/teams/{}'.format(player.team.logo_image))\n\n return render_template('standings/best-players.html', players=players, title='Best Players')", "async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output", "def players():\n try:\n return template('players.html', players=SERVER.players.values())\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show a list of all registered players on the \"\n \"server\")\n return JsonResponse.error(101)", "def test_retrieve_players(self):\n Player.objects.create(name='Mayita', victories=0,\n defeats=0)\n Player.objects.create(name='Moiso', victories=0,\n defeats=0)\n\n res = self.client.get(PLAYERS_URL)\n\n players = Player.objects.all().order_by('-name')\n serializer = PlayerSerializer(players, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def tourplayers(request):\n active_players = PlayerModel.objects.order_by('number')\n\n context = {\n 'active_players': active_players,\n }\n\n return render(request, 'tourPlayers.html', context=context)", "def get_players(self, hero, data, verbose):\n\n if len(self.players) > 1:\n out = f\"\\n\\nplayers:\"\n for name, player in data[\"players\"].items():\n if name != hero:\n out += \"\\n \" + name\n if verbose:\n out += Game._verbose_print(player)\n else:\n out = f\"\\n\\nThere's nobody else here.\"\n\n return out", "def get_user_players(self, userid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/standings.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/playerInfo.phtml?pid=' + str(userid),\r\n headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n # title = soup.title.string\r\n # community = soup.find_all('table', border=0)[1].a.text\r\n # username = re.search('\\((.*?)\\)', soup.find('div', id='title').text).group(1)\r\n players_info = list()\r\n for i in soup.find('table', cellpadding=2).find_all('tr')[1:]:\r\n cad = 
i.find_all('td')\r\n player_id = int(re.findall('\\d+', i.find_all('img')[0]['src'])[0])\r\n name = cad[2].text.strip()\r\n club = cad[3].find('img')['alt']\r\n club_id = int(re.findall('\\d+', i.find_all('img')[1]['src'])[0])\r\n value = float(cad[4].text.replace(\".\", \"\"))\r\n totalpoints = float(cad[5].text)\r\n position = self.translate_position(cad[6].text)\r\n players_info.append([player_id, name, club_id, club, value, totalpoints, position])\r\n return players_info", "def get_player_list(tournament):\n database = TinyDB('db.json')\n players_table = database.table('players')\n # retrieving the list of identifiers of players following a tournament\n id_list = tournament['Liste indice Joueurs']\n player_list = []\n for player_id in id_list:\n # getting the players\n player = players_table.get(doc_id=player_id)\n player_list.append(player)\n return player_list", "async def get_all_top_10(self) -> 'Response':\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/vnd.lichess.v3+json'\n }\n response = await self._client.request(method=RequestMethods.GET, url=USERS_PLAYER_URL, headers=headers)\n return response", "def players_report(self, sort_order='alpha'):\n self._view.report(self._model.get_players(sort_order))", "def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id", "def get_players(team_id: int) -> list[Player]:\n\n players = Player.query.filter_by(team_id=team_id).order_by(Player.position.asc()).all()\n\n return players", "def getindex(self):\n players = [dict(plr) for plr in meta.Session.query(model.Player).all()]\n return {'success': True, 'data': players}", "def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players", "def players_list(self):\n self.db = TinyDB('Models/db.json')\n self.query = Query()\n player_table = self.db.table('player_table')\n return player_table", "def sort_player(self, player):\r\n if not player:\r\n return random.choice(self.players)\r\n return player", "def get_all_players():\n players = {}\n\n for char in list(string.ascii_uppercase):\n req = requests.get(\n 'http://www.euroleague.net/competition/players?listtype=alltime&letter=' + char\n )\n\n soup = BeautifulSoup(req.text, 'html5lib')\n\n mydivs = soup.findAll('div', {'class': 'items-list'})\n\n for div in mydivs:\n itemdivs = soup.findAll('div', {'class': 'item'})\n\n\n for div in itemdivs:\n links = div.findAll('a')\n for index, link in enumerate(links):\n if index % 2 == 0:\n player = link.text.replace(',', '').strip()\n link['href'] = link['href'].replace('?', '')\n result = re.findall(\n '/competition/players/showplayerpcode=(.*)&seasoncode=', link['href']\n )\n code = result[0]\n players[code] = player\n \n return players", "def getAllPlayers():\n result_dict = {}\n teams = get_team()\n players = db.session.query(Player, Team).join(Team, Player.team_id == Team.team_id)\n for each in players:\n if each.Player.player_image is not None:\n each.Player.player_image = b64encode(each.Player.player_image)\n result_dict['teams'] = teams\n result_dict['players']= players\n return render_template('viewplayers.html', result=result_dict)", 
"def printPlayerOrder(self):\n print(\"The order of players is ...\")\n print()\n for i in range(len(self.playerList)):\n print(\"\\t\" + str(i+1) + \".\", end = \" \")\n print(self.playerList[i])\n print()", "def get_player_list():\r\n return list(\r\n pymongo.MongoClient('mongodb://localhost:27017/')['wows']['na_player_list'].find( # !!!!!!!!!!!!!!!!!!!!!!!!!\r\n {'scraped': False}, {'_id': 0, 'player_id': 1, 'player_name': 1, 'clan': 1}\r\n )\r\n )", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names" ]
[ "0.75460577", "0.6925172", "0.662122", "0.65018445", "0.64117795", "0.62326056", "0.6039298", "0.60154843", "0.595802", "0.5942938", "0.59348524", "0.590942", "0.58476466", "0.5783551", "0.5783031", "0.57642424", "0.57592744", "0.5758528", "0.56998295", "0.56920516", "0.5680249", "0.56766224", "0.5643718", "0.5642398", "0.5639138", "0.56326336", "0.56093675", "0.5569737", "0.5559502", "0.55572957" ]
0.7615844
0
Show the players of a specific tournament
def show_players_specific_tournament(self) -> None: id_choice = check.request_id(TOURNAMENTS) tournament_data = TOURNAMENTS.get(doc_id=id_choice) if tournament_data.get("players") == {}: print("\n This tournaments has no players yet") else: players_list = tournament_data.get("players") deserialized_player_list = [] for player_data in players_list: deserialized_player = Player(**json.loads(player_data)) deserialized_player_list.append(deserialized_player) utils.clear_terminal() print( "Do you want the list of players by alphabetical order or by ranking ? \n" "1 - Ranking players list \n" "2 - Alphabetical players list" ) choice = check.request_selection_with_number("alphabetical", "ranking", "None") if choice == "alphabetical": utils.clear_terminal() deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name) for deserialized_player in deserialized_player_list: print(deserialized_player) elif choice == "ranking": utils.clear_terminal() deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking) for deserialized_player in deserialized_player_list: print(deserialized_player)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tournament(self):\n pass", "def display_tournament_player_list(self):\r\n tournament_name = self.input_name(\"nom du tournoi\")\r\n tournament = tournaments_table.get(Query().Nom == tournament_name)\r\n player_list = list()\r\n for rated_player in tournament['Classement']:\r\n player_list.append(players_table.get(doc_id=rated_player[0]))\r\n user_choice = self.input_user_choice_sorting()\r\n print(\"Liste de tous les joueurs du tournoi de\", tournament_name, \": \")\r\n if user_choice == '1':\r\n player_list.sort(key=lambda x: x['Nom'])\r\n for player in player_list:\r\n print(player)\r\n elif user_choice == '2':\r\n player_list.sort(reverse=True, key=lambda x: x['ELO'])\r\n for player in player_list:\r\n print(player)", "def display_tournament_list():\r\n for tournament in tournaments_table:\r\n print(tournament['Nom'])", "def tournament(self, name):\n self.name = name\n q = Query()\n data = TinyDB('app/data/db_tournaments.json').table('tournaments')\n\n self.search_result = data.search(\n (q.name == self.name) |\n (q.place == self.name)\n )\n\n if len(self.search_result) == 0:\n v_menu.View().search('tournament_none')\n return 'None'\n\n elif len(self.search_result) == 1:\n v_menu.View().search_tournaments(\n 'find_tournament',\n self.search_result[0]['name'],\n self.search_result[0]['place'],\n self.search_result[0]['start']\n )\n return self.search_result[0]['id']\n\n elif len(self.search_result) >= 2:\n for i in range(len(self.search_result)):\n v_menu.View().search_tournaments(\n 'find_tournaments',\n self.search_result[i]['name'],\n self.search_result[i]['place'],\n self.search_result[i]['start'], i+1\n )\n\n self.player_number = c_input.Input().select_menu_number(\n len(self.search_result))\n\n return self.search_result[self.player_number-1]['id']", "def display_all_round_matches(self):\r\n tournament_name = self.input_name(\"nom du tournoi\")\r\n tournament = tournaments_table.get(Query().Nom == tournament_name)\r\n print(\"Matches du tournoi de\", tournament_name, \"ayant eu lieu: \")\r\n for match in tournament['Matches joues']:\r\n print(players_table.get(doc_id=match[0])['Nom'], \"(BLANCS) contre\",\r\n players_table.get(doc_id=match[1])['Nom'], \"(NOIRS)\")", "def printWinner(self, player):\n winner = player.getName()\n if winner == \"You\":\n print player.getName(), \"have won the game\"\n else:\n print player.getName(), \"has won the game\"", "def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)", "def select_winner(self, match: Match):\n self.clean()\n print(f\"Bienvenue dans le gestionnaire de tournois d'échec.\\nSuivi des \"\n f\"Matchs opposant : {match.players[0].fullname} à {match.players[1].fullname}\")\n print(\"\\n\\tSelection du vainqueur : \\n\")\n\n print(f\"\\t1 : {match.players[0].fullname}\")\n print(f\"\\t2 : {match.players[1].fullname}\")\n print(\"\\t3 : En cas d'égalité\")\n\n print(\"\\n\" * 5)", "def show_bench_player(self):\n if (len(self.bench_players) == 0):\n print(\"The bench is empty.\")\n else:\n for i in range(len(self.bench_players)):\n print(self.bench_players[i].name)", "def __str__(self):\n return self.playername", "def __str__(self):\n return \"Player: {}\".format(self.name)", "def tournament_matches_report(self):\n tournament_id = self.select_tournament()\n self._model = Tournament({'id': tournament_id})\n self._view.report(self._model.get_matches())\n\n self.tournament_menu()", "def select_player(n):\n pygame.display.set_caption(\"You selected: \" + PROF[n])", "def display_selected_team(team_id):\n if request.method == 'GET':\n result_dict = {}\n teams = get_team()\n players = Player.query.join(Team, Player.team_id==team_id).\\\n add_columns(Player.player_fname,Player.player_lname,Team.team_name,Player.player_id)\n result_dict['teams'] = teams\n result_dict['players']= players\n return render_template('viewplayers.html', result=result_dict)", "def display(self) -> None:\n # ask info to display to controller\n self.list_matches = ct.Controls.get_current_matches()\n for elem in self.list_matches: # setup the data to be displayed\n p1 = str(elem.player1)\n p2 = str(elem.player2)\n match = p1 + ' vs ' + p2\n self.lignes.append({'match_instance': elem, 'label': match, 'choice': ['match nul', p1, p2],\n 'result': None})\n for index, elem in enumerate(self.lignes): # display the data\n self.widgets.append(self.my_simple_line(self.master, elem['label'], index + 1, 0, 1, 1, 10, 10))\n elem['result'], menu_option = self.my_option_menu(self.master, elem['choice'], index + 1, 1, 1, 1, 10, 10)\n self.widgets.append(menu_option)\n # display the button to go through next step of tournament\n self.widgets.append(self.my_button(self.master, 'Clôturer ce tour', 0, len(self.lignes) + 1, self.save_scores))", "def display_player(cls, player, title=False):\n menu = \"-\".center(cls.MAX_LENGTH, '-') + \"\\n\"\n if title:\n menu += str(title).center(cls.MAX_LENGTH, '-') + \"\\n\\n\"\n menu += \"Last Name\".ljust(cls.NAME_LENGTH) + \"First Name\".ljust(cls.NAME_LENGTH)\n menu += \"Gender\".ljust(cls.GENDER_LENGTH) + \"Date Birth\".ljust(cls.DATE_LENGTH)\n 
menu += \"Elo\".ljust(cls.ELO_LENGTH) + \"\\n\"\n if isinstance(player, list):\n s = menu\n for instance_player in player:\n s += instance_player.last_name.ljust(cls.NAME_LENGTH)\n s += instance_player.first_name.ljust(cls.NAME_LENGTH)\n s += instance_player.gender.ljust(cls.GENDER_LENGTH)\n s += instance_player.date_birth.ljust(cls.DATE_LENGTH)\n s += instance_player.ranking.ljust(cls.ELO_LENGTH) + \"\\n\"\n print(s)\n else:\n s = menu\n s += player.last_name.ljust(cls.NAME_LENGTH)\n s += player.first_name.ljust(cls.NAME_LENGTH)\n s += player.gender.ljust(cls.GENDER_LENGTH)\n s += player.date_birth.ljust(cls.DATE_LENGTH)\n s += player.ranking.ljust(cls.ELO_LENGTH) + \"\\n\"\n print(s)", "def display_make_a_tournament():\n display_head_menu(\"Création du tournois\")\n tournament_dict = {}\n tournament_dict.update({\"name\": display(\"Nom: \")})\n tournament_dict.update({\"place\": display(\"Lieu: \")})\n tournament_dict.update({\"starting_date\": display(\"Date de début: \", \"date\")})\n tournament_dict.update({\"ending_date\": display(\"Date de fin: \", \"date\", \"\", tournament_dict[\"starting_date\"])})\n tournament_dict.update({\"kind\": type_str()})\n print(f'Type : {tournament_dict[\"kind\"]}')\n if display(\"modifier le nombre de ronde (defaut 4) (o/n)\\n\", \"choice\", cases=[\"o\", \"n\"]) == \"o\":\n tournament_dict.update({\"number_of_round\": display(\"Nombre de ronde: \", \"natural\")})\n else:\n pass\n tournament_dict.update({\"description\": display(\"Description: \")})\n return tournament_dict", "def found_specific_player(self) -> Player:\n search_question = ('Nom du joueur recherché : ',\n 'Prénom du joueur recherché : ')\n search_response = []\n for question in search_question:\n valid = self.ask_and_store_text(question)\n while not valid[0]:\n valid = self.ask_and_store_text(question)\n search_response.append(valid[1])\n\n for player in Player.PLAYERS:\n if player.name.upper() == search_response[0].upper() and \\\n player.first_name.capitalize() == search_response[1].capitalize():\n return player\n\n self.view_menu.stand_by_msg(\"Joueur introuvable !\\n\"\n \"Rechercher à nouveau ou créer le joueur\")", "def view_saved_tournament(self, subtitle=\"Selection des sauvegardes :\", *, data: dict):\n\n self.clean()\n print(f\"Bienvenue dans le gestionnaire de tournois d'échec.\\n{subtitle}\")\n print(\"\\n\" * 1)\n for n, tournament in enumerate(data):\n print(\n f\"\\t{n + 1} : {tournament['id']} - {tournament['name']} -\"\n f\" {tournament['tournament_date']} - {tournament['location']}\")", "def display_tournament(self, title: str, subtitle: str = \"\\n\", datas: list = None):\n self.clean()\n print(f\"{title}\")\n print(f\"{subtitle}\\n\")\n for data in datas:\n print(f\"\\t{data}\")\n print(\"\\n\" * 2)\n self.stand_by_msg(\"\")", "def rd2leaderboard(request):\n\n #Add views\n playing_players = Rd2SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd2Leaderboard.html', context=context)", "def rd4leaderboard(request):\n\n #Add views\n playing_players = Rd4SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd4Leaderboard.html', context=context)", "async def team_show(self, ctx: commands.Context, team_id: int):\n try:\n if team_id not in self.teams:\n self.teams[team_id] = await self._get_team_data(team_id)\n team = self.teams[team_id]\n except KeyError:\n await 
ctx.send(f'Unrecognized team ID {team_id}. If you think this is a '\n 'valid team ID, perhaps no one from that team has '\n 'registered a Discord account yet.')\n return\n\n if ctx.guild:\n members, users = self._get_members_if_possible(\n [user.id for user in team.users], ctx.guild)\n else:\n members, users = [], team.users\n\n pages = paginate_team_data(members, users,\n [channel for channel in team.channels\n if channel and channel.guild == ctx.guild])\n\n embeds = [\n discord.Embed(title=f'**{team.display_name} (ID: {team.team_id})**',\n color=discord.Color(0x22aaff),\n description=content)\n for content in pages]\n if len(embeds) == 1:\n await ctx.send(embed=embeds[0])\n else:\n await menu(ctx, embeds, DEFAULT_CONTROLS, timeout=120)", "def display_all_tournament_rounds(self):\r\n tournament_name = self.input_name(\"nom du tournoi\")\r\n tournament = tournaments_table.get(Query().Nom == tournament_name)\r\n print(\"Rondes du tournoi de\", tournament_name, \": \")\r\n for current_round in tournament['Rondes']:\r\n print(current_round)", "def start_tournament(self):\n cmd = '{}startTournament'.format(self.console)\n self.write_command(cmd)", "def view_pokemon(self, team_name: str, team_choice: str) -> None:\n\n print(f\"\\n\\u001b[1m\\u001b[4mTeam\\u001b[0m: \\u001b[7m {team_name} \\u001b[0m\")\n print(f\"\\n\\u001b[4mPokémon Slot #{int(team_choice)}\\u001b[0m\\n\\n\")\n print(f\"\\u001b[1mName\\u001b[0m: {self.name}\")\n print(f\"\\u001b[1mPokédex ID:\\u001b[0m {self.id}\\n\")\n print(f\"\\u001b[1mHeight\\u001b[0m: {self.height} decimetres\")\n print(f\"\\u001b[1mWeight\\u001b[0m: {self.weight} hectograms\\n\")\n\n if len(self.types) == 2:\n print(f\"\\u001b[1mTypes\\u001b[0m: {self.types[0]}\")\n print(f\" {self.types[1]}\")\n else:\n print(f\"\\u001b[1mType\\u001b[0m: {self.types[0]}\")\n\n print(\"\")\n print(\"\\u001b[1mAbilities\\u001b[0m:\")\n if len(self.abilities) > 0:\n for ability in self.abilities:\n print(f\" - \\u001b[4m{ability}\\u001b[0m:\")\n print(f\" {self.abilities[ability]}\")\n else:\n print(\" This Pokémon has no abilities.\")\n\n print(\"\")\n print(\"\\u001b[1mCurrent Move Set\\u001b[0m:\")\n if len(self.move_set) > 0:\n for move in self.move_set:\n print(f\" - {move.name}\")\n else:\n print(\" This Pokémon cannot learn any moves.\")\n\n print(\"\\n\")", "def show_contest_winner(self, db_session):\n users_contest_list = db_session.query(db.User).filter(db.User.entered_in_contest.isnot(False)).all()\n if len(users_contest_list) > 0:\n winner = random.choice(users_contest_list)\n self._add_to_chat_queue('The winner is {}!'.format(winner.name))\n else:\n self._add_to_chat_queue('There are currently no entrants for the contest.')", "def get_players(self, hero, data, verbose):\n\n if len(self.players) > 1:\n out = f\"\\n\\nplayers:\"\n for name, player in data[\"players\"].items():\n if name != hero:\n out += \"\\n \" + name\n if verbose:\n out += Game._verbose_print(player)\n else:\n out = f\"\\n\\nThere's nobody else here.\"\n\n return out", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "def player(self):\n return self.players[self.tictactoe.turn]" ]
[ "0.6981447", "0.6793419", "0.6784374", "0.66762227", "0.6477427", "0.63487387", "0.6321924", "0.63151205", "0.6281794", "0.6260333", "0.6196538", "0.6196334", "0.61410666", "0.6126508", "0.61263525", "0.6125865", "0.60329247", "0.60239154", "0.6022243", "0.6020785", "0.60054594", "0.5993188", "0.5980853", "0.59743863", "0.5947143", "0.5937855", "0.59320474", "0.59102523", "0.59034026", "0.5902901" ]
0.8030547
0
Request 8 ids of players saved in players.json and return a list of ids
def create_players_id_dict(self) -> list: players_id = [] self.show_players() print("\n" + "Enter id of wanted players : ") while len(players_id) < 8: while True: id_choice = check.request_id(PLAYERS) if check.check_not_same_value(players_id, id_choice) is True: players_id.append(id_choice) break return players_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_players_id(player_number):\n database = TinyDB('db.json')\n players_table = database.table('players')\n # getting the last eight players\n id_list = []\n for i in range(1, player_number + 1):\n # getting player\n data = players_table.all()[-i]\n # Obtaining a user ID\n id_list.append(data.doc_id)\n return id_list", "def get_player_list(tournament):\n database = TinyDB('db.json')\n players_table = database.table('players')\n # retrieving the list of identifiers of players following a tournament\n id_list = tournament['Liste indice Joueurs']\n player_list = []\n for player_id in id_list:\n # getting the players\n player = players_table.get(doc_id=player_id)\n player_list.append(player)\n return player_list", "async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output", "def get_players_data(players):\n\n users_response = requests.get(\n url=f'{settings.USER_MANAGER_URL}/user-data/',\n params={'player_id': players},\n timeout=5 # in sec\n )\n if users_response.status_code == 200:\n return users_response.json().get('players')\n return {}", "def player_list():\n page = request.args.get(\"page\", \"1\")\n count = request.args.get(\"count\", \"12\")\n team_id = request.args.get(\"team_id\")\n\n if not team_id:\n raise BadRequest(\"Nama team tidak boleh kosong\")\n\n # type conversion\n page = int(page)\n count = int(count)\n team_id = int(team_id)\n\n player = player_ctrl.get_list(page=page, count=count, team_id=team_id)\n\n response = {\n \"status\": 200 if player.items != [] else 204,\n \"has_next\": player.has_next,\n \"has_prev\": player.has_prev,\n \"total\": player.total,\n \"result\": _entity_player_list(player.items)\n }\n\n return jsonify(response)", "def get_players(self):\r\n return self.players.values()", "def load_fixture_player_stats(self):\n stats_list = []\n\n print(\"Getting fixture players..\")\n with Pool(self.pool) as p:\n fixture_info = list(tqdm(p.imap(self.fixture_info_singel, self.fixture_ids, chunksize=1), total=len(self.fixture_ids)))\n print('Getting data from workers..')\n i = 0\n for info in fixture_info:\n stats = {}\n if info:\n stats = {info['id']: []}\n if 'teamLists' in info:\n team_list = info['teamLists']\n for lineups in team_list:\n if lineups:\n team_id = lineups['teamId']\n lineup = lineups['lineup']\n substitutes = lineups['substitutes']\n for l in lineup:\n stats[info['id']].append(l['id'])\n for s in substitutes:\n stats[info['id']].append(s['id'])\n else:\n i += 1\n if stats:\n stats_list.append(stats)\n print('Completed')\n if i >0:\n print(f'{i} games retreived had no stats')\n return stats_list", "def players(self):\n return self._get(\"players\")", "def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]", "def teammates_player_ids(self):\n return [p.player_id for p in self.teammates]", "def get_players():\n nfl_players = redis_cache('nfl_players_key', NFL_Player_2015.query.all)\n return nfl_players", "def get_user_players(self, userid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": 
\"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/standings.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/playerInfo.phtml?pid=' + str(userid),\r\n headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n # title = soup.title.string\r\n # community = soup.find_all('table', border=0)[1].a.text\r\n # username = re.search('\\((.*?)\\)', soup.find('div', id='title').text).group(1)\r\n players_info = list()\r\n for i in soup.find('table', cellpadding=2).find_all('tr')[1:]:\r\n cad = i.find_all('td')\r\n player_id = int(re.findall('\\d+', i.find_all('img')[0]['src'])[0])\r\n name = cad[2].text.strip()\r\n club = cad[3].find('img')['alt']\r\n club_id = int(re.findall('\\d+', i.find_all('img')[1]['src'])[0])\r\n value = float(cad[4].text.replace(\".\", \"\"))\r\n totalpoints = float(cad[5].text)\r\n position = self.translate_position(cad[6].text)\r\n players_info.append([player_id, name, club_id, club, value, totalpoints, position])\r\n return players_info", "def get_player_list():\r\n return list(\r\n pymongo.MongoClient('mongodb://localhost:27017/')['wows']['na_player_list'].find( # !!!!!!!!!!!!!!!!!!!!!!!!!\r\n {'scraped': False}, {'_id': 0, 'player_id': 1, 'player_name': 1, 'clan': 1}\r\n )\r\n )", "def test_gridironfootballplayers_id_get(self):\n pass", "def get_users(users_list):\n formatted_list = \",\".join(users_list)\n built_get = \"%s&steamids=%s\" % (GETPLAYERSUMM_URL, formatted_list)\n try:\n req = requests.get(built_get)\n if req.status_code == 200:\n response = json.loads(req.text)[\"response\"][\"players\"]\n players = {}\n for player in response:\n players[player[\"steamid\"]] = player\n return players\n except requests.exceptions.RequestException as req_exc:\n print(req_exc)\n\n return []", "def players(self, game: str) -> Response:\n\n endpoint = '/api/players'\n query = f'?game={game}'\n return self.fetch(endpoint, query)", "def get_players(n_players):\n\n if n_players < 2 or 8 < n_players:\n raise ValueError('A game must have between 2 to 8 players. You input {} players.'.format(n_players))\n\n return {classes.Player(p) for p in range(n_players)}", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. 
The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "def add_players(game: LolGame, players: List[dict], add_page_id: bool = False) -> LolGame:\n\n for team_side in game[\"teams\"]:\n team_side_leaguepedia = \"1\" if team_side == \"BLUE\" else \"2\"\n\n for idx, game_player in enumerate(game[\"teams\"][team_side][\"players\"]):\n try:\n # We get the player object from the Leaguepedia players list\n player_latest_data = next(\n p\n for p in players\n if p[\"Side\"] == team_side_leaguepedia\n and lit.get_id(p[\"Champion\"], object_type=\"champion\") == game_player[\"championId\"]\n )\n\n game_player[\"role\"] = role_translation[player_latest_data[\"gameRoleNumber\"]]\n\n unique_identifiers = LeaguepediaPlayerIdentifier(\n name=player_latest_data.get(\"currentGameName\"),\n irlName=player_latest_data.get(\"irlName\"),\n country=player_latest_data.get(\"Country\"),\n residency=player_latest_data.get(\"Residency\"),\n age=player_latest_data.get(\"Age\"),\n role=player_latest_data.get(\"Role\"),\n team=player_latest_data.get(\"Team\"),\n kills=player_latest_data.get(\"Kills\"),\n deaths=player_latest_data.get(\"Deaths\"),\n assists=player_latest_data.get(\"Assists\"),\n ss=player_latest_data.get(\"SummonerSpells\"),\n gold=player_latest_data.get(\"Gold\"),\n cs=player_latest_data.get(\"CS\"),\n items=player_latest_data.get(\"Items\"),\n trinket=player_latest_data.get(\"Trinket\"),\n keystoneMastery=player_latest_data.get(\"KeystoneMastery\"),\n keystoneRune=player_latest_data.get(\"KeystoneRune\"),\n runes=player_latest_data.get(\"Runes\"),\n )\n\n if add_page_id:\n unique_identifiers[\"pageId\"] = int(player_latest_data[\"pageId\"])\n\n game_player[\"uniqueIdentifiers\"] = {\"leaguepedia\": unique_identifiers}\n\n except StopIteration:\n # Since we cannot get the role properly, we try to infer it\n game_player[\"role\"] = list(role_translation.values())[idx]\n\n return game", "def get_players(team_id: int) -> list[Player]:\n\n players = Player.query.filter_by(team_id=team_id).order_by(Player.position.asc()).all()\n\n return players", "def load_player_ids():\n print(\"Loading registered player ids from CSV file... 
\", end='', flush=True)\n player_ids = {}\n with open(ZLIST_FILE, 'r', newline='') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for player_name, player_id in csv_reader:\n player_ids[player_name] = player_id\n print(\"Done.\")\n return player_ids", "def getPlayers(self):\n\t\tself.server.playerMutex.lock()\n\t\tplayers = [ (player[0], player[1][3]) for player in self.server.players.items() ]\n\t\tself.server.playerMutex.unlock()\n\t\treturn players", "def players(self):\n if self.players_cache is None:\n team_df = self.teams()\n self.players_cache = self.ea.players_endpoint(\n team_df[\"id\"].tolist())\n\n columns = [\"teamId\", \"playerId\", \"name\", \"position\"]\n all_players = []\n for team in self.players_cache[\"teams\"]:\n team_id = team[\"id\"]\n for plyr in team[\"roster\"][\"roster\"]:\n player_id = plyr[\"person\"][\"id\"]\n player_name = plyr[\"person\"][\"fullName\"]\n position = plyr[\"position\"][\"abbreviation\"]\n all_players.append({columns[0]: team_id,\n columns[1]: player_id,\n columns[2]: player_name,\n columns[3]: position})\n return pd.DataFrame(data=all_players, columns=columns)", "def display_imported_players(players_id_list):\r\n for player_id in players_id_list:\r\n print(players_table.get(doc_id=player_id))", "def get_player_id(player_name, sport_code):\n resp = requests.get(str.format('http://sports.yahoo.com/{0}/players', sport_code),\n params={ 'type': 'lastname', 'first': '1', 'query': player_name})\n results = []\n if resp.status_code != requests.codes['ok']:\n return results\n selector = lxml.etree.fromstring(resp.content, lxml.etree.HTMLParser())\n player_ids = selector.xpath('//table/tr[contains(td,\"Search Results\")]/following::tr[position()>1]/td[1]//@href')\n if player_ids:\n for p in player_ids:\n id = re.search(r'\\d+', p)\n if id:\n results.append(id.group(0))\n return results", "def players():\n try:\n return template('players.html', players=SERVER.players.values())\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show a list of all registered players on the \"\n \"server\")\n return JsonResponse.error(101)", "def test_lacrosseplayers_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/lacrosseplayers/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)", "def create_player_list(self, current_game):\n players = [Player(c['summonerId'], c['championId'], c['teamId']) for c in current_game['participants']]\n return players", "def get_players_info(team_name):\n # hit this url in browser or postman like http://127.0.0.1:5000/getPlayersInfo/TeamName and it will return json data\n final_player_list = []\n if request.method == 'GET':\n team_res = Team.query.filter_by(team_name=team_name).first()\n if team_res:\n player_res = Player.query.filter_by(team_id=team_res.team_id).all()\n for rec in range(len(player_res)):\n player_info = {}\n player_info['Player_First_Name'] = player_res[rec].player_fname\n player_info['Player_Lirst_Name'] = player_res[rec].player_lname\n player_info['Team'] = team_name\n player_info['Player_ID'] = player_res[rec].player_id\n player_info['Team_ID'] = player_res[rec].team_id\n final_player_list.append(player_info)\n return json.dumps({\"TeamInformation\": final_player_list})\n else:\n return json.dumps({team_name: \"Team is not available\"})" ]
[ "0.7470742", "0.6991253", "0.66275984", "0.6624603", "0.641931", "0.6296722", "0.628128", "0.6240206", "0.6214572", "0.61729085", "0.61693573", "0.615972", "0.61516213", "0.6151583", "0.6128112", "0.6100686", "0.6087615", "0.6070218", "0.6045414", "0.60428107", "0.60382414", "0.6019027", "0.5956603", "0.5940473", "0.5935035", "0.5919274", "0.5865493", "0.5840741", "0.58392775", "0.5830167" ]
0.76639163
0
Simply display a message if players.json is empty
def display_empty_players_file(self) -> None:
    utils.clear_terminal()
    print("\nNo players has been created yet")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "async def do_players():\n\n download = urllib.request.urlopen(server_api)\n data = json.loads(download.read())\n max = data['players']['max']\n now = data['players']['now']\n await bot.send_message(c, f'Max: {max}')\n await bot.send_message(c, f'Now: {now}')", "def players():\n try:\n return template('players.html', players=SERVER.players.values())\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show a list of all registered players on the \"\n \"server\")\n return JsonResponse.error(101)", "def won_game(self):\n for player in self.players:\n if len(player.cards) == 0:\n\n return True\n return False", "def player_exists(self, player):\n res = self._db.Players.aggregate([{'$match': {'Name': player}},\n {'$project':\n {'br': {'$ifNull': ['$br', 0]},\n 'fg': {'$ifNull': ['$fg', 0]}}}\n ])\n return list(res)", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "def enough_players():\n return True", "async def _players(self, ctx: Context):\n\n guild = ctx.guild\n\n player_role = await self.role_from_config(guild, \"player_id\")\n\n players = [\n user.mention for user in guild.members if player_role in user.roles\n ]\n\n title = _(\"Total Players: {}\").format(len(players))\n txt = \"\\n\".join(players)\n\n embed = discord.Embed(\n colour=player_role.color, title=title, description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"I need embed permissions for this command.\")", "def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)", "def get_players(self, hero, data, verbose):\n\n if len(self.players) > 1:\n out = f\"\\n\\nplayers:\"\n for name, player in data[\"players\"].items():\n if name != hero:\n out += \"\\n \" + name\n if verbose:\n out += Game._verbose_print(player)\n else:\n out = f\"\\n\\nThere's nobody else here.\"\n\n return out", "def is_empty_json(json_object):\n try:\n obj = json.load(json_object)\n except:\n obj = json.dumps(json_object)\n print(TypeError.message)\n return len(obj) == 0", "async def missing_tags(self, owner) -> List[str]:\n dms = await utils.get_dms(owner)\n missing = [player for player in await self.gar.get_players()\n if not self.get_user(player)]\n if not missing:\n return []\n message = ['Missing Discord accounts for the following players:']\n for p in missing:\n message.append(f'- {p}')\n await utils.send_list(dms, message)\n return missing", "def inventory(self):\n\n #when the item list is 0 , print out having no items \n if len(self.items) == 0:\n \n print('The player has no items')\n\n #if not, print out the item list \n else:\n print(self.items)", "def show_bench_player(self):\n if (len(self.bench_players) == 0):\n print(\"The bench is empty.\")\n else:\n for i in range(len(self.bench_players)):\n print(self.bench_players[i].name)", "def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)", "async def tod_list(self, ctx, *args):\n message = \"__Currently Playing__\\n\"\n if len(self.players) == 0:\n message = \"There are currently no users playing.\"\n for player in self.players:\n message += f\"> {str(player)[:-5]}\\n\"\n await ctx.send(message)", "async def check_games(self, ctx):\n print(self.data)\n print(self.games_info)", "def is_empty(self):\n if len(self.messages) < 1:\n return True\n else:\n return False", "def maybe_start(self):\r\n\t\tif not [p for p in self.players if not p.ready]\\\r\n\t\t and len(self.players) == self.max_players \\\r\n\t\t and not self.started:\r\n\t\t\tself.start()", "def get_names_users(self):\n user_1 = self.view.entry_player_1.get()\n user_2 = self.view.entry_player_2.get()\n if len(user_1) == 0 or len(user_2) == 0:\n\n tk.messagebox.showwarning(\"Warning\", \"Please enter players name\")\n self.logger.warning(\"Please enter players name\")\n return False\n self.update_players_name(user_1, user_2)\n return True", "def print_player_error(self, **kwargs):\n source_entity = kwargs[action.SOURCE_ENTITY]\n item = self._get_item_on_floor(source_entity)\n if (not item is None and\n not self.parent.inventory.has_room_for_item(item)):\n message = \"Could not pick up: \" + item.description.name + \\\n \", the inventory is full.\"\n msg.send_visual_message(message, source_entity.position.value)", "def is_empty(self):\n if self.items:\n return 'not empty!'\n return 'empty!'", "def do_nothing(self, player):\n return '%s spins \\'nun\\' and does nothing.' 
% (player,)", "def is_empty(self):\n return super(VideoCarouselTile, self).results() == []", "def rd4leaderboard(request):\n\n #Add views\n playing_players = Rd4SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd4Leaderboard.html', context=context)", "def print_inventory_items(items):\r\n if not (len(items) == 0):\r\n wrap_print(\"You have \" + list_of_objects(items) + \".\\n\")\r\n else:\r\n wrap_print(\"You don't have anything.\\n\")", "def rd3leaderboard(request):\n\n #Add views\n playing_players = Rd3SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd3Leaderboard.html', context=context)", "def _test_player_list_size(self):\n return len(self.player_list)", "def userReport():\n for player, dat in players.items():\n if \"Arca\" not in dat and \"Observatorio\" not in dat \\\n and \"Atomium\" not in dat and \"Dirigible\" not in dat \\\n and \"Estatua\" not in dat and \"Baño\" not in dat:\n continue\n\n print(\"-------------------------------------------------------------\")\n print(player, \" - \", dat[\"Edad\"])\n if \"Arca\" in dat:\n print(\" Arca %i\" % dat[\"Arca\"])\n if \"Observatorio\" in dat:\n print(\" Observatorio %i\" % dat[\"Observatorio\"])\n if \"Atomium\" in dat:\n print(\" Atomium %i\" % dat[\"Atomium\"])\n\n if \"Estatua\" in dat:\n for ed in dat[\"Estatua\"]:\n print(\" Estatua %i - %s\" % (ed[\"Nivel\"], ed[\"Edad\"]))\n\n if \"Dirigible\" in dat:\n for ed in dat[\"Dirigible\"]:\n print(\" Dirigible 11 - %s\" % ed)\n if \"Baño\" in dat:\n print(\" Baño Real %i - %s\" % (\n dat[\"Baño\"][\"Nivel\"], dat[\"Baño\"][\"Edad\"]))\n\n print()", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True" ]
[ "0.6356104", "0.6113162", "0.59505594", "0.5881741", "0.5878967", "0.584136", "0.58254135", "0.57647717", "0.5733628", "0.56962866", "0.56821424", "0.5673475", "0.55898994", "0.5588674", "0.55653924", "0.5522144", "0.550445", "0.54916596", "0.54665893", "0.54539055", "0.5453438", "0.5449374", "0.54337245", "0.54253995", "0.54016006", "0.53988415", "0.53913575", "0.5378769", "0.53635234", "0.53612214" ]
0.73496026
0
inserts a line of text into a file, after each line containing a specific string
def append_after(filename="", search_string="", new_string=""): with open(filename, 'r') as f: lines = f.readlines() with open(filename, 'w') as f: for line in lines: if search_string in line: f.write(line) f.write(new_string) else: f.write(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n with open(filename, \"r+\") as txt_file:\n lines = []\n for line in txt_file:\n lines.append(line)\n if search_string in line:\n lines.append(new_string)\n with open(filename, \"w+\") as txt_file:\n txt_file.write(\"\".join(lines))", "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n with open(filename, mode='r', encoding='utf-8') as f:\n lines = f.readlines()\n f. close()\n\n with open(filename, mode='w', encoding='utf-8') as f:\n for line in lines:\n f.write(line)\n if search_string in line:\n f.write(new_string)\n\n f.close()", "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n with open(filename, 'r', encoding='utf-8') as f:\n line_list = []\n while True:\n line = f.readline()\n if line == \"\":\n break\n line_list.append(line)\n if search_string in line:\n line_list.append(new_string)\n with open(filename, 'w', encoding='utf-8') as f:\n f.writelines(line_list)", "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n str = \"\"\n with open(filename, mode='r', encoding='utf-8') as fl:\n for line in fl:\n str += line\n if (search_string in line):\n str += new_string\n with open(filename, 'w') as fl:\n fl.write(str)", "def insert_text_in_file(file_path: pathlib.Path, tag: str, text: str) -> bool:\n lines: List[str] = []\n with file_path.open('r') as f:\n lines = f.readlines()\n for ii, line in enumerate(lines):\n if line.find(tag) >= 0:\n lines.insert(ii + 1, text)\n with file_path.open('w') as f:\n f.writelines(lines)\n return True\n return False", "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n z = []\n with open(filename, 'r+') as x:\n \"\"\"Read to new string\"\"\"\n for eachline in x:\n z.append(eachline)\n if search_string in eachline:\n \"\"\"I love 'in' but do not like the .action format\"\"\"\n z.append(new_string)\n with open(filename, 'w+') as a:\n a.write(\"\".join(z))", "def insert_first_line(filename, string):\n try:\n import fileinput\n for line in fileinput.input([filename], inplace=True):\n if fileinput.isfirstline():\n print string\n print line,\n except Exception as e:\n print('\\nError adding specified string to file {}: {}.'.format(filename, e))", "def insert(self, line, where=0):\n self.buffer.insert(where, line)", "def replace(file, current_line, new_line):\n with fileinput.input(file, inplace=True) as f:\n for line in f:\n if current_line in line:\n line = new_line\n sys.stdout.write(line)", "def search_and_add(_file, search_string, new_string):\n with open(_file, encoding='utf-8') as f:\n buf = f.readlines()\n new_array = []\n for line in buf:\n new_array.append(line)\n if line == search_string:\n new_array.append(new_string)\n\n with open(_file, 'w') as f:\n for item in new_array:\n f.write(item)", "def line_prepender(file_path: str, line: str) -> None:\n with open(file_path, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(line.rstrip('\\r\\n') + '\\n' + content)", "def insert(self, y, x, text):\n self.lines[y] = self.lines[y][ : x] + text + self.lines[y][x : ]", "def insert_after(self, text, line, col):\n col = self.canonicalize_column_index(line, col)\n col_off = self.col_offs[line]\n adj_col = col_off.get_rewritten_pos(col)\n theline = self.lines[line]\n self.lines[line] = theline[:adj_col] + text + theline[adj_col:]\n col_off.insert(col, len(text))", "def Prepend(filepath, text):\n file_data = text\n if os.path.exists(filepath):\n file_data += open(filepath).read()\n f = open(filepath, 
'w')\n f.write(file_data)\n f.close()", "def write_to_file(self, line):\r\n self.file.write(line)\r\n self.file.write(NEW_LINE)", "def update_file(filename, sentinel, text):\n content = None\n with codecs.open(filename, 'r', encoding='utf-8') as handle:\n content = handle.read()\n\n replacement = u\"{0}\\n\\n{1}\".format(sentinel, text)\n content = content.replace(sentinel, replacement, 1)\n with codecs.open(filename, 'w', encoding='utf-8') as handle:\n handle.write(content)\n return", "def add(self, text: str, after: str):\n index = -1\n\n # loop trough all of the elements\n for i, line in enumerate(self.__content_list, start=0):\n if line.startswith(after):\n # we need the next one\n index = i + 1\n # no need to go further\n break\n\n # check if found\n if index is -1:\n return\n\n self.__content_list.insert(index, text)\n self.__content = '\\n'.join(self.__content_list)", "def append_new_line(file_name, text_to_append):\n # Open the file in append & read mode ('a+')\n with open(file_name, \"a+\") as file_object:\n # Move read cursor to the start of file.\n file_object.seek(0)\n # If file is not empty then append '\\n'\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n # Append text at the end of file\n file_object.write(text_to_append)", "def add_line_to_file(line, filepath):\n filepath = os.path.realpath(filepath)\n if not os.path.isdir(os.path.dirname(filepath)):\n os.makedirs(os.path.dirname(filepath))\n found = False\n if os.path.isfile(filepath):\n with open(filepath, 'r+') as myfile:\n lst = myfile.readlines()\n for existingline in lst:\n if line in existingline:\n print(\"line already present\")\n found = True\n if not found:\n myfile = open(filepath, 'a+')\n myfile.write(line+\"\\n\")\n myfile.close()", "def _write_line(self, line):\n self._file.write(line + '\\n')", "def prepend_line(file_name, line):\n # define name of temporary dummy file\n dummy_file = file_name + '.bak'\n # open original file in read mode and dummy file in write mode\n with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:\n # Write given line to the dummy file\n write_obj.write(line + '\\n')\n # Read lines from original file one by one and append them to the dummy file\n for line in read_obj:\n write_obj.write(line)\n # remove original file\n os.remove(file_name)\n # Rename dummy file as the original file\n os.rename(dummy_file, file_name)", "def append_sequence_of_text(self, sequence):\n file_name = self.file_name\n with open(file_name, 'a+') as file:\n file.write('\\n\\n')\n for line in sequence:\n file.write('\\t{}\\n'.format(line))", "def add_to_output(line: str) -> None:\n with open(OUTPUT, \"a+\") as problem_output:\n problem_output.write(line + \"\\n\")", "def append_string_to_textfile(filename, string):\n filepath = root + filename\n with open(filepath, 'a+') as file:\n file.write(string + \"\\n\")", "def new_write_line(self, line):\n if self.status:\n with open(self.file_out_name,'w') as fout:\n fout.write(line)", "def replace_lines(file_path, idx=[0], new_lines=['Hello!\\n'], dest=None):\n with open(file_path, 'r') as f:\n lines = f.readlines()\n\n if len(idx) == len(new_lines):\n for i, nl in zip(idx, new_lines):\n lines[i] = nl\n\n if dest is None:\n dest = os.path.split(file_path)[0]\n os.remove(file_path)\n\n new_file = os.path.join(dest, os.path.basename(file_path))\n with open(new_file ,'w') as nf:\n for line in lines:\n nf.write(line)\n else:\n print('Requested indices do not match given number of lines!!!')", "def 
append_after_second_line(self, data):\r\n with open(self.file_name, 'r+', encoding='utf-8') as self.file:\r\n file_data = self.file.read() # Save all the file's content\r\n self.file.seek(0, 0) # Place file pointer at the beginning\r\n first_line = self.file.readline() # Read the first line\r\n second_line = self.file.readline() # Read the second line\r\n self.file.seek(len(first_line + second_line), 0) # Place file pointer at the end of the first line\r\n self.file.write(data) # Write data\r\n self.file.write('\\n' + file_data[len(first_line + second_line):])", "def add_after(keyword_block, string_to_add, find_strings=None):\n \n if find_strings is None:\n line_num = 0\n else:\n line_num = find_strings_in_iterable(keyword_block.sieBlocks, find_strings)\n \n keyword_block.insert(line_num, string_to_add)", "def insert_text(self, text):\n self.str += text", "def write_lines(file_lines, new_file):\n with open(new_file, 'w') as f:\n for l in file_lines:\n f.write(l)" ]
[ "0.73460144", "0.73188967", "0.7307326", "0.7201569", "0.7142726", "0.7142327", "0.6799034", "0.67887074", "0.6714846", "0.6573208", "0.65569973", "0.6455345", "0.64003253", "0.6389518", "0.6370676", "0.63586074", "0.627488", "0.6209781", "0.61661446", "0.6145634", "0.61024433", "0.6073374", "0.6062729", "0.6011992", "0.60114926", "0.5997684", "0.5967831", "0.59498966", "0.5942171", "0.59236246" ]
0.7350903
0
Uses UNCLE to build the number of each n-body cluster specified in the settings.in file.
def buildClusters(self):
    oldLatFile = 'needed_files/lat.in'
    oldFile = open(oldLatFile, 'r')
    oldLines = [line for line in oldFile]
    oldFile.close()
    newFile = open('enum/lat.in', 'w')
    for i in xrange(len(oldLines)):
        if 'Number pairs' in oldLines[i-1] and i >= 1:  # bch use label on previous line
            for num in self.clusterNums:
                newFile.write(str(num) + " ")
            newFile.write("\n")
        else:
            newFile.write(oldLines[i])
    newFile.close()
    lastDir = os.getcwd()
    os.chdir(lastDir + '/enum')
    if sum(self.clusterNums) <= 1500:  # the 1500 assumes you are running Main with 16G.
        subprocess.call([self.uncleExec, '10'], stdout=self.uncleOut)
    else:
        subprocess.call(['echo', 'Warning: BLOCKING CLUSTER JOB to save time'])
        # clustersjob = ClustersBuild.clustersjob()
        # clustersjob.clustBuild()
    # os.chdir(lastDir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_nb_clusters(self):\n \n print(\"Finding the optimal number of clusters...\")\n \n sample = ro.r.matrix(self.df[self.df[\"filename\"].between(1, 4)][\"active_power\"].to_numpy())\n \n r=ro.r(\"\"\"\n check = function(matrix) {\n n_clust = fviz_nbclust(matrix, kmeans, k.max = 15)\n\n n_clust = n_clust$data\n\n max_cluster = as.numeric(n_clust$clusters[which.max(n_clust$y)])\n return(max_cluster)\n }\n \"\"\")\n\n result = r(sample)\n self.conf[\"nb_clust\"] = int(result[0])\n \n print(f\"Optimal number of clusters is {self.conf['nb_clust']}\\n\")", "def n_clusters(self):\n return len(self.clusters)", "def atlas_clusters():\n pass", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def n_clusters(self):\n return self.model.n_clusters", "def _load_cluster(self):", "def test_list_cluster_network(self):\n pass", "def build_lhosts(self , sws , lhost_count):\n host_count = 0\n for sw in sws:\n for i in range(lhost_count):\n host_id = host_count + 1\n host = self.addHost('h%s' % host_id)\n self.addLink(sw, host)\n host_count += 1\n return host_count", "def add_nodes(self, count=1):\n self.log.info('Adding %d nodes' % count)\n new_nodes = []\n Node.flavor = env_vars['client_flavor']\n for i in range(count):\n #check if cluster did not previously exist\n if i == 0 and len(self.all_nodes) == 0:\n # give a floating IPv4 to the first node only\n new_guy = Node(self.cluster_name, '', len(self.all_nodes)+1, create=True, IPv4=True)\n else:\n new_guy = Node(self.cluster_name, node_type=\"\", number=len(self.all_nodes)+1, create=True)\n self.all_nodes.append(new_guy)\n new_nodes.append(new_guy)\n self.save_cluster()\n for n in new_nodes:\n n.wait_ready()\n #inject host files to everybody\n n.inject_hostnames(self.get_hosts(private=True), delete=self.cluster_name)\n n.bootstrap()\n self.log.info(\"Node %s is live \" % new_guy.name)\n #inform all\n self.inject_hosts_files()", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def Clusters(self):\n return", "def _num_nodes(self):\n return len(self._nid2partid)", "def setup_cluster(num_cpus, outdir, verbose, error_profile):\r\n\r\n server_socket = setup_server()\r\n workers, client_socks_and_adrs = setup_workers(\r\n num_cpus, outdir, server_socket,\r\n verbose=verbose,\r\n error_profile=error_profile)\r\n # we don't need the client adresses anywhere, so get rid of them\r\n client_sockets = [sock for sock, addr in client_socks_and_adrs]\r\n\r\n return client_sockets, workers, server_socket", "def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. 
We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1", "def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * (n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))", "def create_dedicated_clusters(ws,number_of_clusters, number_of_nodes, idle_time_out):\n clusters = {}\n for i in range (0,number_of_clusters):\n dig = '{0}{1}'.format(''.join(random.sample(string.digits, 2)),''.join(random.sample(string.ascii_letters, 2)))\n cluster_name = 'NC6-D{1}-{0}'.format(dig,number_of_nodes)\n try:\n compute_target = ComputeTarget(workspace=ws, name=cluster_name) \n except ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(vm_size=vmsize,\n max_nodes=number_of_nodes, \n idle_seconds_before_scaledown=idle_time_out)\n compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n compute_target.wait_for_completion(show_output=True)\n clusters[i] = compute_target\n return clusters", "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center", "def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def _get_cluster_list(self):\n return self.__cluster_list", "def __init__(self, count):\n\n self.clusters_count = count\n self._leaders = [i for i in range(count)]\n self._ranks = [0] * count", "def gen_clusters(links, posts):\n\n clusters = list(iter_clusters(links, posts))\n return clusters", "def clusters(self):\n raise NotImplementedError", "def test_create_cluster_network(self):\n pass", "def generate_clusters_n(df, tweet_deleted):\n # cluster_labels, n_clusters = dbscan(normalised_df, true_labels, 0.25, 30)\n # print(\"normalised_df.head()\", normalised_df.head())\n clusterer = hdbscan.HDBSCAN(min_cluster_size=10)\n clusterer.fit(df)\n labels = clusterer.labels_\n cluster_groups = {}\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", 
cluster_groups)\n df[\"cluster\"] = labels\n df[\"tweet_deleted\"] = tweet_deleted\n cluster_results = list()\n for cluster_no in cluster_groups.keys():\n print(\"++++++++++\")\n print(\"cluster_no\", cluster_no)\n cluster_result = list()\n cluster_result.append(cluster_no)\n\n cluster = df.mask('cluster', cluster_no)\n print(cluster_no, \" :\")\n tweet_deleted = cluster.mask('tweet_deleted', True).shape[0]\n not_tweet_deleted = cluster.mask('tweet_deleted', False).shape[0]\n print(\"deleted_df len:\", tweet_deleted)\n print(\"not_deleted_df len:\", not_tweet_deleted)", "def _init_cluster(self):\n self._Init_Cluster()", "def get_worker_nodes(self):\n worker_nodes_count = input('enter number of worker nodes\\n'\n 'default [2]: ')\n default = 2\n worker_nodes_count = set_values(worker_nodes_count, default, check='integer')\n worker_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['worker_nodes'] = []\n for num in range(worker_nodes_count):\n worker_values = []\n default = 'worker-{}'.format(num)\n worker_name = input('enter the worker {} node name\\n'\n 'default [{}]: '.format(num, default))\n worker_name = set_values(worker_name, default)\n worker_ip = get_ip(node_name=worker_name, ip_type='os')\n worker_mac = get_network_device_mac(node_name=worker_name, ip_type='idrac')\n worker_values.append(worker_name)\n worker_values.append(worker_ip)\n worker_values.append(worker_mac)\n worker_node_dict_pairs = dict(zip(worker_keys, worker_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(worker_name, worker_name,\n worker_ip, worker_mac)) \n self.inventory_dict['csah']['vars']['worker_nodes'].append(worker_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_workers'] = worker_nodes_count", "def get_connectivity(config, inf_cluster):\n\n N, M = config.shape\n # For Manhatten metric on a periodic lattice the maximum distance is half\n # the width/height for each direction\n max_r = int(np.floor(N / 2) + np.floor(M / 2))\n # Count of how many sites at distance r are in the same cluster. 
The first\n # two entries are 1 because they don't need to be calculated\n count = np.array([1] + [1] + [0] * (max_r - 1), dtype=np.float_)\n # Count of how many occupied sites at distance exist regardless of cluster.\n # First two entries are 1 because they don't need to be calculated\n norm = np.array([1] + [1] + [0] * (max_r - 1), dtype=np.float_)\n\n # Go through the entire lattice\n for x in range(N):\n for y in range(M):\n # If the site is not occupied ignore\n if config[x, y] == 0 or config[x, y] == inf_cluster:\n continue\n\n # Calculate the distances from the site\n dist = manhattan_distance(config.shape, (x, y))\n # Label of the site\n cluster = config[x, y]\n # See next if statement\n cluster_edge_reached = False\n # Go through all distances except 0 and 1 because they are always\n # part of the same cluster\n for r in range(2, max_r + 1):\n # Mask for occupied sites at distance r\n at_dist_r = np.logical_and(dist == r, config != 0)\n norm[r] += np.sum(at_dist_r)\n\n # If the same_cluster for last distance was 0 then it has to\n # be 0 for all larger values of r -> skip\n if not cluster_edge_reached:\n # Number of occupied sites at dist r that are part of the\n # same cluster\n same_cluster = np.sum(np.logical_and(at_dist_r,\n config == cluster))\n if same_cluster:\n count[r] += same_cluster\n else:\n cluster_edge_reached = True\n\n mask = norm != 0\n count[mask] = count[mask] / norm[mask]\n return count", "def set_n(self, n, n_background=None):\n if type(n) == float or type(n) == int:\n self.n = np.ones(self.n_clusters) * n\n if n_background is not None:\n # assume thinnest cluster (=cluster 0) is background\n self.n[0] = n_background \n elif len(n) == self.n_clusters:\n self.n = n", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")" ]
[ "0.6166934", "0.6090059", "0.60112536", "0.5973559", "0.59174323", "0.5835875", "0.58161783", "0.5799246", "0.57445395", "0.5734813", "0.5667729", "0.5656728", "0.56186646", "0.56018144", "0.5588197", "0.55617344", "0.5551129", "0.5544107", "0.5538375", "0.5520006", "0.5515516", "0.5481306", "0.5478595", "0.54579777", "0.54570657", "0.5451787", "0.5449742", "0.54382247", "0.5430731", "0.54027796" ]
0.60994977
1
If startMethod is not the same for each atom, chooses a list of i.i.d. structures from struct_enum.out for each different metal atom. The UNCLE option that we run to choose the training structures should look for a file called 'past_structs.dat' so as not to choose structures from that list. (Dr. Hess added that option to the UNCLE executable we're using.) The length of the list of training structures for each atom is determined by the TRAINING_STRUCTS setting in settings.in.
def chooseTrainingStructures(self, iteration, startMethod, nNew, ntot):
    lastDir = os.getcwd()
    natoms = len(self.atoms)
    iidStructs = [[]]*natoms
    if (iteration == 1 and startMethod == 'empty folders') or natoms == 1:
        # initialize training_set_structures in enumpast/. Compute iid structures once,
        # and copy to all atom folders that need them
        subprocess.call(['echo', '\nChoosing i.i.d. structures for all\n'])
        os.chdir('enum')
        if 2 <= nNew[0] < ntot:  # uncle requires at least 2 iid structures.
            subprocess.call([self.uncleExec, '42', str(nNew[0])], stdout=self.uncleOut)
            lines = readfile('training_set_structures.dat')
        elif nNew[0] == ntot:  # asking for all the structures for small enumerations, so just list them
            structlines = ['{} {}\n'.format(str(i+1), str(i+1)) for i in range(ntot)]
            writefile(structlines, 'training_set_structures.dat')
            lines = readfile('training_set_structures.dat')
        else:
            subprocess.call(['echo', '\t Number of iid structures requested is less than 2...skipping iids\n'])
            lines = []
        for iatom, atom in enumerate(self.atoms):
            atomDir = lastDir + '/' + atom
            iidList = [line.strip().split()[1] for line in lines]
            subprocess.call(['echo', '\nCopying i.i.d. structures for ' + atom + ' . . .\n'])
            epDir = lastDir + '/' + atom + '/enumpast'
            subprocess.call(['cp', 'training_set_structures.dat', epDir])
            iidStructs[iatom] = iidList
        os.chdir(lastDir)
    else:  # must get separate iid structures for each atom, so parallelize
        # prep
        for iatom, atom in enumerate(self.atoms):
            if 2 <= nNew[iatom] < ntot:
                atomDir = lastDir + '/' + atom
                # try:
                os.chdir(atomDir + '/enumpast')
                subprocess.call(['echo', '\nChoosing i.i.d. structures for ' + atom + ' . . .\n'])
                subprocess.call(['ln', '-s', '../../enum/struct_enum.out'])
                subprocess.call(['ln', '-s', '../../enum/lat.in'])
                subprocess.call(['ln', '-s', '../../enum/enum_PI_matrix.out'])
                subprocess.call(['ln', '-s', '../../enum/clusters.out'])
                # subprocess.call([self.uncleExec, '42', str(nNew[iatom])], stdout=self.uncleOut)
                os.chdir(lastDir)
        # make job files
        os.chdir(lastDir)
        jobIds = []
        if sum(nNew[1:]) > 0:
            mem = '16'  # Gb
            walltime = 2.0  # hrs
            subdir = 'enumpast'
            execString = self.uncleExec + ' 42 '
            atomStrings = [str(n) for n in nNew]
            parallelJobFiles(self.atoms, subdir, walltime, mem, execString, atomStrings)
            # submit jobs for atoms 2 and above
            jobIds = parallelAtomsSubmit(self.atoms[1:], subdir)
        # use this job to calculate the first atom:
        if 2 <= nNew[0] < ntot:
            os.chdir(lastDir + '/' + self.atoms[0] + '/' + subdir)
            subprocess.call(['echo', '\tThis job calculating the first atom: {}. Submitted jobs for the others.\n'.format(self.atoms[0])])
            subprocess.call([self.uncleExec, '42', str(nNew[0])], stdout=self.uncleOut)
            os.chdir(lastDir)
        # wait for others
        if len(jobIds) > 0:
            parallelAtomsWait(jobIds)
        # get the iidStructs from training_set_structures.dat for each atom
        for iatom, atom in enumerate(self.atoms):
            if 2 <= nNew[iatom] < ntot:
                atomDir = lastDir + '/' + atom
                lines = readfile(atomDir + '/enumpast/training_set_structures.dat')
                iidStructs[iatom] = [line.strip().split()[1] for line in lines]
        # except:
        #     subprocess.call(['echo', '\n~~~~~~~~~~ Could not choose i.i.d. structures for ' + atom + '! ~~~~~~~~~~\n'])
    os.chdir(lastDir)
    return iidStructs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enumerate(self):\n if not os.path.isdir('enum'): subprocess.call(['mkdir','enum'])\n infile = open('needed_files/struct_enum.in','r')\n inlines = []\n for line in infile:\n inlines.append(line)\n infile.close()\n \n structFile = open('enum/struct_enum.in','w')\n npoints = int(inlines[6].split()[0]) \n for i in xrange(len(inlines)):\n if i == 7 + npoints: \n structFile.write(str(self.volRange[0]) + \" \" + str(self.volRange[1]) + \" \")\n structFile.write(\"# Starting and ending cell sizes for search\\n\")\n else:\n structFile.write(inlines[i])\n structFile.close()\n \n lastDir = os.getcwd()\n os.chdir(lastDir + '/enum')\n subprocess.call(['echo','\\nEnumerating symmetrically unique structures. . .\\n'])\n subprocess.call([self.enumExec,'struct_enum.in'], stdout=self.uncleOut)\n \n self.changeEnumFile() #writes 'bulk' in place of surface\n os.chdir(lastDir)\n subprocess.call(['echo','\\nGenerating clusters. . .\\n'])\n self.buildClusters()\n os.chdir(lastDir + '/enum')\n #Run the smallest iid job possible to calculate enum_PI_matrix.out. All the atoms need it.\n subprocess.call(['echo','\\nCalculating enum_PI_matrix.out\\n']) \n subprocess.call(['rm', 'enum_PI_matrix.out']) \n subprocess.call([self.uncleExec, '42', '2'], stdout=self.uncleOut) \n os.chdir(lastDir)", "def __init__(self, struct_filename, name='molecules', cutoff=None, num_sampled_shards=None, max_num_atoms=None):\n \n print('Loading',name,'set')\n self.name = name\n \n sharded_ds = shard.Sharded.load(struct_filename)\n num_shards = sharded_ds.get_num_shards()\n \n self.num_atoms = []\n self.symbols = []\n self.charges = []\n self.positions = []\n self.index = []\n self.data = []\n self.data_keys = ['label'] # only one property here\n \n # Define indices and subsample if necessary \n shard_indices = np.arange(num_shards)\n if num_sampled_shards is not None and num_sampled_shards < num_shards:\n shard_indices = np.random.choice(shard_indices, size=num_sampled_shards, replace=False, p=None)\n \n for shard_idx in shard_indices:\n \n struct_df = sharded_ds.read_shard(shard_idx)\n labels_df = sharded_ds.read_shard(shard_idx, 'labels')\n ensembles = labels_df['ensemble']\n\n for i, code in enumerate(ensembles):\n\n new_struct = struct_df[struct_df.ensemble==code]\n new_labels = labels_df['label'][i]\n\n muta_chain = labels_df['chain'][i]\n muta_resid = labels_df['residue'][i]\n\n # select the local environment of the mutated residue\n sel_struct = select_environment(new_struct,muta_chain,muta_resid,cutoff)\n print(code, len(new_struct),len(sel_struct))\n\n # move on with the next structure if this one is too large\n if max_num_atoms is not None and len(sel_struct) > max_num_atoms:\n continue\n\n subunits = sel_struct.subunit.unique()\n for j, sub in enumerate(subunits):\n\n sub_df = sel_struct[sel_struct.subunit == sub]\n sub_df = sub_df.reset_index(drop=True)\n\n # get element symbols\n new_symbols = [ elem.title() for elem in sub_df.element ]\n\n # get atomic numbers\n new_atnums = np.array([ pte.GetAtomicNumber(e.title()) for e in sub_df.element ])\n # extract coordinates\n conf_coord = dt.get_coordinates_from_df(sub_df)\n \n # append everything\n self.symbols.append(new_symbols)\n self.charges.append(new_atnums)\n self.positions.append(conf_coord)\n self.num_atoms.append(len(new_atnums))\n\n self.data.append(new_labels) # There will be twice as many structures as labels (order matters!!!)\n \n return", "def store_start_structures(\n self,\n start_structure_names: List[str],\n program_helper: Union[ProgramHelper, None],\n 
tsopt_task_name: str,\n start_structures: Optional[List[Any]] = None\n ):\n import scine_database as db\n\n if start_structures is None:\n start_structures = self._calculation.get_structures()\n\n # Update model to make sure there are no 'any' values left\n update_model(\n self.systems[self.output(tsopt_task_name)[0]],\n self._calculation,\n self.config,\n )\n\n start_structure_ids = []\n for name in start_structure_names:\n # Check if the new structures are actually duplicates\n duplicate: Optional[db.ID] = None\n dl = ';'.join(self.make_decision_lists_from_calc(self.systems, name))\n graph = self.make_graph_from_calc(self.systems, name)\n for initial_id in start_structures:\n initial_structure = db.Structure(initial_id)\n initial_structure.link(self._structures)\n initial_graph = initial_structure.get_graph(\"masm_cbor_graph\")\n if not masm.JsonSerialization.equal_molecules(initial_graph, graph):\n continue\n aggregate_id = initial_structure.get_aggregate()\n if ';' in initial_graph:\n aggregate = db.Flask(aggregate_id)\n aggregate.link(self._flasks)\n else:\n aggregate = db.Compound(aggregate_id)\n aggregate.link(self._compounds)\n existing_structures = aggregate.get_structures()\n for existing_structure_id in existing_structures:\n existing_structure = db.Structure(existing_structure_id)\n existing_structure.link(self._structures)\n if existing_structure.get_label() in \\\n [db.Label.DUPLICATE, db.Label.MINIMUM_GUESS, db.Label.USER_GUESS]:\n continue\n existing_structure_dl = existing_structure.get_graph(\"masm_decision_list\")\n if masm.JsonSerialization.equal_decision_lists(dl, existing_structure_dl):\n duplicate = existing_structure_id\n break\n if duplicate is not None:\n break\n if duplicate is not None:\n start_structure_ids.append(duplicate)\n continue\n\n new_structure = self.create_new_structure(self.systems[name], db.Label.MINIMUM_OPTIMIZED)\n self.transfer_properties(self.ref_structure, new_structure)\n self.store_energy(self.systems[name], new_structure)\n self.store_property(\n self._properties,\n \"bond_orders\",\n \"SparseMatrixProperty\",\n self.systems[name].get_results().bond_orders.matrix,\n self._calculation.get_model(),\n self._calculation,\n new_structure,\n )\n self.add_graph(new_structure, self.systems[name].get_results().bond_orders)\n if \";\" in graph:\n new_structure.set_label(db.Label.COMPLEX_OPTIMIZED)\n if program_helper is not None:\n program_helper.calculation_postprocessing(self._calculation, self.ref_structure, new_structure)\n start_structure_ids.append(new_structure.id())\n return start_structure_ids", "def read_input():\n\n filenames = sorted(glob.glob(\"%s/openflow_input/*\" % root_dir))\n\n for filename in filenames:\n log(\"Processing struct file: \" + filename)\n ofinput = process_input_file(filename)\n\n # Populate global state\n for wire_version in ofinput.wire_versions:\n version_name = of_g.of_version_wire2name[wire_version]\n versions[version_name]['classes'].update(copy.deepcopy(ofinput.classes))\n of_g.ordered_classes[wire_version].extend(ofinput.ordered_classes)", "def load_structure(self, **kwargs):\n\n\t\t# PDB fields\n\t\tself.s_name = kwargs[\"s_name\"]\t\t\t\t\t\t\t\t# Name of the structure\n\t\tself.l_s_leading_data = kwargs[\"l_s_leading_data\"]\t\t\t# PDB information written above the atom properties\n\t\tself.l_s_trailing_data = kwargs[\"l_s_trailing_data\"]\t\t# PDB information written under the atom properties\n\n\t\t# Structural fields\n\t\tself.i_atom_count = len(kwargs[\"d_atoms\"][\"element_type\"])\t\t# Retrieves the 
number of atoms\n\t\tself.a_atoms = np.arange(self.i_atom_count).astype(\t\t\t\t# Array of atoms properties\n\t\t\tnp.dtype([\n\t\t\t\t(\"element_type\", np.str, 6),\t\t\t\t# ATOM or HETATM\n\t\t\t\t(\"atom_serial\", np.uint16, 1),\t\t\t\t# Atom serial number\n\t\t\t\t(\"atom_name\", np.str, 4),\t\t\t\t\t# Atom name\n\t\t\t\t(\"alternative_location\", np.str, 1),\t\t# Alternate location indicator\n\t\t\t\t(\"residue_name\", np.str, 3),\t\t\t\t# Residue name\n\t\t\t\t(\"chain_id\", np.str, 1),\t\t\t\t\t# Chain identifier\n\t\t\t\t(\"residue_serial\", np.int16, 1),\t\t\t# Residue sequence number\n\t\t\t\t(\"residue_insertion\", np.str, 1),\t\t\t# Code for insertion of residues\n\t\t\t\t(\"coord_x\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for X in Angstroms\n\t\t\t\t(\"coord_y\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for Y in Angstroms\n\t\t\t\t(\"coord_z\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for Z in Angstroms\n\t\t\t\t(\"occupancy\", np.float16, 1),\t\t\t\t# Occupancy\n\t\t\t\t(\"temperature_factor\", np.float16, 1),\t\t# Temperature factor\n\t\t\t\t(\"element_symbol\", np.str, 2),\t\t\t\t# Element symbol\n\t\t\t\t(\"element_charge\", np.str, 2),\t\t\t\t# Charge on the atom\n\t\t\t\t(\"element_mass\", np.float16, 1),\t\t\t# Mass of the atom\n\t\t\t\t(\"grid_x\", np.int16, 1),\t\t\t\t\t# X coordinates in the grid\n\t\t\t\t(\"grid_y\", np.int16, 1),\t\t\t\t\t# Y coordinates in the grid\n\t\t\t\t(\"grid_z\", np.int16, 1),\t\t\t\t\t# Z coordinates in the grid\n\t\t\t\t(\"custom_type\", np.str, 3),\t\t\t\t\t# A custom name for the element\n\t\t\t])\n\t\t)\n\n\t\t# For each field to save\n\t\tfor s_key in kwargs[\"d_atoms\"]:\n\t\t\tself.a_atoms[s_key] = kwargs[\"d_atoms\"][s_key]\t\t# Saves each field of the dictionary of atom properties\n\n\t\tself.a_atoms[\"element_mass\"] = retrieve_element_mass(\t\t# Retrieves the atomic mass of the given elements\n\t\t\tx_element_symbol=self.a_atoms[\"element_symbol\"],\t\t# Element symbol\n\t\t\tx_backup_symbol=self.a_atoms[\"atom_name\"]\t\t\t\t# Element symbol in case of fail\n\t\t)\n\t\tself.translate_custom_types()\t\t# Translates to the custom element types\n\n\t\tself.l_l_elements = set(self.a_atoms[\"element_symbol\"])\t\t# List all the different elements contained in the structure\n\t\tl_s_elements = [None] * len(gp.D_ELEMENT_NUMBER)\t\t\t# Creates an empty list with a slot for each possible element\n\n\t\t# For each chemical element\n\t\tfor s_element in self.l_l_elements:\n\n\t\t\ti_element_number = gp.D_ELEMENT_NUMBER[s_element]\t\t\t# Retrieves the atomic number of the element\n\t\t\ta_element_indexes = np.where(\t\t\t\t\t\t\t\t# Retrieves the indexes of the elements\n\t\t\t\tself.a_atoms[\"element_symbol\"] == s_element\n\t\t\t)\n\n\t\t\tl_s_elements[i_element_number] = [\t\t# Orders each element by their atomic number\n\t\t\t\ts_element,\t\t\t\t\t\t\t# Element symbol\n\t\t\t\ti_element_number,\t\t\t\t\t# Atomic number of the element\n\t\t\t\ta_element_indexes,\t\t\t\t\t# Indexes of the element in the structure\n\t\t\t\tNone,\t\t\t\t\t\t\t\t# Coordinates of the element in the grid\n\t\t\t\tNone,\t\t\t\t\t\t\t\t# VdW radius of the element\n\t\t\t\tNone\t\t\t\t\t\t\t\t# Sphere coordinates of the element\n\t\t\t]\n\t\t# End for\n\n\t\tself.l_l_elements = list(filter(None, l_s_elements))\t\t# Removes empty elements in the list\n\n\t\t# Miscellaneous fields\n\t\tself.f_mass = sum(self.a_atoms[\"element_mass\"])\t\t# Sums the mass of each element", "def __init__(self, ldat_type, station_id, rcusetup_cmds, 
beamctl_cmds,\n rspctl_cmds, caltabinfos=[], septonconf=None,\n **kwargs):\n self.headerversion = '4'\n\n # ldat_type attr\n self.ldat_type = ldat_type\n\n # filenametime attr (Is set later, since it's know only after obs)\n self.filenametime = None\n\n # station_id attr\n self.station_id = station_id\n\n # rcusetup_cmds attr\n if rcusetup_cmds == []:\n rcusetup_cmds = ['rspctl']\n self.rcusetup_cmds = rcusetup_cmds\n rcusetup_args = modeparms.parse_rspctl_args(self.rcusetup_cmds)\n self.attenuation = None\n if 'attentuation' in rcusetup_args:\n self.attenuation = rcusetup_args['attentuation']\n self.bits = int(rcusetup_args.get('bitmode', 16)) # 16 is default\n self.mode = rcusetup_args.get('mode', None)\n\n # beamctl_cmds related attr\n self.rcumode = []\n self.sb = []\n self.bl = []\n self.pointing = \"\"\n self.beamctl_cmds = beamctl_cmds\n if self.beamctl_cmds != []:\n digdir = None\n if type(self.beamctl_cmds) is not list:\n self.beamctl_cmds = [self.beamctl_cmds]\n for _beamctl_cmd in self.beamctl_cmds:\n (antset, _rcus, rcumode, beamlets, subbands, _anadir,\n digdir) = modeparms.parse_beamctl_args(_beamctl_cmd)\n self.antset = antset\n self.rcumode.append(int(rcumode))\n self.sb.append(subbands)\n self.bl.append(beamlets)\n self.pointing = digdir\n\n # rspctl_cmds attr\n if rspctl_cmds == []:\n rspctl_cmds = ['rspctl']\n self.rspctl_cmds = rspctl_cmds\n rspctl_args = modeparms.parse_rspctl_args(self.rspctl_cmds)\n \n # septonconf attr\n self.septonconf = septonconf\n if self.septonconf is not None:\n self.rcumode = [5]\n \n # attrs: integration, duration_scan\n if self.ldat_type != 'bfs':\n if self.ldat_type != 'acc':\n self.integration = float(rspctl_args['integration'])\n self.duration_scan = float(rspctl_args['duration'])\n else:\n self.integration = 1.0\n self.duration_scan = 512\n # attrs: sb\n if self.ldat_type == 'sst' or self.ldat_type == 'acc':\n self.sb = \"\"\n elif self.ldat_type.startswith('xst'):\n self.sb = str(rspctl_args['xcsubband'])\n # self.rcumode = self.rcumode[0]\n elif self.ldat_type == 'bst':\n self.sb = self.sb\n \n # caltabinfos attr\n if self.ldat_type != 'bst':\n # Only need caltab info if it's BST\n caltabinfos = []\n self.caltabinfos = caltabinfos", "def predict_structure(prefix, model_runner_1: alphafold.model.model.RunModel,\n model_runner_3: alphafold.model.model.RunModel,\n feature_dict, Ls: list[int], model_params: haiku.Params, use_model, do_relax=False,\n random_seed=0):\n\n # Minkyung's code\n # add big enough number to residue index to indicate chain breaks\n idx_res = feature_dict['residue_index']\n L_prev = 0\n # Ls: number of residues in each chain\n for L_i in Ls[:-1]:\n idx_res[L_prev + L_i:] += 200\n L_prev += L_i\n chains = list(\"\".join([ascii_uppercase[n] * L for n, L in enumerate(Ls)]))\n feature_dict['residue_index'] = idx_res\n\n # Run the models.\n plddts, paes = [], []\n unrelaxed_pdb_lines = []\n relaxed_pdb_lines = []\n\n print(f\"Use_model {use_model}\")\n\n for model_name, params in model_params.items():\n if model_name in use_model:\n print(f\"running {model_name}\")\n # swap params to avoid recompiling\n # note: models 1,2 have diff number of params compared to models 3,4,5\n if any(str(m) in model_name for m in [1, 2]): model_runner = model_runner_1\n if any(str(m) in model_name for m in [3, 4, 5]): model_runner = model_runner_3\n model_runner.params = params\n\n processed_feature_dict: affeatures.FeatureDict = model_runner.process_features(feature_dict,\n random_seed=random_seed)\n # prediction_result is a dictionary of 
NumPy feature arrays\n prediction_result: dict = model_runner.predict(processed_feature_dict)\n unrelaxed_protein: protein.Protein = protein.from_prediction(processed_feature_dict, prediction_result)\n unrelaxed_pdb_lines.append(protein.to_pdb(unrelaxed_protein))\n plddts.append(prediction_result['plddt'])\n paes.append(prediction_result['predicted_aligned_error'])\n\n if do_relax:\n # Relax the prediction.\n amber_relaxer: relax.AmberRelaxation = relax.AmberRelaxation(max_iterations=0, tolerance=2.39,\n stiffness=10.0, exclude_residues=[],\n max_outer_iterations=20)\n relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)\n relaxed_pdb_lines.append(relaxed_pdb_str)\n\n # rerank models based on predicted lddt\n lddt_rank = np.mean(plddts, -1).argsort()[::-1]\n out = {}\n print(\"reranking models based on avg. predicted lDDT\")\n for n, r in enumerate(lddt_rank):\n print(f\"model_{n + 1} {np.mean(plddts[r])}\")\n\n unrelaxed_pdb_path = f'{prefix}_unrelaxed_model_{n + 1}.pdb'\n with open(unrelaxed_pdb_path, 'w') as f:\n f.write(unrelaxed_pdb_lines[r])\n set_bfactor(unrelaxed_pdb_path, plddts[r], idx_res, chains)\n\n if do_relax:\n relaxed_pdb_path = f'{prefix}_relaxed_model_{n + 1}.pdb'\n with open(relaxed_pdb_path, 'w') as f: f.write(relaxed_pdb_lines[r])\n set_bfactor(relaxed_pdb_path, plddts[r], idx_res, chains)\n\n out[f\"model_{n + 1}\"] = {\"plddt\": plddts[r], \"pae\": paes[r]}\n return out", "def __init__(self):\n self.fstDict = {}\n\n self.output_list = None\n\n ## these should be supplied\n self.fst_file_type = 0 # 0:v7.01 file, 1:v7.02 file\n self.fst_exe_type = 1 # 0:v7.01 exe, v7.02 exe\n#ala Katherine's: fst_file_type = Enum(0, (0,1),iotype='in', desc='Fst file type, 0=old FAST, 1 = new FAST') \n\n ## new way: basic assumption is that .fst file is self contained, ie we could just run FAST on it. \n #read everything from templates, modify them selectively, then write them back out in \"run_dir\"\n self.ptfm_file = None\n self.twr_file = None\n self.adams_file = None\n self.blade1_file = None\n self.blade2_file = None\n self.blade3_file = None\n self.ad_file = None\n self.noise_file = None\n\n self.wamit_path = None\n self.wind_file = None\n \n self.exec_count = 0\n\n # we need to ensure the template file are not overwritten. run_dir can't be same as fst_dir\n self.run_dir = 'run_dir'", "def example():\n fast = runFAST()\n fast.fst_exe = \"/Users/pgraf/opt/windcode-7.31.13/build/FAST_glin64\"\n\n case = 1\n if (case==1):\n # fast.fst_dir = \"/Users/pgraf/work/wese/AeroelasticSE-1_3_14/src/AeroelasticSE/FAST_VT/OC3_Files/\" ## either abs or rel path ok.\n fast.fst_dir = \"ModelFiles/OC3_Files/\"\n fast.fst_file = \"NRELOffshrBsline5MW_Monopile_RF.fst\" ## should _not_ be full path, is in relation to fst_dir\n elif case == 2:\n fast.fst_dir = \"/Users/pgraf/work/wese/fatigue12-13/from_gordie/SparFAST3.orig\"\n fast.fst_file = \"NRELOffshrBsline5MW_Floating_OC3Hywind.fst\"\n elif case == 3:\n fast.fst_exe = \"/Users/pgraf/opt/windcode-7.31.13/build/FAST_regular_glin64\"\n fast.fst_dir = \"ModelFiles/Noise_Files/\"\n fast.fst_file = \"NREL5MW_Monopile_Rigid.v7.02.fst\" \n else:\n print \"unknown test case for runFAST.py\"\n sys.exit()\n\n fast.run_dir = \"new_run_dir\" ## either abs or rel path ok\n# fast.run_dir = \"/Users/pgraf/work/wese/AeroelasticSE-1_3_14/src/AeroelasticSE/another_run_dir\"\n\n\n ## all changes to FAST input params go through dict\n ## unless you want to change the name of the template for the sub-files (e.g. subst a different aerodyn file). 
Those go through actual fields (\"fast.ad_file\")\n fast.fstDict['Vhub']=8.0\n fast.fstDict['RotSpeed'] = 12.03\n fast.fstDict['TMax'] = 2.0\n fast.fstDict['TStart'] = 0.0\n\n fast.fstDict['PlatformDir'] = 30.0\n\n fast.setOutputs(['RotPwr'])\n\n fast.execute()\n out = fast.getOutputValue(\"RotPwr\")\n\n print fast\n print \"max power\"\n print max(out)", "def main (cmd):\n#################################################################\n#step 1: generate common block list report from block list folder\n#################################################################\n#step 2: folder read learning list\n\trecord_sequences = sequence_pattern_mining.multi_file_folder_sequence_learning_list_read(cmd)\n\tresult = sequence_pattern_mining.multi_file_sequence_prior_matrix_gen (record_sequences)\n\tprior_matrix = result[0]\n\tdone_block_name_list = result[1]\n\tprint \"block total number\"\n\tprint len (done_block_name_list)\n\n\t#done block name list contains all block names we need to ananlyze\n\t#left_candidate_list = sequence_pattern_mining.generate_critical_path(done_block_name_list, prior_matrix)\n\ttotal_result = sequence_pattern_mining.topology_path_find(done_block_name_list, prior_matrix)\n\tprint record_sequences[3]\n\tsequence_pattern_mining.sub_path_list_verify_2 (total_result , record_sequences[3])\n\t\t\n\n\t\"\"\"\n\tfor sub_list in total_result:\n\t\tsequence_pattern_mining.sub_path_list_verify_2 (sub_list , record_sequences[0])\n\t\tsequence_pattern_mining.sub_path_list_verify_2 (sub_list , record_sequences[1])\n\t\tsequence_pattern_mining.sub_path_list_verify_2 (sub_list , record_sequences[2])\n\t\tsequence_pattern_mining.sub_path_list_verify_2 (sub_list , record_sequences[3])\n\t\"\"\"\n\n\t\n\t\"\"\"\n\tstatus_matrix = sequence_pattern_mining.gen_status_matrix_based_on_prior_matrix(done_block_name_list, prior_matrix)\n\t\n\tdone_block_name_list = ['block_106','block_75', 'block_22', 'block_72', 'block_73' ]\n\tsub_path_list = sequence_pattern_mining.find_sub_path (done_block_name_list, status_matrix, prior_matrix)\n\tprint sub_path_list\n\t\n\tdone_block_name_list = ['block_106','block_75', 'block_22', 'block_73', 'block_72' ]\n\tsub_path_list = sequence_pattern_mining.find_sub_path (done_block_name_list, status_matrix, prior_matrix)\n\tprint sub_path_list\n\t\n\tgrowing_pattern = [ 'block_22', 'block_73', 'block_75','block_106']\n\tinsert_pos = sequence_pattern_mining.search_sub_block_insert_into_growing_pattern_position_1 (growing_pattern, \"block_72\", status_matrix)\n\tprint insert_pos\n\tnew_list = list(growing_pattern)\n\tnew_list.insert(insert_pos, \"block_72\")\n\tprint new_list\n\t\n\tgrowing_pattern = [ 'block_22', 'block_72', 'block_75','block_106']\t\n\tinsert_pos = sequence_pattern_mining.search_sub_block_insert_into_growing_pattern_position_1 (growing_pattern, \"block_73\", status_matrix)\n\tprint insert_pos\n\tnew_list = list(growing_pattern)\n\tnew_list.insert(insert_pos, \"block_73\")\n\tprint new_list\n\t\"\"\"", "def build(self, is_easy=False) -> None:\n allocation = ['train', 'dev', 'test']\n\n bm25_helper = self.__build_bm25_helper(is_easy)\n\n for entry in allocation:\n with open(self.__json_location + '/merged_' + entry + '.json', 'r') as f:\n json_data = json.load(f)\n\n output_file_name = 'data_' + entry\n if is_easy:\n json2training_converter = Json2EasyTraining(json_data, bm25_helper)\n output_file_name += '_easy'\n else:\n json2training_converter = JSON2Training(json_data, bm25_helper)\n\n training_set = json2training_converter.convert()\n 
dialog_lookup_table = json2training_converter.get_dialog_lookup_table()\n\n self.__write_tsv(output_file_name + '.tsv', training_set)\n self.__write_array(output_file_name + '_lookup' '.txt', dialog_lookup_table)", "def main(FLAGS):\n if FLAGS.format == 'tfrecords':\n raise NotImplementedError\n else:\n # get the names of the train image files\n train_files = txt2list(FLAGS.train_file_names)\n train_limit = floor(FLAGS.train_fraction * FLAGS.n_train)\n train_count = 0\n train_full = False\n\n # get the names of the validation image files\n valid_files = txt2list(FLAGS.valid_file_names)\n valid_limit = floor(FLAGS.valid_fraction * FLAGS.n_valid)\n valid_count = 0\n valid_full = False\n\n # get the names of the test image files\n test_files = txt2list(FLAGS.test_file_names)\n test_limit = floor(FLAGS.test_fraction * FLAGS.n_test)\n test_count = 0\n test_full = False\n\n # accumulators for the image and annotation pairs\n train_windows_with = []\n valid_windows_with = []\n test_windows_with = []\n train_windows_without = []\n valid_windows_without = []\n test_windows_without = []\n train_locations = []\n valid_locations = []\n test_locations = []\n\n # directories of sensor data and annotations\n sub_dirs = glob(os.path.join(FLAGS.satnet_data_dir, '*'))\n\n # go through each sensor collection from each site and prepare\n # the training, validation, and testing sub-windows\n for dir in sub_dirs:\n if train_full and valid_full and test_full:\n pass\n else:\n img_files = glob(os.path.join(dir, 'ImageFiles', '*.fits'))\n json_files = glob(os.path.join(dir, 'Annotations', '*.json'))\n\n # get only the name of the .json file w/o extension\n json_names = [file.split(\"\\\\\")[-1] for file in json_files]\n json_names = [name.split(\".json\")[0] for name in json_names]\n\n # get only the name of the .fits file w/o extension\n img_names = [file.split(\"\\\\\")[-1] for file in img_files]\n img_names = [name.split(\".fits\")[0] for name in img_names]\n\n # in case some annotations/images aren't paired, find the\n # common .json and .fits files names\n similar_files = set(img_names).intersection(json_names)\n\n # prepare the new images and annotations via the sliding-window\n # algorithm\n for file in similar_files:\n if train_full and valid_full and test_full:\n pass\n else:\n # load SatNet image and its corresponding annotations\n img_path = os.path.join(dir, 'ImageFiles', file + '.fits')\n anno_path = os.path.join(dir, 'Annotations', file + '.json')\n image = SatelliteImage(img_path)\n anno = ImageAnnotations(anno_path)\n\n # find the data partition this example belongs to and add\n # that data to the accumulators\n comp_name = '_'.join([anno.directory, anno.name])\n\n # pull all object centroids in the image and store in a list\n centroids = []\n [centroids.append([obj.y_c, obj.x_c]) for obj in anno.objects]\n\n # run sliding window algorithm across the image\n sw = SatNetSubWindows(img=image.image,\n centroids=centroids,\n window_size=FLAGS.window_size,\n stride=FLAGS.stride,\n padding=FLAGS.padding,\n img_width=FLAGS.width,\n img_height=FLAGS.height)\n sw.get_obj_windows()\n\n # find how many background windows to include from the image\n # and generate that many number of random indices to pull\n # them\n if sw.windows_with is not None:\n n_with = sw.windows_with.shape[0]\n n_without = int(FLAGS.bg2sat_ratio * n_with)\n else:\n n_without = int(FLAGS.bg2sat_ratio)\n inds = np.random.permutation(sw.windows_without.shape[0])\n inds = inds[:n_without]\n\n # determine the status of the accumulators\n if 
train_count >= train_limit:\n train_full = True\n if valid_count >= valid_limit:\n valid_full = True\n if test_count >= test_limit:\n test_full = True\n\n # accumulate sub-windows into the three data\n # partitions\n if comp_name in train_files and not train_full:\n if sw.windows_with is not None:\n train_windows_with.append(sw.windows_with)\n train_locations.append(sw.object_location_with)\n train_windows_without.append(sw.windows_without[inds, :, :])\n train_count += 1\n elif comp_name in valid_files and not valid_full:\n if sw.windows_with is not None:\n valid_windows_with.append(sw.windows_with)\n valid_locations.append(sw.object_location_with)\n valid_windows_without.append(sw.windows_without[inds, :, :])\n valid_count += 1\n elif comp_name in test_files and not test_full and FLAGS.save_test:\n if sw.windows_with is not None:\n test_windows_with.append(sw.windows_with)\n test_locations.append(sw.object_location_with)\n test_windows_without.append(sw.windows_without[inds, :, :])\n test_count += 1\n else:\n print('Windows belong to a filled accumulator... skipped them.')\n pass\n print('Accumulators: train - {}% , valid - {}% , test - {}%'.format(\n int(train_count / train_limit * 100),\n int(valid_count / valid_limit * 100),\n int(test_count / test_limit * 100)))\n\n # combine all of the sub-windows and annotations for each data\n # partition\n train_windows_with = np.concatenate(train_windows_with)\n train_windows_without = np.concatenate(train_windows_without)\n train_locations = np.concatenate(train_locations)\n train_annos_with = np.ones(train_windows_with.shape[0])\n train_annos_without = np.zeros(train_windows_without.shape[0])\n valid_windows_with = np.concatenate(valid_windows_with)\n valid_windows_without = np.concatenate(valid_windows_without)\n valid_locations = np.concatenate(valid_locations)\n valid_annos_with = np.ones(valid_windows_with.shape[0])\n valid_annos_without = np.zeros(valid_windows_without.shape[0])\n\n if FLAGS.save_test:\n test_windows_with = np.concatenate(test_windows_with)\n test_windows_without = np.concatenate(test_windows_without)\n test_locations = np.concatenate(test_locations)\n test_annos_with = np.ones(test_windows_with.shape[0])\n test_annos_without = np.zeros(test_windows_without.shape[0])\n\n train_windows = np.concatenate((train_windows_with, train_windows_without))\n train_annos = np.concatenate((train_annos_with, train_annos_without))\n valid_windows = np.concatenate((valid_windows_with, valid_windows_without))\n valid_annos = np.concatenate((valid_annos_with, valid_annos_without))\n\n if FLAGS.save_test:\n test_windows = np.concatenate((test_windows_with, test_windows_without))\n test_annos = np.concatenate((test_annos_with, test_annos_without))\n\n path_append = '_seedNet2satNet_windowsize_{}_stride_{}_padding_{}_ratio_{}_trainfraction_{}.h5'.format(FLAGS.window_size, FLAGS.stride, FLAGS.padding, FLAGS.bg2sat_ratio, FLAGS.train_fraction)\n train_c_windows_path = os.path.join(FLAGS.save_data_dir, 'train_classification_windows' + path_append)\n train_c_labels_path = os.path.join(FLAGS.save_data_dir, 'train_classification_labels' + path_append)\n train_l_windows_path = os.path.join(FLAGS.save_data_dir, 'train_localization_windows' + path_append)\n train_l_labels_path = os.path.join(FLAGS.save_data_dir, 'train_localization_labels' + path_append)\n valid_c_windows_path = os.path.join(FLAGS.save_data_dir, 'valid_classification_windows' + path_append)\n valid_c_labels_path = os.path.join(FLAGS.save_data_dir, 'valid_classification_labels' + 
path_append)\n valid_l_windows_path = os.path.join(FLAGS.save_data_dir, 'valid_localization_windows' + path_append)\n valid_l_labels_path = os.path.join(FLAGS.save_data_dir, 'valid_localization_labels' + path_append)\n\n if FLAGS.save_test:\n test_c_windows_path = os.path.join(FLAGS.save_data_dir, 'test_classification_windows' + path_append)\n test_c_labels_path = os.path.join(FLAGS.save_data_dir, 'test_classification_labels' + path_append)\n test_l_windows_path = os.path.join(FLAGS.save_data_dir, 'test_localization_windows' + path_append)\n test_l_labels_path = os.path.join(FLAGS.save_data_dir, 'test_localization_labels' + path_append)\n\n write_hdf5(train_c_windows_path, train_windows)\n write_hdf5(train_c_labels_path, train_annos)\n write_hdf5(train_l_windows_path, train_windows_with)\n write_hdf5(train_l_labels_path, train_locations)\n write_hdf5(valid_c_windows_path, valid_windows)\n write_hdf5(valid_c_labels_path, valid_annos)\n write_hdf5(valid_l_windows_path, valid_windows_with)\n write_hdf5(valid_l_labels_path, valid_locations)\n\n if FLAGS.save_test:\n write_hdf5(test_c_windows_path, test_windows)\n write_hdf5(test_c_labels_path, test_annos)\n write_hdf5(test_l_windows_path, test_windows_with)\n write_hdf5(test_l_labels_path, test_locations)", "def main():\n\n # Create argument parser\n parser = ArgumentParser()\n parser.add_argument('datadir', type=str, help='Directory of LC files')\n parser.add_argument('metatable', type=str,\n help='Metatable containing each object, redshift, peak time guess, mwebv, object type')\n parser.add_argument('--zpt', type=float, default=DEFAULT_ZPT, help='Zero point of LCs')\n parser.add_argument('--lm', type=float, default=DEFAULT_LIM_MAG, help='Survey limiting magnitude')\n parser.add_argument('--outdir', type=str, default='./products/',\n help='Path in which to save the LC data (single file)')\n args = parser.parse_args()\n\n objs, redshifts, obj_types, peaks, ebvs = read_in_meta_table(args.metatable)\n\n # Grab all the LC files in the input directory\n file_names = []\n for obj in objs:\n file_name = args.datadir + 'PS1_PS1MD_' + obj + '.snana.dat'\n file_names.append(file_name)\n\n # Create a list of LC objects from the data files\n lc_list = read_in_LC_files(file_names, objs)\n\n # This needs to be redone when retrained\n # TODO: Need to change this whenever you retrain...\n filt_dict = {'g': 0, 'r': 1, 'i': 2, 'z': 3}\n wvs = np.asarray([5460, 6800, 7450, 8700])\n\n # Update the LC objects with info from the metatable\n my_lcs = []\n for i, my_lc in enumerate(lc_list):\n my_lc.add_LC_info(zpt=args.zpt, mwebv=ebvs[i],\n redshift=redshifts[i], lim_mag=args.lm,\n obj_type=obj_types[i])\n my_lc.get_abs_mags()\n my_lc.sort_lc()\n pmjd = my_lc.find_peak(peaks[i])\n my_lc.shift_lc(pmjd)\n my_lc.correct_time_dilation()\n my_lc.filter_names_to_numbers(filt_dict)\n my_lc.correct_extinction(wvs)\n my_lc.cut_lc()\n my_lc.make_dense_LC(4)\n my_lcs.append(my_lc)\n save_lcs(my_lcs, args.outdir)", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = 
phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def optimised_structure(self):\n\n with open(f'gj_{self.molecule.name}.log', 'r') as log_file:\n\n lines = log_file.readlines()\n\n opt_coords_pos = []\n for pos, line in enumerate(lines):\n if 'Input orientation' in line:\n opt_coords_pos.append(pos + 5)\n\n start_pos = opt_coords_pos[-1]\n\n num_atoms = len(self.molecule.molecule['input'])\n\n opt_struct = []\n\n for pos, line in enumerate(lines[start_pos: start_pos + num_atoms]):\n\n vals = line.split()[-3:]\n vals = [self.molecule.molecule['input'][pos][0]] + [float(i) for i in vals]\n opt_struct.append(vals)\n\n return opt_struct", "def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. 
Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0", "def compInit(self,compInfo, node, modelInfo, subcktName,dir_name,transInfo):\n print \"CompInfo inside compInit function : compInit------->\",compInfo\n #### initial processing to check if MOs is present. 
If so, library to be used is BondLib\n modelicaCompInit = []\n numNodesSub = {} \n mosInfo = {}\n IfMOS = '0'\n for eachline in compInfo:\n #words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n if len(subcktName) > 0:\n subOptionInfo = []\n subSchemInfo = []\n for eachsub in subcktName:\n filename_tem = eachsub + '.sub'\n filename_tem = os.path.join(dir_name, filename_tem)\n data = self.readNetlist(filename_tem)\n subOptionInfo, subSchemInfo = self.separateNetlistInfo(data)\n \n for eachline in subSchemInfo:\n #words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n \n #Lets Start with Source details\n for eachline in self.sourceDetail:\n eachline = eachline.lower()\n words = eachline.split()\n typ = words[3].split('(')\n if typ[0] == \"pulse\":\n per = words[9].split(')')\n stat = self.mappingData[\"Sources\"][typ[0]]+' '+words[0]+'(rising = '+words[6]+', V = '+words[4]\\\n +', width = '+words[8]+', period = '+per[0]+', offset = '+typ[1]+', startTime = '+words[5]+', falling = '+words[7]+');' \n modelicaCompInit.append(stat)\n if typ[0] == \"sine\":\n theta = words[7].split(')')\n stat = self.mappingData[\"Sources\"][typ[0]]+' '+words[0]+'(offset = '+typ[1]+', V = '+words[4]+', freqHz = '+words[5]+', startTime = '+words[6]+', phase = '+theta[0]+');'\n modelicaCompInit.append(stat)\n if typ[0] == \"pwl\":\n keyw = self.mappingData[\"Sources\"][typ[0]]+' '\n stat = keyw + words[0] + '(table = [' + typ[1] + ',' + words[4] + ';'\n length = len(words);\n for i in range(6,length,2):\n if i == length-2:\n w = words[i].split(')')\n stat = stat + words[i-1] + ',' + w[0] \n else:\n stat = stat + words[i-1] + ',' + words[i] + ';'\n stat = stat + ']);'\n modelicaCompInit.append(stat) \n if typ[0] == words[3] and typ[0] != \"dc\":\n #It is DC constant but no dc keyword\n val_temp = typ[0].split('v')\n stat = self.mappingData[\"Sources\"][\"dc\"]+' ' + words[0] + '(V = ' + val_temp[0] + ');' \n modelicaCompInit.append(stat)\n elif typ[0] == words[3] and typ[0] == \"dc\":\n stat = self.mappingData[\"Sources\"][typ[0]]+' ' + words[0] + '(V = ' + words[4] + ');' ### check this\n modelicaCompInit.append(stat)\n \n #Lets start for device\n for eachline in self.deviceDetail:\n words=eachline.split()\n if eachline[0]=='d' or eachline[0]=='D':\n if len(words)>3:\n if modelInfo[words[3]].has_key('n'):\n n = float(modelInfo[words[3]]['n'])\n else:\n n = 1.0\n vt = str(float(0.025*n))\n stat = self.mappingData[\"Devices\"][eachline[0]]+' '+ words[0] + '(Ids = ' + modelInfo[words[3]]['is'] + ', Vt = ' + vt + ', R = 1e12' +');'\n else:\n stat = self.mappingData[\"Devices\"][eachline[0]]+' '+ words[0] +';'\n modelicaCompInit.append(stat)\n \n elif eachline[0]=='q' or eachline[0]=='Q':\n if words[4]=='npn':\n start = 'Analog.Semiconductors.NPN '\n elif words[4]=='pnp':\n start = 'Analog.Semiconductors.PNP '\n \n inv_vak = float(self.tryExists(modelInfo,words,4, 'vaf', 50))\n vak_temp = 1/inv_vak\n vak = str(vak_temp)\n bf = self.tryExists(modelInfo,words,4, 'bf', 50)\n br = self.tryExists(modelInfo,words,4, 'br', 0.1)\n Is = self.tryExists(modelInfo,words,4, 'is', 1e-16)\n tf = self.tryExists(modelInfo,words,4, 'tf', 1.2e-10)\n tr = self.tryExists(modelInfo,words,4, 'tr', 5e-9)\n cjs = self.tryExists(modelInfo,words,4, 'cjs', 1e-12)\n cje = self.tryExists(modelInfo,words,4, 'cje', 4e-13)\n cjc = self.tryExists(modelInfo,words,4, 'cjc', 5e-13)\n vje = self.tryExists(modelInfo,words,4, 'vje', 0.8)\n mje = self.tryExists(modelInfo,words,4, 'mje', 0.4)\n vjc = 
self.tryExists(modelInfo,words,4, 'vjc', 0.8)\n mjc = self.tryExists(modelInfo,words,4, 'mjc', 0.333)\n stat = start + words[0] +'(Bf = ' + bf + ', Br = ' + br + ', Is = ' +Is+ ', Vak = ' + vak + ', Tauf = ' +tf+ ', Taur = ' +tr+ ', Ccs = ' +cjs+ ', Cje = ' +cje+ ', Cjc = ' +cjc+ ', Phie = ' + vje + ', Me = ' + mje + ', Phic = ' + vjc + ', Mc = ' + mjc + ');'\n modelicaCompInit.append(stat)\n \n elif eachline[0]=='m' or eachline[0]==\"M\":\n print \"Starting Mosfet\"\n eachline = eachline.split(words[5])\n eachline = eachline[1]\n eachline = eachline.strip()\n eachline = eachline.replace(' = ', '=').replace('= ','=').replace(' =','=').replace(' * ', '*').replace(' + ', '+').replace(' { ', '').replace(' } ', '')\n eachline = eachline.split()\n mosInfo[words[0]] = {}\n for each in eachline:\n if len(each) > 1:\n each = each.split('=')\n mosInfo[words[0]][each[0]] = each[1]\n trans = transInfo[words[5]]\n if trans == 'nmos':\n start = 'BondLib.Electrical.Analog.Spice.Mn '\n else:\n start = 'BondLib.Electrical.Analog.Spice.Mp '\n vto = self.tryExists(modelInfo,words,5,'vto',0)\n gam = self.tryExists(modelInfo,words,5,'gamma',0)\n phi = self.tryExists(modelInfo,words,5, 'phi', 0)\n ld = self.tryExists(modelInfo,words,5,'ld',0)\n uo = self.tryExists(modelInfo,words,5,'uo',0)\n lam = self.tryExists(modelInfo,words,5,'lambda',0)\n tox = self.tryExists(modelInfo,words,5,'tox',3e-9)\n pb = self.tryExists(modelInfo,words,5, 'pb',0.8)\n cj = self.tryExists(modelInfo,words,5, 'cj',0)\n cjsw = self.tryExists(modelInfo,words,5, 'cjsw',1e-9)\n mj = self.tryExists(modelInfo,words,5, 'mj',0.33)\n mjsw = self.tryExists(modelInfo,words,5, 'mjsw',0.33)\n cgdo = self.tryExists(modelInfo,words,5, 'cgdo',0)\n js = self.tryExists(modelInfo,words,5, 'js',0)\n cgbo = self.tryExists(modelInfo,words,5, 'cgbo',0)\n cgso = self.tryExists(modelInfo,words,5,'cgso',0)\n try:\n l = mosInfo[words[0]]['l']\n except KeyError:\n l = '1e-6'\n try:\n w = mosInfo[words[0]]['w']\n except KeyError:\n w = '100e-6'\n try:\n As = mosInfo[words[0]]['as']\n ad = mosInfo[words[0]]['ad']\n except KeyError:\n As = '0'\n ad = '0'\n try:\n ps = mosInfo[words[0]]['ps']\n pd = mosInfo[words[0]]['pd']\n except KeyError:\n ps = '0'\n pd = '0'\n stat = start + words[0] + '(Tnom = 300, VT0 = ' + vto + ', GAMMA = ' + gam + ', PHI = ' + phi + ', LD = ' +ld+ ', U0 = ' + str(float(uo)*0.0001) + ', LAMBDA = ' + lam + ', TOX = ' +tox+ ', PB = ' + pb + ', CJ = ' +cj+ ', CJSW = ' +cjsw+ ', MJ = ' + mj + ', MJSW = ' + mjsw + ', CGD0 = ' +cgdo+ ', JS = ' +js+ ', CGB0 = ' +cgbo+ ', CGS0 = ' +cgso+ ', L = ' +l+ ', W = ' + w + ', Level = 1' + ', AD = ' + ad + ', AS = ' + As + ', PD = ' + pd + ', PS = ' + ps + ');'\n stat = stat.translate(maketrans('{}', ' '))\n modelicaCompInit.append(stat)\n \n #Lets start for Subcircuit\n for eachline in self.subCktDetail:\n print \"each Line-------->\",eachline\n global point\n global subname\n temp_line = eachline.split()\n temp = temp_line[0].split('x')\n index = temp[1]\n for i in range(0,len(temp_line),1):\n if temp_line[i] in subcktName:\n subname = temp_line[i]\n numNodesSub[subname] = i - 1\n point = i\n if len(temp_line) > point + 1:\n rem = temp_line[point+1:len(temp_line)]\n rem_new = ','.join(rem)\n stat = subname + ' ' + subname +'_instance' + index + '(' + rem_new + ');'\n else:\n stat = subname + ' ' + subname +'_instance' + index + ';'\n modelicaCompInit.append(stat)\n \n \n for eachline in compInfo:\n words = eachline.split()\n #val = words[3]\n #value = self.splitIntoVal(val)\n value = 
self.getUnitVal(words[-1])\n if eachline[0] == 'r':\n stat = 'Analog.Basic.Resistor ' + words[0] + '(R = ' + value + ');'\n modelicaCompInit.append(stat)\n elif eachline[0] == 'c':\n stat = 'Analog.Basic.Capacitor ' + words[0] + '(C = ' + value + ');'\n modelicaCompInit.append(stat)\n elif eachline[0] == 'l':\n stat = 'Analog.Basic.Inductor ' + words[0] + '(L = ' + value + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'e':\n stat = 'Analog.Basic.VCV ' + words[0] + '(gain = ' + self.getUnitVal(words[5]) + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'g':\n stat = 'Analog.Basic.VCC ' + words[0] + '(transConductance = ' + self.getUnitVal(words[5]) + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'f':\n stat = 'Analog.Basic.CCC ' + words[0] + '(gain = ' + self.getUnitVal(words[4]) + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'h':\n stat = 'Analog.Basic.CCV ' + words[0] + '(transResistance = ' + self.getUnitVal(words[4]) + ');'\n modelicaCompInit.append(stat)\n \n else:\n continue\n \n if '0' or 'gnd' in node:\n modelicaCompInit.append('Analog.Basic.Ground g;')\n return modelicaCompInit, numNodesSub", "def main(params):\n\n train = []\n test = []\n imdir = params['dest'] + '/{0}/COCO_{0}_{1:012d}.jpg'\n\n if params['v'] == 2:\n train_annotations_file = params['dir'] + '/v2_mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/v2_mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/v2_Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n else:\n train_annotations_file = params['dir'] + '/mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n\n if params['split'] == 1:\n\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n\n answer_dict = sum_over_occurences(train_anno['annotations'][i]['answers'])\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n\n # A modification to count the number of occurences of each answer and then store\n # them in the json file as well\n answer_dict = sum_over_occurences(val_anno['annotations'][i]['answers'])\n\n question_id = val_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, val_anno['annotations'][i]['image_id'])\n\n question = 
val_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n else:\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n test_ques = json.load(open(test_questions_file, 'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = val_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, val_anno['annotations'][i]['image_id'])\n\n question = val_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'test2015'\n for i in range(len(test_ques['questions'])):\n print(test_ques.keys())\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = test_ques['questions'][i]['question_id']\n image_path = imdir.format(subtype, test_ques['questions'][i]['image_id'])\n\n question = test_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n print('Training sample %d, Testing sample %d...' 
% (len(train), len(test)))\n\n if v2:\n json.dump(train, open('data/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/vqa_raw_test.json', 'w'))\n else:\n json.dump(train, open('data/VQAv1/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/VQAv1/vqa_raw_test.json', 'w'))", "def __init__(self, cfg, call_from='training'):\n \n utils.write_log(print_prefix+'Init era5_mesh obj...')\n utils.write_log(print_prefix+'Read input files...')\n \n # collect global attr\n self.era_src=cfg['TRAINING']['era5_src']\n self.ntasks=int(cfg['SHARE']['ntasks'])\n self.varlist=['u10','v10','msl', 'z']\n self.dsmp_interval=int(cfg['SHARE']['dsmp_interval'])\n\n self.s_sn, self.e_sn = int(cfg['SHARE']['s_sn']),int(cfg['SHARE']['e_sn'])\n self.s_we, self.e_we = int(cfg['SHARE']['s_we']),int(cfg['SHARE']['e_we'])\n\n if call_from=='training':\n \n timestamp_start=datetime.datetime.strptime(\n cfg['TRAINING']['training_start']+'00','%Y%m%d%H')\n timestamp_end=datetime.datetime.strptime(\n cfg['TRAINING']['training_end']+'23','%Y%m%d%H')\n all_dateseries=pd.date_range(\n start=timestamp_start, end=timestamp_end, freq='6H')\n\n self.dateseries=self._pick_date_frame(cfg, all_dateseries)\n \n elif call_from=='inference':\n fn_stream=subprocess.check_output(\n 'ls '+self.era_src+'wrfout*', shell=True).decode('utf-8')\n fn_list=fn_stream.split()\n start_basename=fn_list[0].split('/')[-1]\n if cfg['INFERENCE'].getboolean('debug_mode'):\n utils.write_log(print_prefix+'Debug mode turns on!')\n end_basename=fn_list[self.ntasks-1].split('/')[-1]\n else:\n end_basename=fn_list[-1].split('/')[-1]\n timestamp_start=datetime.datetime.strptime(start_basename[11:],'%Y-%m-%d_%H:%M:%S')\n timestamp_end=datetime.datetime.strptime(end_basename[11:],'%Y-%m-%d_%H:%M:%S')\n self.dateseries=pd.date_range(start=timestamp_start, end=timestamp_end, freq='H')\n \n self.load_data()", "def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. 
This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n 
cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )", "def process_input_file(filename):\n\n # Parse the input file\n try:\n ast = parser.parse(open(filename, 'r').read())\n except pyparsing.ParseBaseException as e:\n print \"Parse error in %s: %s\" % (os.path.basename(filename), str(e))\n sys.exit(1)\n\n ofinput = of_g.OFInput()\n\n # Now for each structure, generate lists for each member\n for s in ast:\n if s[0] == 'struct':\n name = s[1].replace(\"ofp_\", \"of_\", 1)\n members = [dict(m_type=x[0], name=x[1]) for x in s[2]]\n ofinput.classes[name] = members\n ofinput.ordered_classes.append(name)\n if name in type_maps.inheritance_map:\n # Clone class into header class and add to list\n ofinput.classes[name + \"_header\"] = members[:]\n ofinput.ordered_classes.append(name + \"_header\")\n elif s[0] == 'metadata':\n if s[1] == 'version':\n log(\"Found version: wire version \" + s[2])\n if s[2] == 'any':\n ofinput.wire_versions.update(of_g.wire_ver_map.keys())\n elif int(s[2]) in of_g.supported_wire_protos:\n ofinput.wire_versions.add(int(s[2]))\n else:\n debug(\"Unrecognized wire protocol version\")\n sys.exit(1)\n found_wire_version = True\n\n if not ofinput.wire_versions:\n debug(\"Missing #version metadata\")\n sys.exit(1)\n\n return ofinput", "def runStructure(fileext, included, excluded, options):\n\n datapath_contents = os.listdir(options.datapath)\n\n experiment_structure = {}\n replicate_file_list=[]\n included_reps=[]\n excluded_reps=[]\n for thing in datapath_contents:\n if os.path.isdir(\"%s/%s\" % (options.datapath,thing)):\n condition_files = os.listdir(\"%s/%s\" % (options.datapath,thing))\n replicates = []\n for file in 
condition_files:\n if re.match(\".+\\.%s$\" % fileext, file):\n file_fullpath = \"%s/%s/%s\" % (options.datapath,thing,file)\n poss_filenames = [file, file_fullpath,\n re.sub(\".gbgout\",\"\",file),\n re.sub(\".gbgout\",\"\",file_fullpath)]\n\n if len(set(poss_filenames).intersection(set(excluded)))==0:\n replicates.append(\"%s/%s/%s\" % (options.datapath,\n thing,\n file\n )\n )\n replicate_file_list.append(\"%s/%s/%s\" \\\n \"\" % (options.datapath,\n thing,\n file\n )\n )\n else:\n excluded_reps.append(file_fullpath)\n\n # mark files if they are in the includelist\n if len(included)>0:\n for file in replicates:\n base_file = os.path.basename(file)\n if base_file in included:\n included_reps.append(base_file)\n\n experiment_structure[thing]=replicates\n\n return(experiment_structure, replicate_file_list, included_reps, excluded_reps)", "def __init__(self):\n\n if whichsimulation == 0: # Mini-Millennium\n self.Hubble_h = 0.73\n self.BoxSize = 62.5 # Mpc/h\n self.MaxTreeFiles = 8 # FilesPerSnapshot\n self.OmegaM = 0.25\n self.OmegaL = 0.75\n self.z = 6.197\n self.aList = \"../input/treefiles/millennium/millennium.a_list\"\n\n elif whichsimulation == 1: # Full Millennium\n self.Hubble_h = 0.73\n self.BoxSize = 500 # Mpc/h\n self.MaxTreeFiles = 512 # FilesPerSnapshot\n self.OmegaM = 0.25\n self.OmegaL = 0.75\n self.z = 6.197\n self.aList = \"/lustre/projects/p014_swin/raw_data/millennium/full/millennium.a_list\"\n\n elif whichsimulation == 2: # Full Bolshoi\n self.Hubble_h = 0.7\n self.BoxSize = 250 # Mpc/h\n self.MaxTreeFiles = 125 # FilesPerSnapshot\n self.OmegaM = 0.27\n self.OmegaL = 0.73\n self.z = 6.197\n self.aList = \"/lustre/projects/p004_swin/msinha/tao/data_products/input/bolshoi-planck/lhalotree/run1/bolshoi-planck.a_list\"\n\n else:\n print \"Please pick a valid simulation!\"\n exit(1)", "def input_models():\n return [\n PDBFile(\n Path(golden_data, \"protdna_complex_1.pdb\"),\n path=golden_data,\n score=42.0,\n restr_fname=Path(golden_data, \"example_ambig_1.tbl\")\n ),\n PDBFile(\n Path(golden_data, \"protdna_complex_2.pdb\"),\n path=golden_data,\n score=28.0,\n restr_fname=Path(golden_data, \"example_ambig_2.tbl\")\n )]", "def load_M4_comp(iType = None) :\n\n tsspecs = {};\n \n trainfile = \"https://github.com/antoinecarme/pyaf/blob/master/data/M4Comp/M4Comp_\" + iType + \".csv.gz?raw=true\"\n # trainfile = \"data/M4Comp/M4Comp_\" + iType + \".csv.gz\"\n\n df_full = pd.read_csv(trainfile, sep=',', header=0, engine='python', compression='gzip');\n lHorizons = df_full[['H' , 'ID']].copy();\n lHorizons['ID'] = lHorizons['ID'].apply(lambda x : x.replace(\" \", \"\"));\n lHorizons['H'] = lHorizons['H'].apply(lambda x : int(x))\n\n for i in range(0, df_full.shape[0]):\n tsspec = cTimeSeriesDatasetSpec();\n series_name = lHorizons['ID'][i]\n tsspec.mName = series_name;\n\n if(((i+1) % 500) == 0):\n print(\"loading \", i+1 , \"/\" , df_full.shape[0] , series_name);\n tsspec.mPastData = pd.Series(df_full['PAST'][i].split(\",\")).apply(float);\n tsspec.mFutureData = pd.Series(df_full['FUTURE'][i].split(\",\")).apply(float);\n tsspec.mFullDataset = pd.DataFrame();\n tsspec.mFullDataset[series_name] = pd.concat((tsspec.mPastData, tsspec.mFutureData), axis = 0).reindex();\n tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])\n tsspec.mTimeVar = \"Date\";\n tsspec.mSignalVar = series_name;\n tsspec.mFullDataset.reindex()\n tsspec.mHorizon = {};\n tsspec.mHorizon[series_name] = lHorizons['H'][i];\n tsspec.mCategory = \"M4Comp\";\n tsspecs[tsspec.mName] = tsspec;\n\n return 
tsspecs", "def __init__(self, travel_model_dir_name, mode='full', years_to_run=None, procedure_file=\"opus.par\"):\n\n\ttravel_model_configuration = {}\n\t\n\ttravel_model_configuration.update( {'visum_version_number': 10} )\n\t\n\t### mapping from visum matrice name to urbansim travel_data variable name\n\t## dict key is used as matrix number for VisumPy.helpers.GetODMatrix and VisumPy.helpers.GetSkimMatrix\n\t## dict value is used as attribute name for urbansim travel_data table\n\ttm_to_urbansim_variables = {\n\t'od':{\n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1:'transit_trips', #'transit (PuT - public transport) trips',\n\t2:'auto_trips', #'auto trips',\n\t}, \n\t'skim':{ \n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1: 'auto_travel_time', #'auto assigned travel time (ttc)',\n\t2: 'transit_in_vehicle_time' #'PuT in-vehicle time (ivt)',\n\t} \n\t}\n \n\t### TAZ attributes to be transferred from urbansim to visum\n\turbansim_to_tm_variables = [\n\t 'TAZ=(zone.zone_id).astype(int16)',\n\t 'retail_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_retail)', \n\t ## the employment groups below need to be defined in employment_adhoc_sector_groups and \n\t ## employment_adhoc_sector_group_definitions before they can be used\n\t #'fires_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_fires)',\n\t #'gov_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_gov)',\n\t #\"educ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_educ)\",\n\t #\"wtcu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_wtcu)\",\n\t #\"manu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_manu)\",\n\t #\"univ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_univ)\",\n\t ## need to change income categories to 4 instead of 3\n\t \"low_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_low_income_households)\",\n\t \"mid_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_mid_income_households)\",\n\t #\"upper_mid_income_hh_by_taz=?\",\n\t \"upper_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_high_income_households)\",\n\t ## need variable specification\n\t #\"pctmf=?\",\n\t #\"gqi=?\",\n\t #\"gqn=?\",\n\t #\"fteuniv=?\",\n\t #\"density=?\"\n ]\n \n\ttravel_model_configuration.update( {\n\t \"tm_to_urbansim_variables\":tm_to_urbansim_variables,\n\t \"urbansim_to_tm_variables\":urbansim_to_tm_variables,\n\t} )\n\t\n\tself.__add_models(travel_model_configuration, mode)\n\tself.__add_years(travel_model_configuration, travel_model_dir_name, years_to_run, procedure_file)\n\n\tself.merge(travel_model_configuration)", "def generatePPC(self, nChainEntries=500):\n generatedData = []\n generatedNeutronSpectra=[]\n generatedDeuteronSpectra=[]\n totalChainSamples = len(self.chain[-50:,:,0].flatten())\n \n # TODO: this next line could mean we repeat the same sample, i think\n samplesToGet = np.random.randint(0, totalChainSamples, size=nChainEntries)\n for sampleToGet in samplesToGet:\n modelParams = []\n for nParam in range(self.nParams):\n modelParams.append(self.chain[-50:,:,nParam].flatten()[sampleToGet])\n \n \n e0, loc, scale, s = modelParams[:4]\n scaleFactorEntries = modelParams[4:4+self.nRuns]\n returnedData = [self.generateModelData([e0, loc, scale, s, scaleFactor],\n standoff, tofrange, tofbins,\n self.ddnXSinstance, self.stoppingModel.dEdx,\n self.beamTiming, self.nSamplesFromTOF, True) for\n scaleFactor, standoff, 
tofrange, tofbins\n in zip(scaleFactorEntries, \n self.standoffs[:self.nRuns],\n self.tof_range[:self.nRuns],\n self.tofRunBins[:self.nRuns])]\n # returned data is an array of .. a tuple (modelData, neutronSpectrum, deuteronSpectrum)\n modelData = []\n modelNeutronSpectrum = []\n modelDeuteronSpectrum=[]\n for retDat in returnedData:\n modelData.append(retDat[0])\n modelNeutronSpectrum.append(retDat[1])\n modelDeuteronSpectrum.append(retDat[2])\n generatedData.append(modelData)\n generatedNeutronSpectra.append(modelNeutronSpectrum)\n generatedDeuteronSpectra.append(modelDeuteronSpectrum)\n \n self.tofData = generatedData\n self.neutronSpectra= generatedNeutronSpectra\n self.deuteronSpectra = generatedDeuteronSpectra\n return (generatedData, \n generatedNeutronSpectra, \n generatedDeuteronSpectra)", "def samples_from_sfei_moorings(run_start,static_dir):\n # And pull some SFEI data:\n\n mooring_xy=[]\n mooring_salt=[]\n\n L2_dir='/opt/data/sfei/moored_sensors_csv/L2/'\n\n # tuples (<name in observation points shapefile>, <L2 data file name> )\n sfei_moorings=[\n ('ALV',\"ALV_all_data_L2.csv\"),\n ('SFEI_Coyote',\"COY_all_data_L2.csv\"),\n ('DB',\"DMB_all_data_L2.csv\"),\n ('SFEI_Guadalupe',\"GL_all_data_L2.csv\"),\n ('SFEI_Mowry',\"MOW_all_data_L2.csv\"),\n ('SFEI_Newark',\"NW_all_data_L2.csv\"),\n ('SFEI_A8Notch',\"POND_all_data_L2.csv\"),\n ('SMB',\"SM_all_data_L2.csv\")\n ]\n\n # lat/lon from observation-points\n # FRAGILE - FIX!\n obs_shp=wkb2shp.shp2geom(os.path.join(static_dir,\"observation-points.shp\"))\n\n for name,l2_file in sfei_moorings:\n print(name)\n fn=os.path.join(L2_dir,l2_file)\n if not os.path.exists(fn):\n logger.warning(\"No SFEI mooring data - will not be able to initialize LSB initial condition\")\n continue\n sfei=pd.read_csv(fn,parse_dates=['Datetime','dt'],low_memory=False)\n sfei_salt=sfei['S_PSU']\n valid=~(sfei_salt.isnull())\n # limit to data within 20 days of the request\n sfei_salt_now=utils.interp_near(utils.to_dnum(run_start),\n utils.to_dnum(sfei.Datetime[valid]),sfei_salt[valid],\n max_dx=20.0)\n geom=obs_shp['geom'][ np.nonzero(obs_shp['name']==name)[0][0] ]\n xy=np.array(geom)\n if np.isfinite(sfei_salt_now):\n mooring_xy.append(xy)\n mooring_salt.append(sfei_salt_now)\n\n if len(mooring_xy):\n xy=np.array(mooring_xy)\n sfei_init_salt=np.c_[xy[:,0],xy[:,1],mooring_salt]\n else:\n sfei_init_salt=np.zeros( (0,3),'f8')\n return sfei_init_salt", "def __init__(self):\n\n\t\t# PDB fields\n\t\tself.s_name = \"\"\t\t\t\t# Name of the structure\n\t\tself.l_s_leading_data = []\t\t# PDB information written above the atom properties\n\t\tself.l_s_trailing_data = []\t\t# PDB information written under the atom properties\n\n\t\t# Structural fields\n\t\tself.i_atom_count = 0\t\t\t# Number of atoms in the structure\n\t\tself.a_atoms = None\t\t\t\t# Array of atoms properties\n\t\tself.a_max_coord = None\t\t\t# Maximal coordinates for each axis\n\t\tself.a_min_coord = None\t\t\t# Minimal coordinates for each axis\n\n\t\t# Grid fields\n\t\tself.a_grid = None\t\t\t\t# 3D grid containing the structure\n\t\tself.l_l_elements = None\t\t# Set of atoms contained in the structure\n\n\t\t# Solubilization fields\n\t\tself.o_tree = None\t\t\t\t\t\t# A KDTree object representing the exact placement of atoms, used for distance determination\n\n\t\t# Comparison fields\n\t\tself.b_loaded = False\t\t# Keeps tracks of the state of the structure\n\n\t\t# Ligands fields\n\t\tself.l_o_ligands = []\t\t# A list for the ligands\n\n\t\t# Pocket fields\n\t\tself.l_i_pocket_residues = []\t\t# 
List of the residues included in the pocket\n\t\tself.a_pocket_atoms = None\t\t\t# Array of the pocket atoms properties\n\t\tself.a_pocket_grid = None\t\t\t# 3D grid containing the pocket\n\n\t\t# Miscellaneous fields\n\t\tself.a_min_coord = None\t\t# Minimum coordinates of the structure\n\t\tself.a_max_coord = None\t\t# Maximum coordinates of the structure\n\t\tself.f_mass = 0.0\t\t\t# Mass of the structure", "def buildClusters(self):\n oldLatFile = 'needed_files/lat.in'\n oldFile = open(oldLatFile, 'r')\n oldLines = [line for line in oldFile]\n oldFile.close()\n \n newFile = open('enum/lat.in','w')\n for i in xrange(len(oldLines)):\n if 'Number pairs' in oldLines[i-1] and i>=1: #bch use label on previous line\n for num in self.clusterNums:\n newFile.write(str(num) + \" \")\n newFile.write(\"\\n\")\n else:\n newFile.write(oldLines[i])\n newFile.close()\n \n lastDir = os.getcwd()\n os.chdir(lastDir + '/enum')\n if sum(self.clusterNums)<=1500: #the 1500 assumes you are running Main with 16G. \n subprocess.call([self.uncleExec, '10'], stdout=self.uncleOut)\n else:\n subprocess.call(['echo','Warning: BLOCKING CLUSTER JOB to save time'])\n# clustersjob = ClustersBuild.clustersjob()\n# clustersjob.clustBuild()\n# \n os.chdir(lastDir)" ]
[ "0.58579195", "0.569111", "0.5684001", "0.5625636", "0.5491703", "0.5471267", "0.5419827", "0.5221259", "0.5216647", "0.51783824", "0.5170403", "0.5136936", "0.5105469", "0.5097625", "0.5095089", "0.50907356", "0.5079668", "0.50673115", "0.50583243", "0.5052894", "0.50310284", "0.50290203", "0.50286514", "0.5028447", "0.5026145", "0.50234544", "0.5019249", "0.5016017", "0.5008703", "0.5007941" ]
0.7737415
0
Gets total number of structures in enumeration
def getNtot(self,dir): return int(readfile(dir + '/struct_enum.out')[-1].split()[0])
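A minimal sketch of the expression at work, assuming readfile is a helper that returns the file's lines; the struct_enum.out contents below are invented for illustration.

def readfile(path):            # hypothetical stand-in for the helper
    with open(path) as f:
        return f.readlines()

with open('/tmp/struct_enum.out', 'w') as f:
    f.write('# enumeration header ...\n')
    f.write('41504  remaining columns ...\n')   # invented final line

# The first token of the last line is the running structure index,
# i.e. the total number of enumerated structures.
print(int(readfile('/tmp/struct_enum.out')[-1].split()[0]))   # 41504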
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\n return len([i for i in self.iteritems()])", "def __len__(self):\n return len(self._enums)", "def count(self):\n return self.size()", "def size(self):\n\t\treturn self._count", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)", "def countitems(self):\n count = 0\n sid = self.client.scannerOpen(self.table, '', ['f:s'])\n while 1:\n r = self.client.scannerGetList(sid, 1000)\n #r = self.client.scannerGet(sid)\n if not r: break\n count += len(r)\n logging.debug('%d %s', count, r[-1].row)\n self.scannerClose(sid)\n return count", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def count(self):\n return len(self.names)", "def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result", "def count(self):\n return len(self.read_ints())", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def getSize(self):\n return sum(m.getSize() for m in self.members)", "def count(self):\n # TODO not implemented yet\n return 0", "def getNumElements(self):\n return 1 + sum(m.getNumElements() for m in self.members)", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def count(self):\n return len(self)", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def get_types_count():\n return len(type_dict.keys())", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs", "def getTotalIndividualCount(self):\r\n return self._n", "def size(self):\r\n return len(atoms)", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def size(self):\n\t\treturn len(self.membersWithErrors)" ]
[ "0.6824703", "0.6796373", "0.6704745", "0.6684227", "0.66267043", "0.6545837", "0.65361506", "0.6515884", "0.6438335", "0.64224446", "0.6421904", "0.64189386", "0.6402237", "0.6392048", "0.6386996", "0.63749266", "0.6371249", "0.63576716", "0.63453263", "0.6344213", "0.6292458", "0.6272826", "0.6272826", "0.6272826", "0.6272826", "0.6270193", "0.62693816", "0.62657195", "0.6260872", "0.6251905" ]
0.7235647
0
Creates a directory for each atom in the atom list specified in settings.in. All the VASP and UNCLE files for the atom will be placed in this directory.
def makeAtomDirectories(self): for atom in self.atoms: atomDir = os.getcwd() + '/' + atom if not os.path.isdir(atomDir): subprocess.call(['mkdir',atomDir])
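The same effect can be sketched with the standard library instead of shelling out to mkdir; the atom list here is invented.

import os

atoms = ['Cu', 'Pt', 'Ag']                    # invented atom list
for atom in atoms:
    atomDir = os.path.join(os.getcwd(), atom)
    os.makedirs(atomDir, exist_ok=True)       # no-op if it already exists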
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)", "def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)", "def create_folders(self):\n\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_main_dir(self):\n\n set_of_dirs= [ self.root_laas_ws,\n self.root_laas_ws+'/src',\n self.root_laas_ws+'/install']\n \n # Creates set_of_dirs if they do not exist\n for a_dir in list_of_dirs:\n if not os.path(a_dir).is_dir():\n os.makedirs(a_dir,0o777,True)", "def initialize(self, directory_list=None):\n for directory in directory_list:\n if not os.path.exists(directory):\n os.makedirs(directory)", "def initialize_directories(): # pragma: no cover\n\n for i in (CACHE_DIR, CONFIG_DIR):\n i.mkdir(parents=True, exist_ok=True)", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def create_paths(self):\n for path in self.PATHS_TO_CREATE:\n path = os.path.expanduser(path)\n if not os.path.isdir(path):\n if self.dry_run:\n print('mkdir -p {}'.format(path))\n else:\n os.makedirs(path)", "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def create_dirs():\n\tif os.path.isdir(path):\n\t\tshutil.rmtree(path, ignore_errors=True)\n\tos.makedirs(path+\"/log\",exist_ok=True)\n\tos.makedirs(path+\"/losses\",exist_ok=True) \n\tos.makedirs(path+\"/samples\",exist_ok=True)\n\tos.makedirs(path+\"/model\",exist_ok=True)\n\tos.makedirs(path+\"/datasets\",exist_ok=True)\n\tshutil.copy2(\"config.py\", path+\"/config.py\")\n\tfor i in rconfig[\"datasets\"]:\n\t\tdsconfig 
= get_dsconfig(i)\n\t\tos.makedirs(path+\"/datasets/\"+dsconfig[\"id\"],exist_ok=True)\n\t\tshutil.copy2(i+\"/dsconfig.py\", path+\"/datasets/\"+dsconfig[\"id\"]+\"/dsconfig.py\")\n\t\tcopytree(dsconfig[\"split\"], path+\"/datasets/\"+dsconfig[\"id\"]+\"/split\")", "def make_directories():\n os.mkdir('principal_wings')\n os.mkdir('random_wings')", "def createDirs():\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/xml/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/xml/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Uploads/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Uploads/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/')", "def make_dir_from_list(dirList):\n for dirName in dirList:\n if not os.path.exists(dirName):\n os.mkdir(dirName)\n print(\"Directory \", dirName, \" Created \")\n else:\n pass\n #print(\"Directory \" , dirName , \" already exists\")", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def make_dirs():\n global paths_made\n\n #Have we done this already? 
Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def _build_em_dirs(self):\n for tn in self.Tn:\n if not os.path.exists(os.path.join(self.em_res_top_dir,'tn%d'%tn)):\n os.makedirs(os.path.join(self.em_res_top_dir,'tn%d'%tn))", "def create_directories(path):\n directories = ['images', 'pdf', 'videos', 'audio', 'spreedsheet', 'text', 'scripts', 'docs', 'other']\n for directory in directories:\n create_directory(path, directory)", "def create_directory_structure(root, structure_path_list):\n path = os.path.join(root, *structure_path_list)\n try:\n os.makedirs(path)\n except OSError:\n print(\"failed to create directory structure\")\n sys.exit(2)", "def makeDirTree(baseDir, argNameList, argValueList, connectionSymbol=\"=\", seperationSymbol=\",\"):\n\tpreExeMkdirPost(argNameList, argValueList, baseDir)", "def directory_setup(self):\n if not os.path.exists(self.settings_dir):\n os.makedirs(self.settings_dir)\n\n if not os.path.exists(self.sync_dir):\n os.makedirs(self.sync_dir)", "def make_directories(names):\n os.mkdir('Student_Folders')\n os.chdir('Student_Folders')\n for name in names:\n os.mkdir(name)\n os.chdir(name)\n sub_dirs = ['Term 1', 'Term 2', 'Term 3']\n for drcty in sub_dirs:\n os.mkdir(drcty)\n os.chdir('..')", "def create_directories(self, app_label):\n for folder_name in [\"views\", \"urls\", \"templates/%s\" % app_label]:\n directory_path = \"%s/%s\" % (app_label, folder_name)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)", "def create_dirs():\n run(\"mkdir -p %s\"%RUN_DIR)\n run(\"mkdir -p %s\"%LOG_DIR)", "def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1" ]
[ "0.6082272", "0.606713", "0.599873", "0.5985384", "0.5974638", "0.59574753", "0.59574753", "0.5939133", "0.5914958", "0.5829736", "0.58179426", "0.5801176", "0.5781476", "0.57666725", "0.576396", "0.57573605", "0.57452446", "0.56806016", "0.5641607", "0.56265324", "0.56227124", "0.5606009", "0.5589586", "0.55847347", "0.55520236", "0.55493", "0.5542284", "0.55366784", "0.5533401", "0.553082" ]
0.79111665
0
Propagate an event down to the base level view. An event first trickles down to the bottom level view, and then bubbles back up the view stack.
def _propagate_event(self, event, window): if self.active_view is None: return self._event_bus.propagate_event(event, self.active_view, window)
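A minimal sketch of an event bus honoring the trickle-down/bubble-up contract described above; the child attribute and the on_capture/on_bubble handler names are assumptions, not part of the original.

class EventBus:
    def propagate_event(self, event, view, window):
        stack = []
        v = view
        while v is not None:              # trickle down to the base view
            stack.append(v)
            v.on_capture(event, window)
            v = getattr(v, 'child', None)
        for v in reversed(stack):         # bubble back up the view stack
            v.on_bubble(event, window)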
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_event(self, event):\n self.give_sub_event.handle_event(event)", "def call(self, event):\n self.root.event_generate(event, when=\"tail\")", "def call(self, event):\n self.frame.event_generate(event, when=\"tail\")", "def react_to_event(self):\n raise NotImplementedError()", "def on_event(self, event):\r\n pass", "def trigger(self, event=None, method=EVENT_CAPTURE):\n\n # Event Capturing\n if method == EVENT_CAPTURE:\n self.run_handlers(event, EVENT_CAPTURE)\n\n # Let children capture the event\n if self.has_child():\n self.focused_child.trigger(event, EVENT_CAPTURE)\n\n else: # At the last child, start the bubbling\n self.trigger(event, EVENT_BUBBLE)\n\n # Event Bubbling\n else:\n self.run_handlers(event, EVENT_BUBBLE)\n\n # Let the event bubble to the parents\n if self.has_parent():\n self.parent.trigger(event, EVENT_BUBBLE)", "def handle_event(self, event):\n raise NotImplementedError(\n \"handle_event() is not implemented for base class.\")", "def on_event(self, event):\n pass", "def handle_event(self, event):\n raise _InheritanceError('Function not defined')", "def handle_event(self, event):\n if self.sub_event is not None:\n self.sub_event.handle_event(event)\n return\n\n elif not self.response_box.is_dead:\n self.response_box.handle_event(event)", "def handle_event(self, event):\n if self.active_sell_event is not None:\n self.active_sell_event.handle_event(event)\n else:\n super().handle_event(event)", "def __call__(self, event):\n if not self.events or event in self.events:\n super(EventHandler, self).__call__(event)", "def handle_event(self, event):\n pass", "def onMove(self,event=None):\n if self.app.DEBUG:\n print 'Event: Parent: %s.onMove'%self.__class__\n if self.redraw:self.redraw()", "def handleEvent(self, event):\n pass", "def handle_event(self, event, window):\n pass", "def on_event(self, event):", "def handle_event(self, event):", "def handle_event(self, event):\n if self.sub_event is not None:\n self.sub_event.handle_event(event)\n else:\n self.confirm_response.handle_event(event)", "def process_event(self, event):\n\t\tself.current_screen.control_manager.process_event(event)", "def ProcessMgrEvent(self, event):\r\n\r\n # first, give the owner frame a chance to override\r\n if self._frame:\r\n if self._frame.GetEventHandler().ProcessEvent(event):\r\n return\r\n \r\n self.ProcessEvent(event)", "def visit_event(self, event):", "def add_event(self, event):\r\n return super().insert_event(event)", "def raiseEvent (self, event, *args, **kw):\n rv = EventMixin.raiseEvent(self, event, *args, **kw)\n if type(event) is not Update:\n EventMixin.raiseEvent(self, Update(event))\n return rv", "def process_event(self, event):\r\n pass", "def trigger(self, type, event):", "def handleEvent(self, event):\n for child in self.children:\n child.handleEvent(event)", "def send_event(self, event):\n\n\t\tself._window.send_event(event)", "def onViewLog(self):\n view_log.ViewLog(self.root, self.log)", "def on_event(self, event):\r\n\r\n print(\"on event called, event:\", event)\r\n\r\n self.state = self.state.on_event(event)\r\n publish_state_msg(state_msg, odrive_bridge.get_state())" ]
[ "0.60731107", "0.59084016", "0.5793149", "0.57658976", "0.5637927", "0.5627393", "0.56154233", "0.55550563", "0.55541426", "0.552596", "0.55187017", "0.550784", "0.54867345", "0.54576814", "0.544562", "0.5444484", "0.544216", "0.5440826", "0.540754", "0.5361798", "0.53442204", "0.523279", "0.5231625", "0.5226547", "0.5205803", "0.5204477", "0.5191809", "0.51688975", "0.5151924", "0.5072882" ]
0.7564274
0
Attach this controller to a window. Once a controller is attached to a window it will block. Events in the window are polled and handled on each pass of the render loop.
async def attach_to_window(self, window): try: while True: self._poll(window) window.erase() self.render(window) window.update_cursor() window.refresh() await asyncio.sleep(0) except KeyboardInterrupt: window.close_reason = 'Ctrl-C' except CloseSplutterWindow as e: window.close_reason = str(e)
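A hypothetical way to drive the coroutine with asyncio; concrete controller and window instances are assumed to exist.

import asyncio

async def main(controller, window):
    # Blocks until the loop exits; both Ctrl-C and CloseSplutterWindow
    # set window.close_reason before attach_to_window returns.
    await controller.attach_to_window(window)
    print('window closed:', window.close_reason)

# asyncio.run(main(controller, window))    # with real instances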
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Window(self, w):\r\n\r\n self.window = w\r\n return self", "def SetWindow(self, w):\r\n\r\n self.window = w", "def show(self, window):\r\n\r\n return", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def add_window(self, window: AbstractView) -> None:\n self._logger.debug(\"running\")\n window.setParent(self)\n self.addSubWindow(window)\n window.show()\n window.restore_window()\n self._logger.debug(\"done\")", "def rendererWindowActivated(self, sw):\n pass", "def show_window(self):\n self._window.grab_set()\n self._window.wait_window()", "def do_activate(self, *args, **kwargs):\n self.register_signals()\n self.perform_setup()\n assert self.main_window\n self.main_window.show()\n self.hold()", "def set_window(self, handle):\n pass", "def show(self):\n # * displays the window, after using either the iconify or the withdraw methods\n self.wm_deiconify()\n # * this method can be called after the event which needs to happen before the window event\n self.wait_window()", "def show_window(self):\n self.show()", "def win_raise(self):\n self.raise_()\n self.activateWindow()", "def present(self):\n if self.isWindow :\n self.present(self)\n else :\n assert hasattr(self, 'window'), \\\n \"ManagedWindow: self.window does not exist!\"\n self.window.present()", "def ev_windowenter(self, event: WindowEvent) -> None:", "def SetManagedWindow(self, managed_window):\r\n\r\n if not managed_window:\r\n raise Exception(\"Specified managed window must be non-null. \")\r\n \r\n self._frame = managed_window\r\n self._frame.PushEventHandler(self)\r\n\r\n # if the owner is going to manage an MDI parent frame,\r\n # we need to add the MDI client window as the default\r\n # center pane\r\n\r\n if isinstance(self._frame, wx.MDIParentFrame):\r\n mdi_frame = self._frame\r\n client_window = mdi_frame.GetClientWindow()\r\n\r\n if not client_window:\r\n raise Exception(\"Client window is None!\")\r\n\r\n self.AddPane(client_window, AuiPaneInfo().Name(\"mdiclient\").\r\n CenterPane().PaneBorder(False))\r\n\r\n elif isinstance(self._frame, tabmdi.AuiMDIParentFrame):\r\n\r\n mdi_frame = self._frame\r\n client_window = mdi_frame.GetClientWindow()\r\n\r\n if not client_window:\r\n raise Exception(\"Client window is None!\")\r\n\r\n self.AddPane(client_window, AuiPaneInfo().Name(\"mdiclient\").\r\n CenterPane().PaneBorder(False))", "def start_render_window(self):\n\n # Initialize interactor\n self.__render_window_interactor.Initialize()\n\n # Start render window with interactor\n self.__render_window.Render()\n self.__render_window_interactor.Start()", "def TransferToWindow(self):\n return True", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)", "def show(self):\n self._window.show()", "def begin(self, controller):\n self.controller = controller\n self.centre.findChild(QScrollArea, \"filtersScroller\")\\\n .findChild(QWidget, 
\"scrollerContents\").setLayout(self.filterVbox)\n\n self.extract_filters()\n self.centre.findChild(QLabel, \"loadingLabel\").hide()\n self.setup_buttons()\n\n self.window.show()", "def open_transitWindow(self):\n self.window = surveyWindow(self, imaging=False)\n self.hide()", "def activate_controller(self):\n if self.controller_address:\n #print \"Activating controller...\"\n self.controller = Controller(\n self.controller_address,\n self.proxy_address,\n self.migrating)\n self.controller.switch = self\n else:\n print \"[WARNING] Controller undefined\"", "def ev_windowshown(self, event: WindowEvent) -> None:", "def ev_windowenter(self, event: tcod.event.WindowEvent) -> T | None:", "def __window_focus(self):\n pass", "def move_to_win(self):\n self.external_win = PlotWindow(plot=self.pw, parent=self)\n self.external_win.closeWin.connect(lambda: self.layout().takeAt(1))\n self.external_win.closeWin.connect(lambda: self.layout().insertWidget(1, self.pw))\n self.external_win.closeWin.connect(lambda: self.btn_open.setEnabled(True))\n self.external_win.show()", "def launchSyncToolWindow(self):\r\n self.unbind()\r\n self.videoPlayer.pause()\r\n self.w_synctool = SyncToolWindow(self)", "def do_activate(self):\n\n Gtk.Application.do_activate(self)\n self.initiate_plugins()\n self.other[\"menu_button\"].set_menu_model(self.prepare_menu())\n self.output_window.show_all()\n self.window.show_all()", "def SetWindow(self, wnd):\r\n\r\n self._wnd = wnd\r\n\r\n if wnd.GetSizer(): # the window is a complex one hold by a sizer\r\n size = wnd.GetBestSize()\r\n else: # simple window, without sizers\r\n size = wnd.GetSize()\r\n\r\n # We have to bind the wx.EVT_SET_FOCUS for the associated window\r\n # No other solution to handle the focus changing from an item in\r\n # CustomTreeCtrl and the window associated to an item\r\n # Do better strategies exist?\r\n self._wnd.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)\r\n \r\n self._height = size.GetHeight() + 2\r\n self._width = size.GetWidth()\r\n self._windowsize = size\r\n \r\n # We don't show the window if the item is collapsed\r\n if self._isCollapsed:\r\n self._wnd.Show(False)\r\n\r\n # The window is enabled only if the item is enabled \r\n self._wnd.Enable(self._enabled)\r\n self._windowenabled = self._enabled" ]
[ "0.6167363", "0.61673355", "0.6161434", "0.6100601", "0.60200405", "0.58870715", "0.5881838", "0.5869799", "0.5843875", "0.58101106", "0.5788466", "0.5780572", "0.57512116", "0.5646469", "0.5554697", "0.5553541", "0.55286634", "0.55226207", "0.54758424", "0.53899294", "0.5368905", "0.53531367", "0.53426194", "0.5335518", "0.5333432", "0.5317826", "0.5317174", "0.53131586", "0.5292531", "0.52915835" ]
0.68219286
0
Sets the new coverage of the test. The physical test file will also be renamed so that the coverage value is appended to the end of the file name.
def set_coverage(self, coverage): self.coverage = coverage if os.path.isfile(TESTS_PATH + "/" + self.name): os.rename(TESTS_PATH + "/" + self.name, TESTS_PATH + "/" \ + self.app_pkg + "_"+self.timestamp + "_" \ + str(coverage) + ".sh") else: f_out = open(TESTS_PATH + "/" + self.app_pkg + "_"+self.timestamp \ + "_" + str(coverage) + ".sh", "w") f_out.write(self.script) f_out.close() self.name = self.app_pkg + "_" + self.timestamp + "_" + str(coverage) + ".sh"
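A worked example of the renaming scheme (all values invented): the coverage ends up as the last underscore-separated field of the script name.

app_pkg, timestamp, coverage = 'com.example.app', '20240101-120000', 73
new_name = app_pkg + '_' + timestamp + '_' + str(coverage) + '.sh'
print(new_name)     # com.example.app_20240101-120000_73.sh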
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coverage():\n print(\"Coverage tests always re-run\")\n with safe_cd(SRC):\n my_env = config_pythonpath()\n # You will need something like this in pytest.ini\n # By default, pytest is VERY restrictive in the file names it will match.\n #\n # [pytest]\n # DJANGO_SETTINGS_MODULE = core.settings\n # python_files = tests.py test_*.py *_tests.py test*_*.py *_test*.py\n if not os.path.exists(\"pytest.ini\") and IS_DJANGO:\n print(\n \"pytest.ini MUST exist for Django test detection or too few tests are found.\"\n )\n exit(-1)\n return\n\n my_env = config_pythonpath()\n command = \"{0} py.test {1} --cov={2} --cov-report html:coverage --cov-fail-under 55 --verbose\".format(\n PIPENV, \"test\", PROJECT_NAME\n )\n execute_with_environment(command, my_env)", "def cov():\n cov = coverage.coverage(branch=True, include='project/*')\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def coverage(session) -> None:\n session.install(\".[test]\", \"pytest-cov\")\n session.run(\n \"pytest\", \"-n\", \"auto\", \"--cov=./\", \"--cov-report=xml\", *session.posargs\n )", "def cov():\n cov = coverage.coverage(\n branch=True,\n include='project/*',\n omit=\"*/__init__.py\"\n )\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print 'Coverage Summary:'\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def coverage(session):\n session.install(\"coverage[toml]\", \"codecov\")\n session.run(\"coverage\", \"xml\", \"--fail-under=0\")\n session.run(\"codecov\", *session.posargs)", "def cov(test_class):\n if test_class == 'all':\n tests = unittest.TestLoader().discover('project/tests')\n else:\n # note, test module must be imported above, doing lazily for now\n test_module = globals()[test_class]\n tests = unittest.TestLoader().loadTestsFromTestCase(test_module)\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def main():\n import coverage\n import nose\n import os\n from shutil import rmtree\n rmtree('./covhtml', ignore_errors=True)\n try:\n os.remove('./.coverage')\n except Exception,e:\n pass\n\n # run nose in its own process because the .coverage file isn't written\n # until the process terminates and we need to read it\n nose.run()", "def coverage(ctx):\n ctx.run(\"coverage run --source {PROJECT_NAME} -m pytest\".format(PROJECT_NAME=PROJECT_NAME))\n ctx.run(\"coverage report -m\")\n ctx.run(\"coverage html\")", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def test_coverage_4(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example.json\"\n inst = 
coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_4(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_4(inst2)", "def cov():\n tests = unittest.TestLoader().discover('project/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def test_coverage_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-2.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_1(inst2)", "def test(coverage):\n print('success')\n pass", "def cov():\n tests = unittest.TestLoader().discover('tests')\n result = unittest.TextTestRunner(verbosity=1).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def task_coverage():\n return {\n 'actions': ['py.test --cov nikola --cov-report term-missing tests/'],\n 'verbosity': 2,\n }", "def cov():\n tests = unittest.TestLoader().discover('project/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n COV.html_report()\n COV.erase()\n return 0\n return 1", "def _cmd_coverage(args):\n pset = coverage.do_coverage(\n args.interval,\n args.bam_file,\n args.count,\n args.min_mapq,\n args.processes,\n args.fasta,\n )\n if not args.output:\n # Create an informative but unique name for the coverage output file\n bambase = core.fbase(args.bam_file)\n bedbase = core.fbase(args.interval)\n tgtbase = (\n \"antitargetcoverage\" if \"anti\" in bedbase.lower() else \"targetcoverage\"\n )\n args.output = f\"{bambase}.{tgtbase}.cnn\"\n if os.path.exists(args.output):\n args.output = f\"{bambase}.{bedbase}.cnn\"\n core.ensure_path(args.output)\n tabio.write(pset, args.output)", "def update_node_coverage(G, node, new_cov):\n if node not in G.nodes(): # nothing to be done, perhaps already removed\n return\n if new_cov == 0:\n G.remove_node(node)\n if rc_node(node) in G.nodes():\n G.remove_node(rc_node(node))\n else:\n G.add_node(node, cov=new_cov)\n if rc_node(node) in G.nodes():\n G.add_node(rc_node(node), cov=new_cov)", "def coverage(context):\n context.run(\" \".join([\n \"python -m pytest\",\n \"--cov=%s\" % PACKAGE_NAME,\n \"--cov-report html\",\n \"--cov-branch\",\n \"--cov-fail-under=75\"\n ]))", "def test_run_coverage(self):\n cmd = GreenTestCommand(Distribution())\n cmd.coverage = True\n cmd.ensure_finalized()\n cmd.run()\n 
self.assertThat(_subprocess_call_args(), Contains(\"-r\"))", "def test_coverage_3(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-ehic.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_3(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_3(inst2)", "def run_test_coverage(self, build=False):\n\n # print('Running unit tests for package %s' % package)\n if self.is_metapackage:\n self.out = 'This is a metapackage'\n return 0\n\n if not self.has_test:\n self.out = 'No tests defined on CMakeLists.txt'\n return 0\n\n if build:\n self.build_for_coverage() \n\n # Capture initial zero coverage data\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --directory build --zerocounters')\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --capture --initial --directory build/' + self.name + ' --output-file build/lcov.base')\n\n # Run tests with coverage flags\n extra_parms = '--no-deps --cmake-args -DCMAKE_CXX_FLAGS=\"-g -O0 -Wall -fprofile-arcs -ftest-coverage\" -DCMAKE_EXE_LINKER_FLAGS=\"-fprofile-arcs -ftest-coverage\"'\n cmd = ['catkin', 'run_tests', self.name]\n cmd.extend(shlex.split(extra_parms))\n\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE,\n universal_newlines=True)\n\n self.out , self.err = process.communicate()\n\n self.setSummary(self.get_test_summary())\n self.setExecutionStatus(process.returncode)\n\n if process.returncode != 0:\n return process.returncode\n\n # Capture coverage data after running tests\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --no-checksum --directory build/' + self.name + ' --capture --output-file build/lcov.info')\n\n # Add baseline counters\n out, err = self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --add-tracefile build/lcov.base --add-tracefile build/lcov.info --output-file build/lcov.total')\n\n # Remove coverage data for a particular set of files from the tracefile\n out, err = self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --remove build/lcov.total /usr* /opt* */test/* */CMakeFiles/* */build/* --output-file build/lcov.total.cleaned')\n \n # Extract line coverage from output\n if 'lines......:' in out:\n self.coverage = float(out.split('lines......: ')[1].split('%')[0])\n else:\n self.coverage = 0\n\n return 0", "def test_reopen_cache():\n\n env = os.environ.copy()\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n # Set the COVERAGE_PROCESS_START env. 
variable.\n # Allows to cover files run in a subprocess\n # http://nedbatchelder.com/code/coverage/subprocess.html\n env[\"COVERAGE_PROCESS_START\"] = path + \"/../.coveragerc\"\n\n p = subprocess.Popen(\n [sys.executable, \"unittests/reopen_cache_tester.py\"],\n stdout=subprocess.PIPE,\n env=env)\n print(p.stdout.read())\n p.wait()\n p.stdout.close()", "def test_coverage_2(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-selfpay.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_2(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_2(inst2)", "def cuv(ctx, coverage_fname, exclude, branch):\n if coverage_fname is None:\n coverage_fname = find_coverage_data('.')\n # coverage_fname still could be None\n\n cfg = Config()\n ctx.obj = cfg\n\n cfg.nice_width = min(80, shutil.get_terminal_size()[0])\n cfg.exclude = exclude\n\n cfg.branch = branch\n if coverage_fname is not None:\n cfg.data = coverage.Coverage(data_file=coverage_fname)\n cfg.data.load()\n else:\n raise click.UsageError(\n \"No coverage data. Do you have a .coverage file?\"\n )", "def run(self):\n cmd = 'coverage run setup.py test && coverage report -m'\n check_call(cmd, shell=True)", "def coverage_start(self, features, marker):\n self.coverage.load()\n self.coverage.start()", "def derive_project_coverage(self) -> None:\n self.get_project_column_description_coverage()\n self.get_project_test_coverage()", "def define_coverage(self, id=None, units=None, standard_name=None, coverage_dimensions=None):", "def test(coverage, test_names):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import subprocess\n os.environ['FLASK_COVERAGE'] = '1'\n sys.exit(subprocess.call(sys.argv))\n\n import unittest\n if test_names:\n tests = unittest.TestLoader().loadTestsFromNames(test_names)\n else:\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()" ]
[ "0.6401834", "0.61861366", "0.61683387", "0.61132556", "0.59674615", "0.5901822", "0.58574665", "0.5827963", "0.58016634", "0.5800805", "0.5796055", "0.5757621", "0.5757477", "0.5739365", "0.57314754", "0.5698605", "0.56972533", "0.5687149", "0.56821626", "0.5673393", "0.5663225", "0.56505686", "0.5647798", "0.56363785", "0.5596836", "0.5591364", "0.5575166", "0.55528605", "0.552917", "0.55281013" ]
0.81936353
0
return a list of urls to ps managed by the PSS
def list_ps(cls, pss_url): pss_url = cls.__get_url(pss_url) logger.debug("List PS at %s"%pss_url) pss_dir = saga.advert.directory(pss_url, saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite) ps_list = pss_dir.list() ps_full_urls = [] for i in ps_list: ps_full_urls.append(pss_url + "/" + i) return ps_full_urls
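A hypothetical call site for the classmethod above; the advert URL and the enclosing class name (PSS) are invented, and a reachable SAGA advert backend is assumed.

pss_url = 'advert://localhost/pilotstore'    # invented endpoint
for ps_url in PSS.list_ps(pss_url):          # PSS: class defining list_ps
    print('managed PS:', ps_url)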
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_pd(cls, pss_url):\n pds_url = cls.__get_url(pds_url)\n logger.debug(\"List PDS at %s\"%pds_url)\n pds_dir = saga.advert.directory(pds_url, saga.advert.Create | \n saga.advert.CreateParents | \n saga.advert.ReadWrite)\n \n pd_list = pds_dir.list()\n pd_full_urls = []\n for i in pd_list:\n pd_full_urls.append(pss_url + \"/\" + i) \n return pd_full_urls", "def getURLs():", "def _get_all_pinged_urls():\n p = data.DinghyData(redis_host)\n\n return p.get_all_pinged_urls()", "def get_urls():\r\n return []", "def urls(self) -> list[str]:\r\n ...", "def get_psn_list():\n\tudb = UserPageDB()\n\ttry:\n\t\tpsns = udb.psn_list()\n\t\treturn [_transform_psn(p) for p in psns]\n\tfinally:\n\t\tudb.close()", "def psvs(self): \n return self._link_reg.psvs", "def urls(self):\n return self._list_urls()", "def _findSupplUrls(self, landPage):\n urlParts = ['/suppdata/']\n for urlPart in urlParts:\n suppUrls = findLinksWithUrlPart(landPage, urlPart)\n if len(suppUrls) > 0:\n return suppUrls\n\n return []", "def sqs_urls(self) -> Sequence[str]:\n return pulumi.get(self, \"sqs_urls\")", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def _get_component_psvs(cls, manifest_url):\n return [k[\"url\"] for k in json.loads(cls._read_s3_url(manifest_url))[\"entries\"]]", "def urls(self):\n if not self._urls:\n urls = []\n for host in self.hosts:\n # Must end without a slash\n urls.append('http://%(host)s:%(port)s%(path)s' % {\n 'host': host,\n 'port': self.port,\n 'path': self.path,\n })\n self._urls = urls\n return self._urls", "def urls(self) -> str:\n return self._data['urls']", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def get_sp_policys(self, context):\n # handling policys method in RPC\n response = self.dns_manager.get_sp_policys(context)\n return response", "def apt_urls(self):\n return self._apt_urls", "def apt_urls(self):\n return self._apt_urls", "def get_parliament_members_urls(self) -> list:\n directory = self.read_html(self.url)\n return [\n a.attrs[\"href\"]\n for a in directory.select(\".single-mp a\")\n if a.attrs[\"href\"].startswith(\"https\")\n ]", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def ps():\n for p in psutil.process_iter():\n try:\n pid = p.pid\n name = p.name()\n cmdline = p.cmdline()\n except 
psutil.AccessDenied:\n continue\n\n print(\"%5d %10s %s\" % (pid, name, cmdline))", "def get_urls(db):\n return db.meta.find_one({'name':\"urls\"})['urls']", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def urls(self):\n patterns = []\n for sitecomp in self.modules():\n patterns.append(sitecomp.urls)\n pass\n return patterns", "def GetPublishedProcesses():\r\n pass", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls" ]
[ "0.7140879", "0.653726", "0.6411908", "0.6388741", "0.63296497", "0.6311775", "0.6185315", "0.6074509", "0.60267735", "0.6024672", "0.5998153", "0.5941728", "0.59041774", "0.58864075", "0.5875603", "0.58739895", "0.58716524", "0.5864728", "0.5787926", "0.5787926", "0.5770929", "0.5745957", "0.5736422", "0.571627", "0.5700134", "0.56916034", "0.5671733", "0.56457484", "0.5621547", "0.56142795" ]
0.82921356
0
return a list of urls to pds managed by the PDS
def list_pd(cls, pss_url):
        pds_url = cls.__get_url(pss_url)
        logger.debug("List PDS at %s"%pds_url)
        pds_dir = saga.advert.directory(pds_url, saga.advert.Create | 
                                        saga.advert.CreateParents | 
                                        saga.advert.ReadWrite)
        
        pd_list = pds_dir.list()
        pd_full_urls = []
        for i in pd_list:
            pd_full_urls.append(pds_url + "/" + i) 
        return pd_full_urls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_ps(cls, pss_url):\n pss_url = cls.__get_url(pss_url)\n logger.debug(\"List PS at %s\"%pss_url)\n pss_dir = saga.advert.directory(pss_url, saga.advert.Create | \n saga.advert.CreateParents | \n saga.advert.ReadWrite)\n \n ps_list = pss_dir.list()\n ps_full_urls = []\n for i in ps_list:\n ps_full_urls.append(pss_url + \"/\" + i) \n return ps_full_urls", "def getURLs():", "def _get_all_pinged_urls():\n p = data.DinghyData(redis_host)\n\n return p.get_all_pinged_urls()", "def get_urls():\r\n return []", "def urls(self) -> list[str]:\r\n ...", "def get_psn_list():\n\tudb = UserPageDB()\n\ttry:\n\t\tpsns = udb.psn_list()\n\t\treturn [_transform_psn(p) for p in psns]\n\tfinally:\n\t\tudb.close()", "def psvs(self): \n return self._link_reg.psvs", "def urls(self):\n return self._list_urls()", "def _findSupplUrls(self, landPage):\n urlParts = ['/suppdata/']\n for urlPart in urlParts:\n suppUrls = findLinksWithUrlPart(landPage, urlPart)\n if len(suppUrls) > 0:\n return suppUrls\n\n return []", "def sqs_urls(self) -> Sequence[str]:\n return pulumi.get(self, \"sqs_urls\")", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def _get_component_psvs(cls, manifest_url):\n return [k[\"url\"] for k in json.loads(cls._read_s3_url(manifest_url))[\"entries\"]]", "def urls(self):\n if not self._urls:\n urls = []\n for host in self.hosts:\n # Must end without a slash\n urls.append('http://%(host)s:%(port)s%(path)s' % {\n 'host': host,\n 'port': self.port,\n 'path': self.path,\n })\n self._urls = urls\n return self._urls", "def urls(self) -> str:\n return self._data['urls']", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def get_sp_policys(self, context):\n # handling policys method in RPC\n response = self.dns_manager.get_sp_policys(context)\n return response", "def apt_urls(self):\n return self._apt_urls", "def apt_urls(self):\n return self._apt_urls", "def get_parliament_members_urls(self) -> list:\n directory = self.read_html(self.url)\n return [\n a.attrs[\"href\"]\n for a in directory.select(\".single-mp a\")\n if a.attrs[\"href\"].startswith(\"https\")\n ]", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def ps():\n for p in psutil.process_iter():\n try:\n pid = p.pid\n name = p.name()\n cmdline = p.cmdline()\n except 
psutil.AccessDenied:\n continue\n\n print(\"%5d %10s %s\" % (pid, name, cmdline))", "def get_urls(db):\n return db.meta.find_one({'name':\"urls\"})['urls']", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def urls(self):\n patterns = []\n for sitecomp in self.modules():\n patterns.append(sitecomp.urls)\n pass\n return patterns", "def GetPublishedProcesses():\r\n pass", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls" ]
[ "0.82921326", "0.653874", "0.6413279", "0.6390923", "0.633152", "0.63116646", "0.61851937", "0.60768485", "0.6028015", "0.60264033", "0.59963876", "0.59428346", "0.5906632", "0.5888624", "0.5877278", "0.58748275", "0.5870361", "0.5865255", "0.57893056", "0.57893056", "0.5773107", "0.57451826", "0.57381153", "0.57163024", "0.5698026", "0.5693221", "0.5670572", "0.56479156", "0.5620837", "0.5616483" ]
0.71406835
1
Calculates new indicator position based on current pitch.
def updateIndicator(self): newIndicatorX = self.getPosFromPitch(self.listener.pitch) self.triTip = (newIndicatorX, self.triTip[1]) self.triLeft = (self.triTip[0] - self.width*0.01, self.height*.3) self.triRight = (self.triTip[0] + self.width*0.01, self.height*.3) self.indicatorCoords = ( self.triLeft, self.triTip, self.triRight) self.indicator.points = self.indicatorCoords self.indicator.fill = self.indicatorColor[self.inTune]
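A standalone sketch of the triangle geometry the method recomputes; the panel size is invented and tip_y stands in for the unchanged self.triTip[1].

width, height = 800, 600                     # invented panel size
tip_x, tip_y = 400, height * 0.25            # tip_y: assumed prior value
triTip = (tip_x, tip_y)
triLeft = (tip_x - width * 0.01, height * 0.3)
triRight = (tip_x + width * 0.01, height * 0.3)
print(triLeft, triTip, triRight)             # (392.0, 180.0) (400, 150.0) (408.0, 180.0)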
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pitch(self, pitch):\n pass", "def __calculateOffset(self):\n #if len(self.__XValue) > 0:\n # print(\"GPSBearing: \"+str(round(self.__GPSBearing[-1]))+\", heading: \"+str(round(self.value))+\n # \", x: \"+str(round(self.__XValue[-1]))+\", stdev: \"+str(round(np.std(self.__GPSBearing),5))+\n # \", offset: \"+str(self.__offset))\n if len(self.__GPSBearing) == self.__GPSBearing.maxlen and np.std(self.__GPSBearing) < 0.2 and self.speed > 7:\n self.__offset = (np.mean(self.__GPSBearing) + 360 - np.mean(self.__XValue)) % 360\n #print(\"new offset: \"+str(self.__offset)) ", "def relative():\n def transposeRelative(token, lastPitch):\n \"\"\"\n Make a new relative pitch from token, if possible.\n Return the last pitch used (absolute, untransposed).\n \"\"\"\n p = Pitch.fromToken(token, tokenizer)\n if not p:\n return lastPitch\n # absolute pitch determined from untransposed pitch of lastPitch\n octaveCheck = p.octaveCheck is not None\n p.absolute(lastPitch)\n if source.inSelection:\n # we may change this pitch. Make it relative against the\n # transposed lastPitch.\n try:\n last = lastPitch.transposed\n except AttributeError:\n last = lastPitch\n # transpose a copy and store that in the transposed\n # attribute of lastPitch. Next time that is used for\n # making the next pitch relative correctly.\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy # store transposed copy in new lastPitch\n new = copy.relative(last)\n if octaveCheck:\n new.octaveCheck = copy.octave\n if relPitchToken:\n # we are allowed to change the pitch after the\n # \\relative command. lastPitch contains this pitch.\n lastPitch.octave += new.octave\n new.octave = 0\n changes.replaceToken(relPitchToken[0], lastPitch.output(tokenizer.language))\n del relPitchToken[:]\n changes.replaceToken(token, new.output(tokenizer.language))\n return p\n\n lastPitch = None\n relPitchToken = [] # we use a list so it can be changed from inside functions\n \n # find the pitch after the \\relative command\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n lastPitch = Pitch.fromToken(token, tokenizer)\n if lastPitch and source.inSelection:\n relPitchToken.append(token)\n token = next(source)\n if not lastPitch:\n lastPitch = Pitch.c1()\n \n # eat stuff like \\new Staff == \"bla\" \\new Voice \\notes etc.\n while True:\n if token in ('\\\\new', '\\\\context'):\n next(source) # skip context type\n token = next(source)\n if token == '=':\n next(source) # skip context name\n token = next(source)\n elif isinstance(token, tokenizer.NoteMode):\n token = next(source)\n else:\n break\n \n # now transpose the relative expression\n if isinstance(token, tokenizer.OpenDelimiter):\n # Handle full music expression { ... } or << ... 
>>\n for token in consume():\n if token == '\\\\octaveCheck':\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n p = Pitch.fromToken(token, tokenizer)\n if p:\n if source.inSelection:\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy\n changes.replaceToken(token, copy.output(tokenizer.language)) \n lastPitch = p\n del relPitchToken[:]\n elif isinstance(token, tokenizer.OpenChord):\n chord = [lastPitch]\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n lastPitch = chord[:2][-1] # same or first\n break\n elif isinstance(token, tokenizer.Pitch):\n chord.append(transposeRelative(token, chord[-1]))\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.OpenChord):\n # Handle just one chord\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n break\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.Pitch):\n # Handle just one pitch\n transposeRelative(token, lastPitch)", "def getPosFromPitch(self, pitch):\n\t\t# Width of scale: \n\t\t#\tself.width - 2*self.margin\n\t\t# Width of each individual note: \n\t\t#\twidth of scale / len(Pitch.noteNames) \n\t\t# Width of each subdivision inside the note: \n\t\t# width of ind note / self.scaleSubSections\n\t\t\n\t\tif ( type(self.listener.pitch) == type(None) ):\n\t\t\tself.errorCount += 1\n\t\t\treturn self.triTip[0] # don't move\n\t\t\t\n\t\tself.errorCount = 0 \n\t\tscaleWidth = self.width - 2*self.margin\n\t\tscaleNoteSubWidth = self.scaleNoteWidth / self.scaleSubSections\n\t\tcurMidi = self.listener.pitch.midi\n\t\tscaleFraction = (curMidi % 12) + 1\n\t\t\n\t\t# Distance of scale block from left of screen\n\t\txPos = (self.margin + scaleFraction * \n\t\t\t\tself.scaleNoteWidth - (12*scaleNoteSubWidth) )\n\t\tif ( self.margin <= xPos ):\n\t\t\treturn xPos\n\t\telse:\t\n\t\t\treturn self.triTip[0] # off screen, don't move", "def newPitch(token, pitch, lastPitch):\n pitch.absolute(lastPitch)\n changes.replaceToken(token,\n token.step + ly.pitch.octaveToString(pitch.octave) + token.cautionary)", "def pitch_roll(self, px, pz):\r\n px -= self.unif[0]\r\n pz -= self.unif[2]\r\n halfw = self.width/2.0\r\n halfd = self.depth/2.0\r\n dx = self.width/self.ix\r\n dz = self.depth/self.iy\r\n x0 = int(math.floor((halfw + px)/dx + 0.5))\r\n if x0 < 0: x0 = 0\r\n if x0 > self.ix-1: x0 = self.ix-1\r\n z0 = int(math.floor((halfd + pz)/dz + 0.5))\r\n if z0 < 0: z0 = 0\r\n if z0 > self.iy-1: z0 = self.iy-1\r\n normp = array(self.buf[0].normals[z0*self.ix + x0])\r\n # slight simplification to working out cross products as dirctn always 0,0,1\r\n #sidev = cross(normp, dirctn)\r\n sidev = array([normp[1], -normp[0], 0.0])\r\n sidev = sidev / sqrt(sidev.dot(sidev))\r\n #forwd = cross(sidev, normp)\r\n forwd = array([-normp[2]*normp[0], -normp[2]*normp[1],\r\n normp[0]*normp[0] + normp[1]*normp[1]])\r\n forwd = forwd / sqrt(forwd.dot(forwd))\r\n return (degrees(arcsin(-forwd[1])), degrees(arctan2(sidev[1], normp[1])))", "def pitch_roll(self, px, pz):\n px -= self.unif[0]\n pz -= self.unif[2]\n halfw = self.width/2.0\n halfd = self.depth/2.0\n dx = self.width/self.ix\n dz = self.depth/self.iy\n x0 = int(math.floor((halfw + px)/dx + 0.5))\n if x0 < 0: x0 = 0\n if x0 > self.ix-1: x0 = self.ix-1\n z0 = int(math.floor((halfd + pz)/dz + 0.5))\n if z0 < 0: z0 = 0\n if z0 > self.iy-1: z0 = self.iy-1\n normp = array(self.buf[0].normals[z0*self.ix + x0])\n # slight 
simplification to working out cross products as dirctn always 0,0,1\n #sidev = cross(normp, dirctn)\n sidev = array([normp[1], -normp[0], 0.0])\n sidev = sidev / sqrt(sidev.dot(sidev))\n #forwd = cross(sidev, normp)\n forwd = array([-normp[2]*normp[0], -normp[2]*normp[1],\n normp[0]*normp[0] + normp[1]*normp[1]])\n forwd = forwd / sqrt(forwd.dot(forwd))\n return (degrees(arcsin(-forwd[1])), degrees(arctan2(sidev[1], normp[1])))", "def pos(self):\n return (self.raw - self.raw_zero) / self.ratio", "def get_midi_pitch(self) -> (int, float):\n pitch_id = 69 + int(self._cents/100)\n detune = self._cents - 100*(pitch_id - 69)\n while detune >= 50:\n pitch_id += 1\n detune -= 100\n while detune < -50:\n pitch_id -= 1\n detune += 100\n return (pitch_id, detune)", "def newPitch(token, pitch):\n changes.replaceToken(token,\n token.step + ly.pitch.octaveToString(pitch.octave) + token.cautionary)", "def pitch(self):\n return self._pitch", "def seek_behaviour(self):\n x, y = (int (self.posicao[0]),int(self.posicao[1]))\n nx,ny = tuple(pontos[self.nextpoint])\n rot = self.rotacao\n direction = pontos[self.nextpoint]-self.posicao", "def update_position(self):\n self.current_position = utility_methods.cylindrical(self.current_position + self.rotation)\n\n self.rotation_list.append(self.current_position)", "def calcPos(self,newpol):\n\n\t\tdetlatoff=(self.offset9()-self.offset10())*cosd(newpol)+self.offset10()\n\t\tnewoffcry = (self.offset2()-self.offset3())*cosd(newpol)+self.offset3()\n\t\tnewdetoff = (self.offset4()-self.offset8())*cosd(newpol)+self.offset8() +self.offset5()\n\n\t\twl = BLi.getWavelength()\n\t\tself.thbragg = 180/pi*asin(wl/(2*self.dspace))\n\t\tnewthp=self.sign()*self.thbragg+newoffcry\n\t\tnewtthp=2*self.sign()*self.thbragg+newdetoff\n\t\tprint \"stokes=%1.2f thp=%1.2f tthp=%1.2f detlatoff=%1.2f\"%(newpol,newthp,newtthp,detlatoff)", "def rate_position(current, target):\n return (target[0] - current[0]) ** 2 + (target[1] - current[1]) ** 2", "def pitch(self):\n return self['pitch']", "def get_EUL_Pitch(self):\n eul_raw = self.i2c.mem_read(2, self.addr, OUT_EUL_PITCH_LSB)\n eul_pitch = self.sign_val(((eul_raw[1]<<8) + eul_raw[0]))/16.0\n return (eul_pitch)\n #print(eul_pitch)", "def yaw_pitch_roll(self):\n\n self._normalise()\n yaw = np.arctan2(2*(self.q[0]*self.q[3] - self.q[1]*self.q[2]),\n 1 - 2*(self.q[2]**2 + self.q[3]**2))\n pitch = np.arcsin(2*(self.q[0]*self.q[2] + self.q[3]*self.q[1]))\n roll = np.arctan2(2*(self.q[0]*self.q[1] - self.q[2]*self.q[3]),\n 1 - 2*(self.q[1]**2 + self.q[2]**2))\n\n return yaw, pitch, roll", "def pitch_dia(self, value):\n Gear.pitch_dia.fset(self, value)\n self._calcs()", "def crouched_position(mp):\n joints = ['LHipPitch', 'RHipPitch', 'LKneePitch', 'RKneePitch']\n ankles = ['LAnklePitch', 'RAnklePitch']\n\n joint_angles = [-0.6074221134185791,\n -0.4356980323791504,\n 1.6413381099700928,\n 1.5739259719848633]\n\n ankle_angles = [-0.9403839111328125, -1.0461461544036865]\n\n # actuation\n mp.setAngles(joints, joint_angles, 0.1)\n time.sleep(0.420)\n mp.setAngles(ankles, ankle_angles, 0.1)", "def base_roll_pitch_yaw_rate(self):\n return np.asarray([self._robot_state.roll_rate, self._robot_state.pitch_rate, self._robot_state.yaw_rate])", "def update_current(self):\n velocity, horizontal_angle, vertical_angle = self.current_function()\n self.set_current_velocity(velocity, horizontal_angle, vertical_angle)", "def update_pose(self, data):\n # self.pose = data\n self.x_position = round(data.pose.pose.position.x, 4)\n self.y_position = 
round(data.pose.pose.position.y, 4)\n [yaw, _, _] = quaternion_to_euler(\n data.pose.pose.orientation.x, \n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w\n )\n \n self.theta = round(yaw, 4)\n print(f'(Reading) X: {data.pose.pose.position.x}\\t Y:{data.pose.pose.position.y}')\n # self.theta = round(data.pose.pose.orientation.z, 4)", "def current_probe_position(self):\n\t\t# Obtain encoder feedback and calculate probe position\n\t\tx_position = self.x_mc.current_position() / self.steps_per_cm\n\t\ty_position = self.y_mc.current_position() / self.steps_per_degree *5 #Seems that 1 encoder unit = 5 motor step unit\n\n\t\treturn x_position, y_position", "def update(self):\n if self._position:\n self._frequency = len(self._position)\n for i in range(len(self._position)):\n # convert from text file\n self._position[i] = float(self._position[i])\n self._recency = [self._position[0]]\n for i in range(1, self._frequency):\n self._recency.append(self._position[i] - self._position[i - 1])\n self._recency.append(1 - self._position[self._frequency - 1])\n self._isUpdated = True", "def followAdjust(self, notes, ctable):\n \n sc=self.seq\n\n for offset in notes:\n nn = notes[offset]\n\n if len(nn) == 1 and nn[0].pitch != None:\n tb = self.getChordInPos(offset, ctable)\n\n if tb.chordZ:\n continue\n\n nn[0].pitch += ( tb.chord.rootNote + self.rootChord)\n\n return notes", "def on_cam_base_adjust_btn_clicked(self):\n pitch = self.cam_base_pitch_hSlider.value()\n yaw = self.cam_base_yaw_hSlider.value()\n len = self.cam_processing_len_edit.text()\n wid = self.cam_processing_width_edit.text()\n self.baseCamThread.cam.set_cam_parameters(int(len),float(wid))\n pitch, yaw = self.control1.device.cmd_cam_adjust(pitch, yaw)\n status = \"goint to angles as, pitch: \" + str(pitch) + \", yaw: \" + str(yaw)\n self.cam_set_status_txt(status)", "def __position_jaw_current1_cb(self, data):\n self.__position_jaw_current[0] = data.position[0]\n self.__get_jaw_event[0].set()", "def calc(self,index, counter_values):\n try:\n angles = self.ik220_dev.read_attribute('Angles').value\n if index == 9:\n return sum(angles[:3])/3.0 # Excluded channel 4 of grating pitch encoder because of problems of Homing in the last header of the RON grating encoder.\n elif index == 10:\n return sum(angles[4:6])/2.0 # Modified from 4 channels to 2 channels because of problems of Homing in the 2 last headers of the RON mirror3 encoder.\n else:\n return angles[index - 1]\n except:\n return 1e-100", "def base_roll_pitch_yaw(self):\n #raise NotImplementedError('Not yet implemented!')\n return np.asarray([self._robot_state.roll, self._robot_state.pitch, self._robot_state.yaw])" ]
[ "0.6522374", "0.62633663", "0.61403", "0.60936415", "0.60467106", "0.60446906", "0.60429764", "0.6034919", "0.5962873", "0.5672661", "0.5647774", "0.5627969", "0.5582988", "0.5535736", "0.55170625", "0.5511091", "0.5506568", "0.54981196", "0.54818946", "0.5465644", "0.54480445", "0.54091287", "0.5395797", "0.53881603", "0.5387094", "0.5386857", "0.5378622", "0.53582937", "0.5358289", "0.53548294" ]
0.64335185
1
Get screen position of indicator from pitch.
def getPosFromPitch(self, pitch):
    # Width of scale:
    #   self.width - 2*self.margin
    # Width of each individual note:
    #   width of scale / len(Pitch.noteNames)
    # Width of each subdivision inside the note:
    #   width of ind note / self.scaleSubSections

    if ( type(self.listener.pitch) == type(None) ):
        self.errorCount += 1
        return self.triTip[0]  # don't move

    self.errorCount = 0
    scaleWidth = self.width - 2*self.margin
    scaleNoteSubWidth = self.scaleNoteWidth / self.scaleSubSections
    curMidi = self.listener.pitch.midi
    scaleFraction = (curMidi % 12) + 1

    # Distance of scale block from left of screen
    xPos = (self.margin + scaleFraction *
            self.scaleNoteWidth - (12*scaleNoteSubWidth) )
    if ( self.margin <= xPos ):
        return xPos
    else:
        return self.triTip[0]  # off screen, don't move
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pitch(self):\n return self['pitch']", "def pitch(self):\n return self._pitch", "def current_probe_position(self):\n\t\t# Obtain encoder feedback and calculate probe position\n\t\tx_position = self.x_mc.current_position() / self.steps_per_cm\n\t\ty_position = self.y_mc.current_position() / self.steps_per_degree *5 #Seems that 1 encoder unit = 5 motor step unit\n\n\t\treturn x_position, y_position", "def pos(self):\n return (self.raw - self.raw_zero) / self.ratio", "def pitch(self, pitch):\n pass", "def get_position(self) -> float:\n return self._select_interface(self._rc_get_position,\n self._http_get_position)", "def get_linear_track_pos(self):\r\n return self._arm.get_linear_track_pos()", "def get_midi_pitch(self) -> (int, float):\n pitch_id = 69 + int(self._cents/100)\n detune = self._cents - 100*(pitch_id - 69)\n while detune >= 50:\n pitch_id += 1\n detune -= 100\n while detune < -50:\n pitch_id -= 1\n detune += 100\n return (pitch_id, detune)", "def get_position(self):\n return self.gripper_io.get_signal_value(\"position_response_m\")", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def get_EUL_Pitch(self):\n eul_raw = self.i2c.mem_read(2, self.addr, OUT_EUL_PITCH_LSB)\n eul_pitch = self.sign_val(((eul_raw[1]<<8) + eul_raw[0]))/16.0\n return (eul_pitch)\n #print(eul_pitch)", "def getAssemblyPitch(self):\n return self.spatialGrid.pitch", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def getPitch(self):\n step = self._elem.find('pitch/step')\n octave = self._elem.find('pitch/octave')\n if step is None or octave is None:\n raise MusicXMLParseError(\"this note does not have pitch\")\n\n note_name = step.text\n octave = int(octave.text)\n notated_accidental = self._get_text('accidental')\n\n notated_sharp = notated_accidental == 'sharp'\n notated_flat = notated_accidental == 'flat'\n notated_natural = notated_accidental == 'natural'\n\n key = self._attributes.getKeySignature()\n key_accidental_char, key_accidental_list = ACCIDENTAL_TABLE[key]\n \n if notated_natural:\n note_name += '='\n return (note_name, octave)\n if not notated_natural and note_name in key_accidental_list: # see what the \n note_name += key_accidental_char\n if notated_sharp:\n note_name += '#'\n if notated_flat:\n note_name += 'b'\n\n return (note_name, octave)", "def get_position(self):\r\n msg = struct.pack('>2B', 56, 00)\r\n response = self.query(msg)\r\n # Read and decode wavelength value (unknown units)\r\n encoded_wl = response[:2]\r\n wl = struct.unpack('>H', encoded_wl)[0]\r\n units, to_nm_multiplier = self.get_units()\r\n return wl * to_nm_multiplier", "def get_aa_pos_on_screen(self,position,frame):\n position=position*3+float(frame)-1\n x,y=self.get_base_pos_on_screen(position)\n y=y+20.0+float(frame)*15.0\n return x,y", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def get_pos_in_pixels(self):\n pixelpos = Vector(self.pos.x * 32, -self.pos.y * 32)\n return pixelpos + self.offset", "def rl_get_point() -> int: # pragma: no cover\n if rl_type == RlType.GNU:\n return ctypes.c_int.in_dll(readline_lib, \"rl_point\").value\n\n elif rl_type == RlType.PYREADLINE:\n return int(readline.rl.mode.l_buffer.point)\n\n 
else:\n return 0", "def get_speaker_position(self, speaker):\n pos_x = c_float()\n pos_y = c_float()\n active = c_bool()\n self._call_fmod(\n \"FMOD_System_GetSpeakerPosition\",\n speaker.value,\n byref(pos_x),\n byref(pos_y),\n byref(active),\n )\n return so(x=pos_x.value, y=pos_y.value, active=active.value)", "def get_base_pos_on_screen(self,position):\n\n return self.seq_xstart+float(position-1)*self.base_scale.get(),self.seq_row", "def get_pitch(self, start):\n spectrum, amplitude = self.frequency_spectrum(start)\n peaks = self.get_peaks(spectrum, amplitude)\n\n if self.print:\n self.plot_clip(spectrum, amplitude, peaks)\n \n return self.get_note_probabilities(peaks)", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def position_line(self, prc=50.0):\n rtc = self._get_fibonnaci_level(prc)[0]\n x_pos = [self.roi.pos()[0], rtc]\n y_pos = [self.roi.pos()[0] + self.roi.size()[0], rtc]\n return x_pos, y_pos", "def get_position(self):\n return self._I85_msg_from_device(self.node.sdo[0x6064].phys) # rad", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def screen_coordinates(pos):\n\n return [int((pos[0] % screen_width) / px), screen_height - int((pos[1] % screen_height) / px)]", "def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]" ]
[ "0.64851797", "0.64164263", "0.64114904", "0.6347436", "0.62937754", "0.6191977", "0.6181522", "0.6140158", "0.60704404", "0.6055022", "0.6024718", "0.6013377", "0.5936175", "0.59327936", "0.5917486", "0.5900141", "0.58691347", "0.58691347", "0.58691347", "0.58618736", "0.5826415", "0.581827", "0.581133", "0.5806351", "0.5789689", "0.57857704", "0.5770776", "0.5765147", "0.5757849", "0.5730285" ]
0.7272576
0
Calculates new info about current pitch to display.
def updateInfo(self):
    if ( self.errorCount == 2 ):
        self.pitchText.text = "Unclear microphone input..."
    curNote = self.listener.pitch.note
    curFreq = self.listener.pitch.freq
    self.tuneDelta, self.tuneNeighbor = self.listener.pitch.inTune()
    tuneText = "%0.2f Hz off from %s (%0.1f Hz)" % (abs(self.tuneDelta),
                                                    self.tuneNeighbor.note,
                                                    curFreq)
    self.pitchText.text = tuneText
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pitch(self, pitch):\n pass", "def pitch(self):\n return self['pitch']", "def pitch(self):\n return self._pitch", "def newPitch(token, pitch, lastPitch):\n pitch.absolute(lastPitch)\n changes.replaceToken(token,\n token.step + ly.pitch.octaveToString(pitch.octave) + token.cautionary)", "def get_midi_pitch(self) -> (int, float):\n pitch_id = 69 + int(self._cents/100)\n detune = self._cents - 100*(pitch_id - 69)\n while detune >= 50:\n pitch_id += 1\n detune -= 100\n while detune < -50:\n pitch_id -= 1\n detune += 100\n return (pitch_id, detune)", "def get_pitch(self, start):\n spectrum, amplitude = self.frequency_spectrum(start)\n peaks = self.get_peaks(spectrum, amplitude)\n\n if self.print:\n self.plot_clip(spectrum, amplitude, peaks)\n \n return self.get_note_probabilities(peaks)", "def getPitch(self):\n step = self._elem.find('pitch/step')\n octave = self._elem.find('pitch/octave')\n if step is None or octave is None:\n raise MusicXMLParseError(\"this note does not have pitch\")\n\n note_name = step.text\n octave = int(octave.text)\n notated_accidental = self._get_text('accidental')\n\n notated_sharp = notated_accidental == 'sharp'\n notated_flat = notated_accidental == 'flat'\n notated_natural = notated_accidental == 'natural'\n\n key = self._attributes.getKeySignature()\n key_accidental_char, key_accidental_list = ACCIDENTAL_TABLE[key]\n \n if notated_natural:\n note_name += '='\n return (note_name, octave)\n if not notated_natural and note_name in key_accidental_list: # see what the \n note_name += key_accidental_char\n if notated_sharp:\n note_name += '#'\n if notated_flat:\n note_name += 'b'\n\n return (note_name, octave)", "def initInfo(self):\n\t\tinfoCx, infoCy = self.width/2, self.height*0.85\n\t\tself.pitchText = self.createText( infoCx, infoCy, \n\t\t\t\t\t\t\t\t\t\t\tNone, self.pitchTextFont)\n\t\tself.pitchText.text = \"Currently: No pitch detected.\"\n\n\t\tself.title = self.createText( infoCx, self.height*0.15, None, self.titleFont)\n\t\tself.title.text = \"Tuner\"\n\t\tself.subtitle = self.createText(infoCx, self.height*0.2, None, self.labelFont)\n\t\tself.subtitle.text = \"(equal temperament)\"", "def get_voice_pitch(sim_info: SimInfo) -> float:\n # noinspection PyPropertyAccess\n return sim_info.voice_pitch", "def newPitch(token, pitch):\n changes.replaceToken(token,\n token.step + ly.pitch.octaveToString(pitch.octave) + token.cautionary)", "def pitch(self, evt=None):\n self.dbgprint(\"pitch(%r)\"%evt)", "def nice_output(self):\n return 'Pitch: {0} at {1}: {2}'.format(\n self.pitch_type, self.start_speed, self.des)", "def update_current(self):\n velocity, horizontal_angle, vertical_angle = self.current_function()\n self.set_current_velocity(velocity, horizontal_angle, vertical_angle)", "def get_EUL_Pitch(self):\n eul_raw = self.i2c.mem_read(2, self.addr, OUT_EUL_PITCH_LSB)\n eul_pitch = self.sign_val(((eul_raw[1]<<8) + eul_raw[0]))/16.0\n return (eul_pitch)\n #print(eul_pitch)", "def __init__(self, pitch=30, pitch_type='duo', Z=4, Alt = 100):\n \n self.pitch_type = pitch_type\n self.pitch = pitch\n self.Z = Z\n self.Alt = Alt\n \n \n # set the Ce value (exposure coeff NA 2.16)\n self.Ce = 1\n \n # set the Ct value (thermal coeff NA 2.17)\n self.Ct = 1\n \n # snow load shjape coefficients\n if self.pitch_type == 'mono':\n if self.pitch <= 30:\n self.mu = 0.80\n elif 30 < self.pitch <= 60:\n self.mu = 0.80 * (60 - self.pitch) / 30\n else:\n self.mu = 0.0\n elif self.pitch_type == 'duo':\n if self.pitch <= 15:\n self.mu = 0.80\n elif 15 < self.pitch <= 30:\n self.mu 
= 0.80 + 0.40*(self.pitch - 15) / 15\n elif 30 < self.pitch <= 60:\n self.mu = 1.2*(60 - self.pitch) / 30\n else:\n self.mu = 0.0\n else:\n self.mu = 0.80 # end conservative number\n \n # calculate the value of the snow load on the ground \n self.sk = (0.15 + (0.1 * self.Z + 0.05) + ((self.Alt - 100) / 525))\n \n # calculate the roof snow load\n self.s = self.mu * self.Ce * self.Ct * self.sk", "def update_info(self):\n self.m_canvas.master.m_informations_displayer.set_operations(\n self.m_current_index\n )\n self.m_canvas.master.m_informations_displayer.set_time(\n self.m_history[self.m_current_index].m_passed_time\n )", "def set_slot_pitch(self):\n Nmag_txt = self.tr(\"Number of magnets = 2p = \")\n if self.machine.rotor.slot.Zs is not None:\n Zs = self.machine.rotor.slot.Zs\n out = Nmag_txt + str(Zs) + \" => \"\n Slot_pitch = 360.0 / Zs\n Slot_pitch_rad = Slot_pitch * pi / 180\n\n pitch_txt = self.tr(\"Slot pitch = \")\n out += (\n pitch_txt\n + \"%.4g\" % (Slot_pitch)\n + u\" ° (\"\n + \"%.4g\" % (Slot_pitch_rad)\n + \" rad)\"\n )\n self.out_Nmag.setText(out)\n else:\n self.out_Nmag.setText(Nmag_txt + \"?\")", "def display_player_points():\r\n pass", "def relative():\n def transposeRelative(token, lastPitch):\n \"\"\"\n Make a new relative pitch from token, if possible.\n Return the last pitch used (absolute, untransposed).\n \"\"\"\n p = Pitch.fromToken(token, tokenizer)\n if not p:\n return lastPitch\n # absolute pitch determined from untransposed pitch of lastPitch\n octaveCheck = p.octaveCheck is not None\n p.absolute(lastPitch)\n if source.inSelection:\n # we may change this pitch. Make it relative against the\n # transposed lastPitch.\n try:\n last = lastPitch.transposed\n except AttributeError:\n last = lastPitch\n # transpose a copy and store that in the transposed\n # attribute of lastPitch. Next time that is used for\n # making the next pitch relative correctly.\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy # store transposed copy in new lastPitch\n new = copy.relative(last)\n if octaveCheck:\n new.octaveCheck = copy.octave\n if relPitchToken:\n # we are allowed to change the pitch after the\n # \\relative command. lastPitch contains this pitch.\n lastPitch.octave += new.octave\n new.octave = 0\n changes.replaceToken(relPitchToken[0], lastPitch.output(tokenizer.language))\n del relPitchToken[:]\n changes.replaceToken(token, new.output(tokenizer.language))\n return p\n\n lastPitch = None\n relPitchToken = [] # we use a list so it can be changed from inside functions\n \n # find the pitch after the \\relative command\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n lastPitch = Pitch.fromToken(token, tokenizer)\n if lastPitch and source.inSelection:\n relPitchToken.append(token)\n token = next(source)\n if not lastPitch:\n lastPitch = Pitch.c1()\n \n # eat stuff like \\new Staff == \"bla\" \\new Voice \\notes etc.\n while True:\n if token in ('\\\\new', '\\\\context'):\n next(source) # skip context type\n token = next(source)\n if token == '=':\n next(source) # skip context name\n token = next(source)\n elif isinstance(token, tokenizer.NoteMode):\n token = next(source)\n else:\n break\n \n # now transpose the relative expression\n if isinstance(token, tokenizer.OpenDelimiter):\n # Handle full music expression { ... } or << ... 
>>\n for token in consume():\n if token == '\\\\octaveCheck':\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n p = Pitch.fromToken(token, tokenizer)\n if p:\n if source.inSelection:\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy\n changes.replaceToken(token, copy.output(tokenizer.language)) \n lastPitch = p\n del relPitchToken[:]\n elif isinstance(token, tokenizer.OpenChord):\n chord = [lastPitch]\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n lastPitch = chord[:2][-1] # same or first\n break\n elif isinstance(token, tokenizer.Pitch):\n chord.append(transposeRelative(token, chord[-1]))\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.OpenChord):\n # Handle just one chord\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n break\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.Pitch):\n # Handle just one pitch\n transposeRelative(token, lastPitch)", "def update_points(self, correct):\n\n if correct:\n self.points += 10\n \n if self.points > ((self.current_level + 1) * 100):\n self.play_sound('level_up', self.standard_sfx, True)\n self.play_sound(choice(self.correct_voice),self.standard_voice, wait=True)\n self.play_sound('combinations',self.game_sounds, wait=True)\n self.current_level += 1\n print(self.current_level)\n if self.current_level > 4:\n self.current_level = 4", "def pitch_dia(self, value):\n Gear.pitch_dia.fset(self, value)\n self._calcs()", "def update_meters(self):\n self.previous_meters = self.current_meters\n self.current_meters = {'X' : 0,\n 'Y' : 0,\n 'RSSI' : 0}", "def getPosFromPitch(self, pitch):\n\t\t# Width of scale: \n\t\t#\tself.width - 2*self.margin\n\t\t# Width of each individual note: \n\t\t#\twidth of scale / len(Pitch.noteNames) \n\t\t# Width of each subdivision inside the note: \n\t\t# width of ind note / self.scaleSubSections\n\t\t\n\t\tif ( type(self.listener.pitch) == type(None) ):\n\t\t\tself.errorCount += 1\n\t\t\treturn self.triTip[0] # don't move\n\t\t\t\n\t\tself.errorCount = 0 \n\t\tscaleWidth = self.width - 2*self.margin\n\t\tscaleNoteSubWidth = self.scaleNoteWidth / self.scaleSubSections\n\t\tcurMidi = self.listener.pitch.midi\n\t\tscaleFraction = (curMidi % 12) + 1\n\t\t\n\t\t# Distance of scale block from left of screen\n\t\txPos = (self.margin + scaleFraction * \n\t\t\t\tself.scaleNoteWidth - (12*scaleNoteSubWidth) )\n\t\tif ( self.margin <= xPos ):\n\t\t\treturn xPos\n\t\telse:\t\n\t\t\treturn self.triTip[0] # off screen, don't move", "def adjust_pitchset(self):\n register_pitch_set = np.array(list(set([i%12 for i in self.register])))\n is_in = np.isin(self.pitch_set, register_pitch_set)\n if not np.all(is_in):\n self.pitch_set = self.pitch_set[is_in]\n self.weights_ = self.weights_[is_in]\n self.weights_ = self.weights_ / np.sum(self.weights_)", "def set_x2pitchlabel(self):\n labelstr_dcm = \"DCM\\nenergy:\\t\\t{:.9f}\\nexit offset:\\t{:.9f}\\npitch:\\t\\t{:.9f}\\nfb stepsize:\\t{:.9f}\\n\\nbeamstop:\\t\\t{:.1f}°\\n\\nundulator:\\t{}\\ngap:\\t\\t{:.9f}\\n\\n{}\"\n labelstr_dmm = \"DMM\\nbragg:\\t\\t{:.9f}\\npitch:\\t\\t{:.9f}\\nx1z:\\t\\t{:.9f}\\nx2z:\\t\\t{:.9f}\\nx2y:\\t\\t{:.9f}\\nfb stepsize:\\t{:.9f}\\n\\nbeamstop:\\t\\t{:.1f}°\\n\\nundulator:\\t{}\\ngap:\\t\\t{:.9f}\\n\\n{}\"\n mono = self.get_mono()\n if mono == \"dcm\":\n self.pitch_label.setText(labelstr_dcm.format(self.dcm_energy_tserver.Position, 
self.dcm_energy_tserver.ExitOffset, self.dcm_pitch_tserver.Position, self.last_corr_angle, self.beamstop.TEMP_OUT[0], self.undulator.State(), self.undulator.Gap, self.feedback_time))\n if mono == \"dmm\":\n self.pitch_label.setText(labelstr_dmm.format(self.dmm_x1rot_tserver.Position, self.dmm_x2rot_tserver.Position, self.dmm_x1z_tserver.Position, self.dmm_x2z_tserver.Position, self.dmm_x2y_tserver.Position, self.last_corr_angle, self.beamstop.TEMP_OUT[0], self.undulator.State(), self.undulator.Gap, self.feedback_time))", "def updateState(self):\n\t\t# ask for current pose data\n\t\tcomm.write(b'id1 mav.pose_sensor get_local_data \\n')\n\t\t# update x value\n\t\tcomm.read_until(b'\"x\": ') # b'' as Telnet needs a bytes object instead of string since Python3\n\t\tread = comm.read_until(b',') # returns read values + finishing ','\n\t\tread = read[:-1] # cut that ','\n\t\tcurrent_state.x = float(read)\n\t\tself.state_x_label.set_text(\"%0.2f\" % current_state.x)\n\t\t# update y value\n\t\tcomm.read_until(b'\"y\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y = float(read)\n\t\tself.state_y_label.set_text(\"%0.2f\" % current_state.y)\n\t\t# update z value\n\t\tcomm.read_until(b'\"z\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.z = float(read)\n\t\tself.state_z_label.set_text(\"%0.2f\" % current_state.z)\n\t\t# update yaw value\n\t\tcomm.read_until(b'\"yaw\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.psi = float(read)\n\t\tself.state_psi_label.set_text(\"%0.2f\" % current_state.psi)\n\t\t# update pitch value\n\t\tcomm.read_until(b'\"pitch\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.theta = float(read)\n\t\tself.state_theta_label.set_text(\"%0.2f\" % current_state.theta)\n\t\t# update roll value\n\t\tcomm.read_until(b'\"roll\": ')\n\t\tread = comm.read_until(b'}')\n\t\tread = read[:-1]\n\t\tcurrent_state.phi = float(read)\n\t\tself.state_phi_label.set_text(\"%0.2f\" % current_state.phi)\n\n\t\t# ask for current velocity data\n\t\tcomm.write(b'id1 mav.velocity_sensor get_local_data \\n')\n\t\t# update p value\n\t\tcomm.read_until(b'\"angular_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.p = float(read)\n\t\tself.state_p_label.set_text(\"%0.2f\" % current_state.p)\n\t\t# update q value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.q = float(read)\n\t\tself.state_q_label.set_text(\"%0.2f\" % current_state.q)\n\t\t# update r value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.r = float(read)\n\t\tself.state_r_label.set_text(\"%0.2f\" % current_state.r)\n\n\t\t# update x_dot value\n\t\tcomm.read_until(b'\"world_linear_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.x_dot = float(read)\n\t\tself.state_x_dot_label.set_text(\"%0.2f\" % current_state.x_dot)\n\t\t# update y_dot value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y_dot = float(read)\n\t\tself.state_y_dot_label.set_text(\"%0.2f\" % current_state.y_dot)\n\t\t# update z_dot value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.z_dot = float(read)\n\t\tself.state_z_dot_label.set_text(\"%0.2f\" % current_state.z_dot)\n\n\t\t# update first waypoint for trajectory in GUI\n\t\twaypoints_gui[0] = [current_state.x, current_state.y, current_state.z, current_state.psi]\n\n\t\treturn GLib.SOURCE_CONTINUE", "def 
on_cam_base_pitch_hSlider_valueChanged(self, value):\n self.cam_base_pitch_ledit.setText(str(100 + value))", "def plot_pitch_contour(self, idx):\n pitch_contour = self.perf_data[idx]['pitch_contour']\n plt.plot(pitch_contour)\n plt.ylabel('pYin Pitch Contour (in Hz)')\n plt.show()", "def update(self):\n # print(\"timebar:\", self.controls[\"timebar\"])\n roll_index = 0\n key_color = [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0]\n key_color.reverse()\n self.pianoroll.fill((30, 30, 40))\n self.reduce_velocity()\n\n if self.controls[\"bar_move_velocity\"] > 0:\n self.controls[\"timebar\"] -= self.controls[\"bar_move_velocity\"]\n elif self.controls[\"timebar\"] < 0:\n self.controls[\"timebar\"] -= self.controls[\"bar_move_velocity\"]\n\n # draw horizontal lines\n for j in range(10, self.height, 10):\n py.gfxdraw.line(self.pianoroll, 20, j, self.width, j, (255, 255, 255, 30))\n\n # below code is for rendering the piano keys on the left\n if key_color[roll_index] == 0:\n py.draw.rect(self.pianoroll, (255, 255, 255), (0, j + 2, 15, 8)) # white keys\n # py.draw.rect(self.pianoroll, (0, 100, 50), (20, j + 2, self.width - 20, 8))\n py.draw.rect(self.pianoroll, (100, 0, 100), (20, j + 2, self.width - 20, 8))\n else:\n py.draw.rect(self.pianoroll, (0, 0, 0), (0, j + 2, 15, 8)) #\n pass\n roll_index = (roll_index + 1) % 12\n\n roll_index = 0\n self.draw_notes()\n for j in range(10, self.height, 10):\n if (key_color[roll_index] == 0):\n py.draw.rect(self.pianoroll, (255, 255, 255), (0, j + 2, 15, 8)) # white keys\n # py.draw.rect(self.pianoroll, (30, 30, 50), (20, j + 2, self.width - 20, 8))\n else:\n py.draw.rect(self.pianoroll, (0, 0, 0), (0, j + 2, 15, 8)) #\n pass\n roll_index = (roll_index + 1) % 12\n\n # draw vertical lines\n fourth = 0\n\n # t = int(np.ceil( (self.controls[\"timebar\"]+20)/int(20 * self.controls[\"h_zoom\"])))\n # s = t+800\n for i in range(20, int(20 * self.controls[\"h_zoom\"] * self.measures * 32) + 30,\n int(20 * self.controls[\"h_zoom\"])):\n # the two conditions are for making the fourth line brighter\n # q = str(int(self.controls[\"timebar\"]))+\" \"+str(i)+\" \"+str(int(self.controls[\"timebar\"] + i))\n if int(self.controls[\"timebar\"] + i) >= 20 and int(self.controls[\"timebar\"] + i) <= 800:\n # q+= str(\" D\")\n if fourth % 4 == 0:\n py.gfxdraw.line(self.pianoroll, int(self.controls[\"timebar\"] + i), 10,\n int(self.controls[\"timebar\"] + i), self.height - 10, (255, 255, 255, 80))\n else:\n py.gfxdraw.line(self.pianoroll, int(self.controls[\"timebar\"] + i), 10,\n int(self.controls[\"timebar\"] + i), self.height - 10, (255, 255, 255, 30))\n # print(q)\n fourth += 1\n\n # creates color differentiation for the black key rows and white key rows\n\n # creates the timebar on the top of pianoroll\n index = 0\n for i in range(20, int(20 * self.controls[\"h_zoom\"] * (self.measures + 1) * 32),\n int(20 * self.controls[\"h_zoom\"] * 32)):\n surf, rect = self.timeFont.text_object(str(index))\n rect.topleft = (self.controls[\"timebar\"] + i, 0)\n if (rect.topleft[0] >= 20):\n self.pianoroll.blit(surf, rect)\n index += 1", "def midi_pitch_fraction(self) -> int:\n return self.__midi_pitch_fraction" ]
[ "0.69431853", "0.65404475", "0.63204634", "0.6193945", "0.61745614", "0.60986906", "0.6051548", "0.5971109", "0.5943813", "0.5887895", "0.58531415", "0.5837766", "0.5800079", "0.5784507", "0.56408966", "0.56292516", "0.56004333", "0.55238503", "0.5481963", "0.5460267", "0.5458219", "0.5426897", "0.54153055", "0.539459", "0.5377945", "0.53732115", "0.5356831", "0.53293025", "0.5316979", "0.5314058" ]
0.70381564
0
Convert (if necessary) address family name to numeric value.
def NormalizeAddressFamily(self, af):
    # ensure address family (af) is valid
    if af in self.AF_MAP_BY_NUMBER:
        return af
    elif af in self.AF_MAP:
        # convert AF name to number (e.g. 'inet' becomes 4, 'inet6' becomes 6)
        af = self.AF_MAP[af]
    else:
        raise UnsupportedAFError('Address family %s is not supported, '
                                 'term %s.' % (af, self.term.name))
    return af
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_number_family(number_str: str, mask: int) -> str:\n number_family = ''\n number_str_len = len(number_str)\n for idx, binary_mask in enumerate(decimal_to_binary(mask, number_str_len)):\n number_family += number_str[idx] if binary_mask == '0' else '*'\n return number_family", "def address_family(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address_family\")", "def AddressFamily(self) -> AddressFamily:", "def _parse_addr(self, addr_str):\n addr = [int(i) for i in addr_str.split('.')]\n if len(addr) != 4 or any([i < 0 for i in addr]) or any([i > 255 for i in addr]):\n raise ValueError('Invalid IP address: %s' % addr_str)\n val = 0\n for i in addr:\n val *= 255\n val += i\n return val", "def address_family(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"address_family\")", "def get_nh_family(self):\n return int(self.get('nhr_family'))", "def _parse_addr(self, addr: str):\n addr = addr.upper()\n return self._registers_list.get(addr, None)", "def _get_as_num(self, msg):\n country_m = re.search(\"country:(.*)\", msg, flags=re.IGNORECASE)\n try:\n country_code = country_m.group(1).strip()\n except AttributeError:\n country_code = None\n\n originAS_m = re.search(\"OriginAS:(.*)\", msg, flags=re.IGNORECASE)\n try:\n originAS = originAS_m.group(1).strip()\n except AttributeError:\n originAS_m = re.search(\"Origin:(.*)\", msg, flags=re.IGNORECASE)\n try:\n originAS = originAS_m.group(1).strip()\n except AttributeError:\n originAS = None\n\n return country_code, originAS", "def address_family(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address_family\")", "def encode_ga(addr: Union[str, GroupAddress]) -> int:\n def conv(main, middle, sub):\n return (int(main) << 11) + (int(middle) << 8) + int(sub)\n\n if isinstance(addr, str):\n parts = addr.split('/')\n if len(parts) == 3:\n return conv(parts[0], parts[1], parts[2])\n elif isinstance(addr, GroupAddress):\n return conv(addr.main, addr.middle, addr.sub)\n raise ValueError", "def get_family_name(self):\n return self.family_name", "def _to_float(self, s: str) -> float:\n return int(s[:-1]) / 1e9 if s.endswith('n') else float(s[:-1])", "def addr_to_decimal(bin_address):\n if len(bin_address) == 32:\n if re.match('[0-1]+', bin_address):\n return (str(to_decimal(bin_address[0:8])) + '.'\n + str(to_decimal(bin_address[8:16])) + '.'\n + str(to_decimal(bin_address[16:24])) + '.'\n + str(to_decimal(bin_address[24:32])))\n return '-1'", "def _get_notary_address(from_key):\n return _hash(FAMILY_NAME.encode('utf-8'))[0:6] + \\\n _hash(from_key.encode('utf-8'))[0:64]", "def _read_addr_resolve(self, addr: 'bytes', htype: 'int') -> 'str':\n if htype == Enum_Hardware.Ethernet: # Ethernet\n if py38:\n _addr = addr.hex(':')\n else:\n _addr = ':'.join(textwrap.wrap(addr.hex(), 2))\n else:\n _addr = addr.hex()\n return _addr", "def name_to_number(self, name):\r\n try:\r\n return self._numbers[name]\r\n except KeyError:\r\n raise KeyError(\"No field named %s in %r\" % (name, self._numbers.keys()))", "def get_address(address, registers):\n \n try:\n address, offset = address.split('+')\n offset = int(offset)\n except ValueError:\n try:\n address, offset = address.split('-')\n offset = -int(offset)\n except ValueError:\n offset = 0\n\n if address.isdigit():\n return int(address)\n\n return int(registers[address]) + offset", "def _read_proto_resolve(self, addr: 'bytes', ptype: 'int') -> 'str | IPv4Address | IPv6Address':\n if ptype == Enum_EtherType.Internet_Protocol_version_4: # IPv4\n return ipaddress.ip_address(addr)\n if 
ptype == Enum_EtherType.Internet_Protocol_version_6: # IPv6\n return ipaddress.ip_address(addr)\n return addr.hex()", "def get_family_id(self):\n return self._family_id", "def str_to_num(value):\n if isinstance(value, numbers.Number):\n return value\n try:\n return int(value)\n except ValueError:\n return float(value)", "def formatPostalCode(string):\n if string.isdigit():\n return int(string)\n else :\n return 0", "def fax_number(self) -> str:\n return self._fax_number", "def number(full_address):\n warning_message = \"\"\"\\n\n This parser should be used with the knowledge that this\n function is open to four significant vulnerabilities:\n 1) `number()` will parse the first numeric characters it\n an address string contains (read from left to right).\n If the address string has:\n a) no building number\n b) numeric characters unrelated to addressable\n information at the start of the address string\n 2) Address numbers separated by `&` or `,` will not be parsed\n 3) Building names that include numeric characters are\n incorrectly parsed as building numbers\\n\n \"\"\"\n warnings.warn(warning_message)\n return capture_address_element(NUMBER_PATTERN, full_address)", "def convert_label_string2num(label, num_types):\n dictionary = empty_label_dictionary(num_types)\n all_labels = list(dictionary.keys())\n if num_types==4:\n label = label.replace('Implicit_', '')\n label = label.replace('Explicit_', '')\n return all_labels.index(label)", "def getUS() -> int:\n pass", "def _parseNumber(self, str):\r\n\t\tif (str.count(\".\") == 0):\r\n\t\t\treturn int(str)\r\n\t\tif (str.count(\".\") == 1):\r\n\t\t\treturn float(str)\r\n\t\treturn str", "def parse_addr(addr):\n\ttry:\n\t\tnew_addr = socket.inet_aton(addr)\n\texcept:\n\t\taddr = socket.gethostbyname(addr)\n\t\ttry:\n\t\t\tnew_addr = socket.inet_aton(addr)\n\t\texcept ValueError:\n\t\t\tlogging.exception('Error:')\n\t\t\traise ValueError, 'Invalid address: %s' % addr\n\n\treturn new_addr", "def convert_zip_code(zipcode):\n zipcode = tf.strings.regex_replace(zipcode, r\"X{0,5}\", \"0\")\n zipcode = tf.strings.to_number(zipcode, out_type=tf.float32)\n return zipcode", "def _format_address(self,address):\n address = int(address)\n if address >=1 and address <= 250:\n address = hex(int(address)) #Convert address if between 0-250.\n if len(address) == 3: #Take the last char and append a zero.\n address = str(address[-1]).rjust(2,'0')\n elif len(address) == 4:\n address = address[-2:] #Take the last two char. \n return address\n elif address == 0:\n address = '00'\n return address\n else:\n return False", "def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])" ]
[ "0.59316474", "0.58998024", "0.58851534", "0.5860401", "0.58235425", "0.5787867", "0.5732305", "0.56197286", "0.55823547", "0.55612534", "0.5500597", "0.54511064", "0.5359545", "0.5355764", "0.5311285", "0.53018296", "0.5243753", "0.5242974", "0.5240195", "0.5229493", "0.5208045", "0.52067715", "0.5170368", "0.5134046", "0.5125824", "0.51162654", "0.5112587", "0.5112127", "0.51066", "0.51029897" ]
0.62958986
0
Return verified list of appropriate icmptypes.
def NormalizeIcmpTypes(self, icmp_types, protocols, af):
    if not icmp_types:
        return ['']
    # only protocols icmp or icmpv6 can be used with icmp-types
    if protocols != ['icmp'] and protocols != ['icmpv6']:
        raise UnsupportedFilterError('%s %s' % (
            'icmp-types specified for non-icmp protocols in term: ',
            self.term.name))
    # make sure we have a numeric address family (4 or 6)
    af = self.NormalizeAddressFamily(af)
    # check that address family and protocol are appropriate
    if ((af != 4 and protocols == ['icmp']) or
            (af != 6 and protocols == ['icmpv6'])):
        raise MismatchIcmpInetError('%s %s, %s: %s, %s: %s' % (
            'ICMP/ICMPv6 mismatch with address family IPv4/IPv6 in term',
            self.term.name,
            'address family', af,
            'protocols', ','.join(protocols)))
    # ensure all icmp types are valid
    for icmptype in icmp_types:
        if icmptype not in self.ICMP_TYPE[af]:
            raise UnknownIcmpTypeError('%s %s %s %s' % (
                '\nUnrecognized ICMP-type (', icmptype,
                ') specified in term ', self.term.name))
    rval = []
    rval.extend([self.ICMP_TYPE[af][x] for x in icmp_types])
    rval.sort()
    return rval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_check_types():", "def listVerificationTypes(self):\n return self.get_json('/verificationType')", "def etypes(self): # -> list[str]:\n ...", "def getCertificateTypes(self):\n ret = []\n for ct in self.certificateTypes:\n if ct == \"x509\":\n ret.append(CertificateType.x509)\n else:\n raise AssertionError()\n return ret", "def ntypes(self): # -> list[str]:\n ...", "def test_ticket_type_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def etypes(self) -> Sequence[str]:\n\n return [can_etype[1] for can_etype in self.canonical_etypes]", "def etypes(self): # -> list[None]:\n ...", "def getDhcpLogByType():\n\tstats = []\n\torder=[(\"dis\",\"Discover\"), (\"off\",\"Offer\"), (\"req\",\"Request\"), (\"ack\",\"Ack\"), (\"nak\",\"Nak\"), (\"inf\", \"Inform\")]\n\tfor t, display in order:\n\t\ttmp = DHCPEvent.objects.filter(dhcpType=t).count()\n\t\tif tmp > 0:\n\t\t\tstats.append((display, tmp))\n\treturn stats", "def _get_errortypes_params(error_types):\n return {et: \"1\" for et in error_types}", "def info_types(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetInfoType']:\n return pulumi.get(self, \"info_types\")", "def ntypes(self): # -> list[None]:\n ...", "def test_get_types(self):\n pass", "def getTypesList():\n return Gw2Spidy._request('types')['results']", "def handle_icmp(pkt, packets, i, start_point):\r\n icmp_type = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_code = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_checksum = pkt[start_point:start_point+4]\r\n packets[i][2].append(icmp_type)\r\n packets[i][2].append(icmp_code)\r\n packets[i][2].append(icmp_checksum)\r\n return packets", "def gen_test_case_type_check(self):\n cases = '\\n\\n;; Type check'\n assert_template = '(assert_invalid (module (func (result v128) ({lane_type}.{op} (i32.const 0) (f32.const 0.0)))) \"type mismatch\")'\n for op in self.BINARY_OPS:\n cases += '\\n' + assert_template.format(lane_type=self.LANE_TYPE, op=op)\n\n return cases", "def ntypes(self) -> Sequence[str]:\n ntypes = list(self.num_nodes_dict.keys())\n return ntypes", "def listtypes(self):\n\n pattern_types = [i for i in sorted(self._allowed_patterns.iterkeys())]\n\n return pattern_types", "def getProposalTypesVocab(self):\n list = DisplayList()\n # Acquire the types\n types = self.aq_inner.aq_parent.getProposalTypes()\n for type in types:\n list.add(type, type)\n return list", "def info_types(self) -> Optional[List['outputs.PreventionInspectTemplateInspectConfigInfoType']]:\n return pulumi.get(self, \"info_types\")", "def etypes(self): # -> None:\n ...", "def test_type_result(self):\n result = self.parser.msg_analysis(MSG_TEST_NO_RESULT[0])\n assert isinstance(result, list)", "def get_all_servers_types():\n ret = _get_list(\n lambda server: server.type if server.type not in ['vanilla.winter', 'vanilla.desert', 'pvp'] else False,\n lambda server: server.type_name\n )\n\n # Extra server type filters\n ret.append({\n 'value': 'pacific+edelweiss',\n 'label': 'RWR: WWII DLCs'\n })\n\n return ret", "def opinion_type_list():\n for type_ in orm.DataFlagOpinionType.select():\n click.echo(type_.name)", "def _parse_name_type_pairs(self, array, types):\n pred_list = []\n if len(array)%3 != 0:\n print(\"Expected predicate to be typed \" + str(array))\n sys.exit()\n for i in range(0, int(len(array)/3)):\n if 
array[3*i+1] != '-':\n print(\"Expected predicate to be typed\")\n sys.exit()\n if array[3*i+2] in types:\n pred_list.append((array[3*i], array[3*i+2]))\n else:\n print(\"PARSING ERROR {} not in types list\".format(array[3*i+2]))\n print(\"Types list: {}\".format(self.type_list))\n sys.exit()\n return pred_list", "def get_types(self) -> List[str]:\n return sorted(list(self._radii.keys()))", "def get_data_types(psort_dict):\n data_types = set()\n for event in psort_dict.keys():\n data_types.add(psort_dict[event][\"data_type\"])\n return data_types", "def getPrimaryTypes() -> List[int]:\n ...", "def ntypes(self): # -> None:\n ...", "def supported_provisioning_types(self):\n return self.properties.get(\"supportedProvisioningTypes\", StringCollection())" ]
[ "0.606907", "0.59195083", "0.57211316", "0.56508046", "0.5586044", "0.5520569", "0.5446284", "0.54309523", "0.53693205", "0.523012", "0.5220658", "0.5187415", "0.5184833", "0.51365936", "0.5123326", "0.5120659", "0.50918657", "0.5088941", "0.5084246", "0.49756548", "0.49311334", "0.49238622", "0.4921744", "0.49135813", "0.49122155", "0.48842996", "0.48781484", "0.4871188", "0.48552966", "0.48543426" ]
0.6916867
0
Return a term name which is equal or shorter than _TERM_MAX_LENGTH. New term is obtained in two steps. First, if allowed, automatic abbreviation is performed using hardcoded abbreviation table. Second, if allowed, term name is truncated to specified limit.
def FixTermLength(self, term_name, abbreviate=False, truncate=False,
                  override_max_length=None):
    new_term = term_name
    if override_max_length is None:
        override_max_length = self._TERM_MAX_LENGTH
    if abbreviate:
        for word, abbrev in self._ABBREVIATION_TABLE:
            if len(new_term) <= override_max_length:
                return new_term
            new_term = re.sub(word, abbrev, new_term)
    if truncate:
        new_term = new_term[:override_max_length]
    if len(new_term) <= override_max_length:
        return new_term
    raise TermNameTooLongError('Term %s (originally %s) is '
                               'too long. Limit is %d characters (vs. %d) '
                               'and no abbreviations remain or abbreviations '
                               'disabled.' % (new_term, term_name,
                                              override_max_length,
                                              len(new_term)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abbreviator(max_length):\n \n def abbreviate(text):\n if len(text) <= max_length:\n return text\n else:\n return text[: max_length - 3] + \"...\"\n\n return abbreviate", "def _truncate_name(orig_str, word_num):\n if not orig_str:\n return orig_str\n tokens = string_utils.tokenizer(orig_str)\n if len(tokens) > word_num:\n orig_str = ' '.join(tokens[:word_num])\n return orig_str", "def _shortname(some_string, maxlen=20):\n some_string = str(some_string)\n if not get_config()['truncate_labels']:\n return some_string\n if len(some_string) > maxlen:\n return some_string[:maxlen - 3] + \"...\"\n else:\n return some_string", "def get_available_name(self, name, max_length=None):\n return name", "def shn_abbreviate(word, size=48):\n\n if word:\n if (len(word) > size):\n word = \"%s...\" % word[:size - 4]\n else:\n return word\n else:\n return word", "def test_long_name():\n expect_error(register, InputError,\n \"a\", \"abcdef\", \"a\" * (MAX_NAME + 1), \"a\", \"a\")\n expect_error(register, InputError,\n \"a\", \"abcdef\", \"a\", \"a\" * (MAX_NAME + 1), \"a\")", "def _model_string_maxlen():\n # hardcoded for convenience. Could be dynamically set in future.\n # the current longest is: BLOSUM62+I+G+X, i.e. 14 chars.\n # so we just over double it, for safety\n\n return 30", "def too_long_words(word):\n\n # If work is longer than 10 letters, print the word according to these rules\n if len(word) > 10:\n print word[0] + str(len(word[1:-1])) + word[-1]\n\n else:\n print word", "def get_available_tenant_name(self, name, max_length=64):\n keystone = self.keystone_admin_client\n try:\n tenants = keystone.projects.list(domain=self._get_domain())\n except keystone_exceptions.ClientException as e:\n raise OpenStackBackendError(e)\n\n names = [tenant.name for tenant in tenants]\n new_name = name\n\n # If the tenant name already exists, add an underscore and a random 3\n # character alphanumeric string to the tenant name until the generated name doesn't exist.\n # Truncate original name if required, so the new name does not exceed the max_length.\n while new_name in names:\n new_name = f\"{name}_{get_random_string(3)}\"\n truncation = len(new_name) - max_length\n if truncation > 0:\n new_name = f\"{name[:-truncation]}_{get_random_string(3)}\"\n return new_name", "def get_field_length_error_text(field_name):\n\n\treturn(\"Value entered for '{0}' exceeds character length limit of {1}\"\n\t\t .format(field_name, str(field_length_limit)))", "def render_word(self, min_length=3, max_length=12):\n while True:\n word = \"\".join(self.render(lambda o: len(o) > 1 and o[-1] == \" \", lambda n: n[0] == \" \"))\n if min_length <= len(word.strip()) <= max_length:\n return word.strip()", "def create_random_name(length):\n name = (''.join(random.choices(string.ascii_lowercase, k=length)))\n name_capitalized = name.capitalize()\n return name_capitalized", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def get_random_job_prefix(fixed_prefix='',\r\n max_job_prefix_len=10,\r\n leading_trailing_underscores=True):\r\n\r\n length = max_job_prefix_len - len(fixed_prefix)\r\n if leading_trailing_underscores:\r\n length -= 2\r\n\r\n result = [choice(RANDOM_JOB_PREFIX_CHARS) for i in range(length)]\r\n if leading_trailing_underscores:\r\n return fixed_prefix + '_' + ''.join(result) + '_'\r\n else:\r\n return fixed_prefix + ''.join(result)", "def name(self, value):\n max_characters = 512\n conditions = [validate_max_length(value, max_characters)]\n 
if all(conditions):\n self._update_values('name', value)\n else:\n raise InvalidValue(f'{value} is invalid. Condition max_characters must be less than or equal to '\n f'{max_characters}')", "def _get_random_job_prefix(self,\r\n fixed_prefix='',\r\n max_job_prefix_len=10,\r\n leading_trailing_underscores=True):\r\n\r\n length = max_job_prefix_len - len(fixed_prefix)\r\n if leading_trailing_underscores:\r\n length -= 2\r\n\r\n result = [choice(RANDOM_JOB_PREFIX_CHARS) for i in range(length)]\r\n if leading_trailing_underscores:\r\n return fixed_prefix + '_' + ''.join(result) + '_'\r\n else:\r\n return fixed_prefix + ''.join(result)", "def cut_phrase(phrase):\n return phrase if len(phrase) < max_len else phrase[:max_len] + \"...\"", "def _term_name(self):\n \n year = self.term[0:3]\n season = self.term[4:]\n if season == self.TERMNUM[1]:\n result = \"{}{}\".format(year, self.TERM[0])\n \n elif season == self.TERMNUM[2]:\n result = \"{}{}\".format(year, self.TERM[1])\n \n else:\n result = \"{}{}\".format(year, self.TERM[2])\n \n return result", "def __get_term_bigram(self, indeks_token_pertama:int, tokens:list):\n term1 = tokens[indeks_token_pertama]\n term2 = tokens[indeks_token_pertama + 1]\n return ' '.join([term1, term2])", "def test_search_terms_max_length(self):\n max_length = self.search._meta.get_field('terms_en').max_length\n self.assertEqual(max_length, 50)", "def trim_display_field(self, value, max_length):\n if not value:\n return ''\n if len(value) > max_length:\n return value[:max_length - 3] + '...'\n return value", "def shortName(self, length = 3):\n if len(self.short) == length: return self.short\n s = str(self)\n if len(s) < length:\n self.short = s + \" \"*(length-len(s))\n return self.short\n r = []; alphaNum = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n for ch in s:\n if ch in alphaNum:\n r.append(ch)\n elif ch in \", \": alphaNum = alphaNum + \"0123456789\"\n elif ch == \".\":\n del r[-1]\n alphaNum = alphaNum[:26]\n r = r[:length]\n if len(r) < length: r.extend([\" \"]*(length-len(r)))\n if self.suffix >= 1: r[-1] = str(self.suffix)[-1]\n self.short = \"\".join(r)\n return self.short", "def get_word(naf: KafNafParser, term: Cterm) -> str:\n tokenids = naf.get_dict_tokens_for_termid(term.get_id())\n tokens = sort_tokens(naf.get_token(tid) for tid in tokenids)\n return \" \".join(t.get_text() for t in tokens)", "def _get_maximum_prefix(self):\n return self.__maximum_prefix", "def xterm_title(value, max_length=74, bypass_term_check=False):\n TERM = os.getenv('TERM')\n if not bypass_term_check and TERM not in TERM_TITLE_SUPPORTED:\n return\n sys.stderr.write('\\033]2;'+value[:max_length]+'\u0007')\n sys.stderr.flush()", "def shortened_text(self, max_chars=50):\n if len(self.text) > max_chars:\n return self.text[:max_chars] + \"...\"\n else:\n return self.text", "def get_max_word_length(self, word_dict):\n max_len = 0\n max_word = \"\"\n for word in word_dict:\n word = \"^\" + word + \"$\"\n if len(word) > max_len:\n max_len = len(word)\n max_word = word\n print(\"Longest word: \" + max_word + \" \" + str(max_len))\n return max_len", "def upperLimit(lenFact1):\n\tfact2 = lenFact1*fact9\n\tlenfact2 = len(str(fact2))\n\tmaxFact = lenfact2*fact9\n\tlenMax = len(str(maxFact))\n\tif lenMax == lenfact2: \n\t\treturn maxFact\n\telse:\n\t\treturn upperLimit(lenMax)", "def get_suffix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n start = len(word) - length\n return word[start:]\n else:\n return word.rjust(length, \"*\")", "def str_maxed(arg, maxlen, 
ellipsis_str=\"..\"):\n s = str(arg)\n if maxlen <= 0 or len(s) <= maxlen:\n return s\n else:\n return \"%s%s\" % (s[:maxlen], ellipsis_str)" ]
[ "0.69068295", "0.610185", "0.59963334", "0.59946245", "0.5861028", "0.57489294", "0.5744382", "0.57135934", "0.5641767", "0.5536663", "0.55157727", "0.55130416", "0.54997385", "0.5461334", "0.54450923", "0.5429746", "0.5419327", "0.5365891", "0.53508574", "0.53489125", "0.5332583", "0.53153694", "0.5313477", "0.53127927", "0.53078365", "0.52967656", "0.5277516", "0.5268323", "0.52675223", "0.524425" ]
0.7218318
0
Return a hexadecimal digest of the name object.
def HexDigest(self, name, truncation_length=None):
    if truncation_length is None:
        truncation_length = 64
    name_bytes = name.encode('UTF-8')
    return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hashname(self):\n return hashlib.md5(self.name.encode('utf-8')).hexdigest()", "def hexdigest(self):\n return self.hashObject.hexdigest()", "def hash_cli_name(name):\n from hashlib import blake2b\n return blake2b(name.encode(), digest_size=32).hexdigest()", "def name_hash(namepart):\n return sha256(os.fsencode(namepart)).hexdigest()", "def _hash_name(self, name, length=None):\n if not length:\n length = self.header_size\n hashed = name[:min(length, len(name))]\n for x in range(length, len(name), length):\n rem = min(x+length,len(name))-x\n for i in range(rem):\n hashed = hashed[:i] + chr(ord(name[x + i]) ^ ord(hashed[i])) + hashed[i+1:]\n if len(hashed) < length:\n hashed += '\\x00' * (length-len(hashed))\n return hashed", "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def hex(self) -> str:\n return self.__hash.hexdigest()", "def digest(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"digest\", _args)\n return _ctx.execute_sync(str)", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hexdigest(self):\n # bytes.hex() is simpler, but not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()", "def __hash__(self):\n Name._hashes_used += 1\n if self._name is None:\n return 0\n value = ord(self._name[0]) << 7\n for char in self._name:\n value = c_mul(1000003, value) ^ ord(char)\n value = value ^ len(self._name)\n if value == -1:\n value = -2\n # The result is trimmed down to 31 bits (plus a sign bit) to give\n # consistent results on 32 and 64 bit systems\n # Otherwise hash() will implicitly do this based on the Python build\n # see https://docs.python.org/3/reference/datamodel.html#object.__hash__\n value = value % 0b0111_1111_1111_1111_1111_1111_1111_1111\n return value", "def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)", "def dopplr(name):\n return \"#\" + hashlib.sha224(name).hexdigest()[:6]", "def hash(self) -> str:\r\n ...", "def fingerprint(self) -> str:\n fp = self.sha256.hex()\n return fp", "def __str__(self: Hash) -> str:\n return self.to_hex()", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def _digest(self):\n return self._hasher.hexdigest()", "def sdbm_hash(name):\n ret = 0\n for ii in name:\n ret = (ret * 65599 + ord(ii)) & 0xFFFFFFFF\n return hex(ret)", "def digest(self):\n pass", "def digest(self):\n pass", "def digest(self):\n pass", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def digest(self):\n return self._hash" ]
[ "0.76227564", "0.7453346", "0.72933936", "0.71679014", "0.7166738", "0.7118155", "0.6900448", "0.6831404", "0.6758074", "0.6747787", "0.6685873", "0.66172093", "0.66035944", "0.6568217", "0.65670246", "0.65318817", "0.6527971", "0.6488435", "0.6488435", "0.6488435", "0.6488435", "0.6488435", "0.6488435", "0.6479202", "0.6465758", "0.6401301", "0.6401301", "0.6401301", "0.6383226", "0.63809323" ]
0.8001484
0
Convert a protocol name to a numeric value.
def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
    return_proto = []
    for protocol in protocols:
        if protocol in proto_to_num:
            return_proto.append(name_to_num_map[protocol])
        else:
            return_proto.append(protocol)
    return return_proto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name_to_number(self, name):\r\n try:\r\n return self._numbers[name]\r\n except KeyError:\r\n raise KeyError(\"No field named %s in %r\" % (name, self._numbers.keys()))", "def decode_network_number(ptype, plen, buf):\n return number.unpack_from(buf, header.size)[0]", "def str_to_num(value):\n if isinstance(value, numbers.Number):\n return value\n try:\n return int(value)\n except ValueError:\n return float(value)", "def convert_label_string2num(label, num_types):\n dictionary = empty_label_dictionary(num_types)\n all_labels = list(dictionary.keys())\n if num_types==4:\n label = label.replace('Implicit_', '')\n label = label.replace('Explicit_', '')\n return all_labels.index(label)", "def str2num(s):\r\n try:\r\n return int(s)\r\n except ValueError:\r\n return float(s)", "def str_to_num(s):\n\n method = {\n \"float\": string.atof,\n \"int\": string.atoi\n }\n\n if not type(s) is StringType:\n return 0\n\n if \".\" in s:\n return method[\"float\"](s)\n else:\n return method[\"int\"](s, 10)", "def currency_to_protocol(amount):\n if type(amount) == float:\n amount = \"%.8f\" % amount\n\n return int(amount.replace(\".\", '')) # avoiding float math", "def port(name):\n\n words = name.upper().split('-', 1)\n\n if len(words) == 1:\n words.append(words[0][1])\n\n return int(f\"{ord(words[0][0])}{ord(words[1][0])}\")", "def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError", "def convert_to_num(version_str):\n if not version_str:\n return 0\n if str(version_str).isdigit():\n return version_str\n version_str = version_str.replace(\".\", \"\")\n return int(version_str) if version_str.isdigit() else 0", "def to_int(name, default=0):\n try:\n return int(get(name))\n except (TypeError, ValueError):\n return default", "def symb_to_num(symbolic):\n\n if len(symbolic) == 9:\n group = (symbolic[:-6], symbolic[3:-3], symbolic[6:])\n try:\n numeric = notation[group[0]] + notation[group[1]] + notation[group[2]]\n except:\n numeric = \"Invalid Symbolic Representation!\"\n else:\n numeric = \"Symbolic input should be of lengh 9!\"\n\n return numeric", "def name_to_number(name):\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n elif name == \"scissors\":\n number = 4\n else:\n print \"Name is invalid!\"\n return 1\n return number", "def get(self,num):\n\t\t_result = None\n\t\tif num in self._protocols:\n\t\t\t_result = self._protocols[num]\n\n\t\treturn _result", "def name_to_number(name):\n if (name == 'rock' or name == 'Rock'):\n return 0\n elif (name == 'Spock' or name == 'spock'):\n return 1\n elif (name == 'paper' or name == 'Paper'):\n return 2\n elif (name == 'lizard' or name == 'Lizard'):\n return 3\n elif (name == 'scissors' or name == 'Scissors'):\n return 4\n else:\n return -1", "def name_to_number(name):\n\n # A simple if/elif/else game...\n\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n else:\n number = 4\n return number", "def num(s: str):\n try: return int(s)\n except ValueError: return float(s)", "def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)", "def to_numeric(s):\n\n try:\n s = float(s)\n except Exception as e:\n log.debug('Caught `{e!s}` trying to cast {s!r} to numeric'.format(**locals()))\n pass\n return s", "def to_number(param, in_str):\n 
try:\n return float(in_str)\n except ValueError:\n return exit_msg(f\"Bad Request: Wrong type, expected 'number' for parameter '{param}'\")", "def name_to_number(name):\r\n \r\n if name == \"rock\":\r\n return 0\r\n elif name == \"Spock\":\r\n return 1\r\n elif name == \"paper\":\r\n return 2\r\n elif name == \"lizard\":\r\n return 3\r\n elif name == \"scissors\":\r\n return 4\r\n else:\r\n return \"Invalid!Enter any one of the following: rock,Spock,paper,lizard,scissors\"", "def _get_number_from_string(x):\n try:\n return float(x)\n except ValueError:\n raise ValueError('Unknown element')", "def parse_number(txt):\n return int(txt)", "def recvnumber(self):\n\n data = self.recvraw()\n try:\n return int(data)\n except ValueError:\n try:\n return float(data)\n except ValueError:\n return complex(data)", "def parse(value):\n return int(value)", "def _parseNumber(self, str):\r\n\t\tif (str.count(\".\") == 0):\r\n\t\t\treturn int(str)\r\n\t\tif (str.count(\".\") == 1):\r\n\t\t\treturn float(str)\r\n\t\treturn str", "def _convert_val_to_numeric(val, cast_type=float, regex_exp='[^0-9.-()]'):\n multiplier = 1\n\n try:\n stripped = regex.sub(regex_exp, '', val)\n if len(stripped) >= 2 and stripped[0] == '(' and stripped[-1] == ')':\n multiplier = multiplier*-1\n ans = cast_type(stripped[1:-1])\n else:\n ans = cast_type(regex.sub(regex_exp, '', val))\n ans = ans * multiplier\n except ValueError:\n ans = pd.NA\n print(f\"ERROR READING VALUE:\\\"{val}\\\"\\t Filling with <NA>\")\n except TypeError:\n if type(val) in [float, int, cast_type]:\n ans = cast_type(val)\n else:\n print(f\"ERROR READING VALUE:\\\"{val}\\\"\\t Filling with <NA>\")\n ans = pd.NA\n return ans", "def decode(self, number: int) -> typing.Union[int, str]:\n return number", "def str2num(numstr):\n require_type(isa(numstr,str), 'parameter of string->number must be a string')\n return transform(numstr)", "def decode(val):\n if isinstance(val, Decimal):\n return float(val)\n return val" ]
[ "0.6208457", "0.5929694", "0.5901976", "0.58474815", "0.57029825", "0.5638023", "0.55621403", "0.554133", "0.55332035", "0.5522894", "0.5487925", "0.54466546", "0.54262304", "0.53985983", "0.5384949", "0.537553", "0.5372519", "0.53687495", "0.53624064", "0.53583896", "0.5352509", "0.5350335", "0.5315453", "0.5315345", "0.53045833", "0.5283852", "0.5239324", "0.5225255", "0.52074", "0.5196419" ]
0.6216994
0
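Several of the negative snippets above share the same string-to-number idiom: try int() first and fall back to float() on ValueError. A minimal, self-contained sketch of that pattern (the name str2num simply mirrors the variants above):

def str2num(s):
    """Parse a string as an int when possible, otherwise as a float."""
    try:
        return int(s)      # exact integers first: "42" -> 42
    except ValueError:
        return float(s)    # fall back to float: "3.14" -> 3.14

# Usage: str2num("42") -> 42, str2num("3.14") -> 3.14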
Add repository tagging into the output.
def AddRepositoryTags(prefix='', rid=True, date=True, revision=True, wrap=False): tags = [] wrapper = '"' if wrap else '' # Format print the '$' into the RCS tags in order prevent the tags from # being interpolated here. p4_id = '%s%sId:%s%s' % (wrapper, '$', '$', wrapper) p4_date = '%s%sDate:%s%s' % (wrapper, '$', '$', wrapper) p4_revision = '%s%sRevision:%s%s' % (wrapper, '$', '$', wrapper) if rid: tags.append('%s%s' % (prefix, p4_id)) if date: tags.append('%s%s' % (prefix, p4_date)) if revision: tags.append('%s%s' % (prefix, p4_revision)) return tags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tagger():", "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "def tag_repo(deploy_info, location=os.getcwd()):\n\n ensure_dir(location)\n with utils.cd(location):\n cmd = \"\"\"\n /usr/bin/git tag -fa \\\\\n -m 'user {0}' \\\\\n -m 'timestamp {1}' -- \\\\\n {2} {3}\n \"\"\".format(\n deploy_info['user'],\n deploy_info['timestamp'],\n deploy_info['tag'],\n deploy_info['commit']\n )\n subprocess.check_call(cmd, shell=True)", "def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def __gitDescribeTag(self):\n self.vcs.gitDescribe(self.project.getProjectPath(), [])", "def addTag(self, repoType, txt, addSlash=True, project=0):\n # remove slash\n while txt.startswith(\"/\"):\n txt = txt[1:]\n \n # add fix to support & in filename, ampersand is used \n # as a shortcut for the tab by pyqt\n txt = txt.replace(\"&\", \"&&\")\n # end of fix\n \n if repoType == UCI.REPO_TESTS_LOCAL:\n repo = \"local-tests\"\n elif repoType == UCI.REPO_TESTS:\n repo = \"remote-tests\"\n project_name = self.iRepo.remote().getProjectName(project=project)\n repo += '(%s)' % project_name\n elif repoType == UCI.REPO_ADAPTERS:\n repo = \"remote-adapters\"\n elif repoType == UCI.REPO_LIBRARIES:\n repo = \"remote-libraries\"\n elif repoType == UCI.REPO_UNDEFINED:\n repo = \"undefined\"\n else:\n repo = \"unknown\"\n self.error( \"repo unknown: %s\" % repoType )\n if addSlash:\n if repoType == UCI.REPO_TESTS_LOCAL:\n ret = \"%s:%s\" % (repo, txt) \n else:\n ret = \"%s:/%s\" % (repo, txt)\n else:\n ret = \"%s: %s\" % (repo, txt) \n return ret", "def tag(self, repository: str, tag: Optional[str], force: bool = False) -> bool:\n _ = force\n\n params = {\"repo\": repository}\n if tag is not None:\n params[\"tag\"] = tag\n\n response = self.client.post(f\"/images/{self.id}/tag\", params=params)\n\n if response.status_code == 201:\n return True\n\n error = response.json()\n if response.status_code == 404:\n raise ImageNotFound(error[\"cause\"], response=response, explanation=error[\"message\"])\n raise APIError(error[\"cause\"], response=response, explanation=error[\"message\"])", "def tags():", "def process_repo(vb, options):\n if not options.repo:\n return\n\n vb.add_repo(options.repo_os, options.repo_id, options.repo_name, options.repo_url,\n options.unique, options.repo_tags)", "def repo_tag(self):\n return '%s/gcloud/%s' % (constants.APPENGINE_REGISTRY, self._tag)", "def add_tag(tag):\n check_call(['git', 'tag', tag])", "def show_tags(config, args):\n for item in lib.input_json_lines():\n yield config.repo.tag(item)", "def commit(self, repository, tag=None, push=False, **kwargs):\n logger.debug('Committing `%s` with container id %s ...', self.fqdn, self.container.short_id)\n image = self.container.commit(repository=repository, tag=tag, **kwargs)\n logger.debug('%s repo tags committed with image id as %s', image.tags, image.short_id)\n if push:\n logger.debug('Pushing image of `%s` to repository %s ...', self.fqdn, repository)\n for line in client.api.push(repository, tag, stream=True, decode=True):\n line.pop('progressDetail', None) # take out too much detail\n logger.debug(line)\n logger.debug('%s repo tags pushed for `%s`, whose image id is %s',\n image.tags, self.fqdn, image.short_id)", "def commit(self, repository=None, tag=None, **kwargs):\n\n resp = self.client.api.commit(self.id, repository=repository, tag=tag,\n **kwargs)\n return 
self.client.images.get(resp['Id'])", "def add_tagging(self, task_instance):", "def push(self, repo, tag):\n logger.info(\"Pushing Docker image {}:{}\".format(repo, tag))\n stream = self.client.push(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)", "def cmd_gallery_tag(client, args):\n gallery_tag = client.gallery_tag(args.tag, args.sort, args.page, args.window)\n data = gallery_tag.__dict__\n data['items'] = [item.__dict__ for item in data['items']]\n generate_output({'gallery_tag': data})", "def tag(self, image, repo, tag):\n check_blacklist(repo)\n logger.info(\"Tagging Docker image {} as {}:{}\".format(image, repo, tag))\n if not self.client.tag(image, repo, tag=tag, force=True):\n raise docker.errors.DockerException(\"tagging failed\")", "def tag_release():\n # We're assuming that setup.py has already been updated\n # manually or using scripts/release/bump-version so the\n # current version in setup.py is the version number we should tag.\n version_number = get_current_version_number()\n click.echo(\"Tagging %s release\" % version_number)\n subprocess.check_call(\n ['git', 'tag', '-a', version_number,\n '-m', 'Tagging %s release' % version_number],\n )", "def tag(self, *arguments, **kwargs):\n return self.get_output('tag', *arguments, **kwargs)", "def do_version_tag(args, image_name_tag, image_name):\n if args.versiontag is True:\n date_stamp = \"{:%Y%m%d%H%M%S}\".format(datetime.now())\n version_tag = args.tag + '-' + date_stamp\n image_name_version_tag = f\"{image_name}:{version_tag}\"\n return_code = tag(image_name_tag, image_name_version_tag)\n if return_code == 0:\n push(args, image_name_version_tag)", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def tags_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"tags\", access_token)", "def cmd_gallery_tag_image(client, args):\n gallery_tag_image = client.gallery_tag_image(args.tag, args.image_id)\n data = gallery_tag_image.__dict__\n generate_output({'gallery_tag_image': data})", "def add_tag(self, tag):\n self.tags.append(tag)", "def add_tags(event):\n\n add_tags_from_presets()", "def add(self, tag):\n self.tags[tag.name] = tag", "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "def tagged(msg_id, tag_name, build_nvr):\n if conf.system not in (\"koji\", \"test\"):\n return []\n\n # Find our ModuleBuild associated with this tagged artifact.\n module_build = models.ModuleBuild.get_by_tag(db_session, tag_name)\n if not module_build:\n log.debug(\"No module build found associated with koji tag %r\", tag_name)\n return\n\n # Find tagged component.\n component = models.ComponentBuild.from_component_nvr(\n db_session, build_nvr, module_build.id)\n if not component:\n log.error(\"No component %s in module %r\", build_nvr, module_build)\n return\n\n log.info(\"Saw relevant component tag of %r from %r.\", component.nvr, msg_id)\n\n # Mark the component as tagged\n if tag_name.endswith(\"-build\"):\n component.tagged = True\n else:\n component.tagged_in_final = True\n db_session.commit()\n\n if any(c.is_unbuilt for c in module_build.current_batch()):\n log.info(\n \"Not regenerating repo 
for tag %s, there are still building components in a batch\",\n tag_name,\n )\n return []\n\n # If all components are tagged, start newRepo task.\n if not any(c.is_completed and not c.is_tagged for c in module_build.up_to_current_batch()):\n builder = GenericBuilder.create_from_module(\n db_session, module_build, conf)\n\n if any(c.is_unbuilt for c in module_build.component_builds):\n if not _is_new_repo_generating(module_build, builder.koji_session):\n repo_tag = builder.module_build_tag[\"name\"]\n log.info(\"All components in batch tagged, regenerating repo for tag %s\", repo_tag)\n task_id = builder.koji_session.newRepo(repo_tag)\n module_build.new_repo_task_id = task_id\n else:\n log.info(\n \"newRepo task %s for %r already in progress, not starting another one\",\n str(module_build.new_repo_task_id), module_build,\n )\n else:\n # In case this is the last batch, we do not need to regenerate the\n # buildroot, because we will not build anything else in it. It\n # would be useless to wait for a repository we will not use anyway.\n log.info(\n \"All components in module tagged and built, skipping the last repo regeneration\")\n from module_build_service.scheduler.handlers.repos import done as repos_done_handler\n events.scheduler.add(\n repos_done_handler, (\"fake_msg\", builder.module_build_tag[\"name\"]))\n db_session.commit()", "def addTag(id = 0):\n\tinsertQueries.addTag(request.form)\n\tresults = queries.package(id)\n\treturn render_template('package.html', package=results)" ]
[ "0.64183354", "0.61213785", "0.6051812", "0.5944593", "0.591629", "0.578211", "0.57157874", "0.5659281", "0.5655233", "0.56378603", "0.5632978", "0.5630898", "0.55990887", "0.55730367", "0.5563395", "0.5534452", "0.55233854", "0.55143166", "0.548826", "0.54208285", "0.5376026", "0.53751767", "0.5344717", "0.5325035", "0.52937084", "0.5262866", "0.52510697", "0.52334917", "0.52161163", "0.5209795" ]
0.6521547
0
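A minimal usage sketch for the AddRepositoryTags helper above; the prefix value '# ' is only an illustration:

# Emit the unexpanded RCS keyword lines, prefixed as Python comments.
for tag in AddRepositoryTags(prefix='# '):
    print(tag)

# Prints (before the VCS expands the keywords):
# # $Id:$
# # $Date:$
# # $Revision:$

Passing wrap=True surrounds each keyword marker with double quotes instead.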
Return a list of occupant types for all huts. This is mainly used for printing information on the current status of each hut (whether unoccupied, acquired, etc.). If the occupant is not `None`, the occupant type will be 'enemy' or 'friend'. But if there is no occupant, or the hut is already 'acquired', the occupant_type will display that information instead. See `Hut.get_occupant_type()` for more details. Return a list that collects this information from all the huts. This is a list comprehension example. More on list comprehensions appears in a later chapter on Performance.
def get_occupants(self): return [x.get_occupant_type() for x in self.huts]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getEmergencyTypes(self):\n types_list = []\n\n data = await self.director.getItemInfo(self.item_id)\n jsonDictionary = json.loads(data)\n\n if jsonDictionary[0][\"capabilities\"][\"has_fire\"]:\n types_list.append(\"Fire\")\n if jsonDictionary[0][\"capabilities\"][\"has_medical\"]:\n types_list.append(\"Medical\")\n if jsonDictionary[0][\"capabilities\"][\"has_panic\"]:\n types_list.append(\"Panic\")\n if jsonDictionary[0][\"capabilities\"][\"has_police\"]:\n types_list.append(\"Police\")\n\n return types_list", "def occupy_huts():\n huts = []\n occupants = ['enemy', 'friend', 'unoccupied']\n while len(huts) < 5:\n computer_choice = random.choice(occupants)\n huts.append(computer_choice)\n return huts", "def ancestry_iris(self):\n return list(self._class_types)", "def _occupy_huts(self):\n for i in range(5):\n choice_lst = ['enemy', 'friend', None]\n computer_choice = random.choice(choice_lst)\n if computer_choice == 'enemy':\n name = 'enemy-' + str(i+1)\n self.huts.append(Hut(i+1, OrcRider(name)))\n elif computer_choice == 'friend':\n name = 'knight-' + str(i+1)\n self.huts.append(Hut(i+1, Knight(name)))\n else:\n self.huts.append(Hut(i+1, computer_choice))", "def calculate_occupancy(self):\n # TODO will need to be fixed now that using a dict and changed thresholds\n self.occupancy = [list(x > self.t for x in y) for y in self.counts]\n return self.occupancy", "def getOccupied(self):\n occupiedList = []\n for spot in self.parkingSpots:\n if spot.status == 'occupied':\n occupiedList.append(spot)\n return occupiedList", "def print_occupants(self):\n for num, member in enumerate(self.occupants, start=1):\n print(num, member.name)", "def get_types():\n \n attacker = input('Attacker: ')\n defender = input('Defender: ')\n return attacker.upper(), defender.upper()", "def health_titans():\r\n return [titan.health for titan in Titan.titans]", "def antennasByType(ants=0) :\n ovroAnts = []\n bimaAnts = []\n szaAnts = []\n antlist = makeAntList(ants)\n for a in antlist :\n if device.CarmaAnt().isOvro(a) : ovroAnts.append(a)\n elif device.CarmaAnt().isBima(a) : bimaAnts.append(a)\n elif device.CarmaAnt().isSza(a) : szaAnts.append(a)\n else : raise Exception, '%s is a bogus antenna number!' 
%a\n return [ovroAnts,bimaAnts,szaAnts]", "def display_injury_type(self):\n return ', '.join(type_of_injury.name for genre in self.type_of_injury.all()[:3])", "def getTypesList():\n return Gw2Spidy._request('types')['results']", "def tileTypes(self):\n types = []\n\n for type_ in getAllUtilitiesRegisteredFor(ITileType):\n if checkPermission(type_.add_permission, self.context):\n types.append(type_)\n\n types.sort(self.sortKey)\n return types", "async def incidentTypes(self, includeHidden: bool = False) -> Iterable[str]:", "def test_topo_current_occupants_int():\n instance = topo.Topography()\n assert type(instance.current_occupants()[\"Herbivores\"]) == int and type(\n instance.current_occupants()[\"Carnivores\"]) == int", "def listAffiliationType(self):\n return self.get_json('/affiliationType')", "def abilities_all_types():\r\n\r\n ability_mods = abilities_gen_mods()\r\n\r\n with patch(\"funclg.utils.data_mgmt.id_gen\", side_effect=ability_ids()):\r\n all_abilities = []\r\n for index, a_type in enumerate(ABILITY_TYPES):\r\n\r\n all_abilities.append(\r\n Abilities(\r\n name=f\"Ability_{index}\",\r\n ability_type=a_type,\r\n description=f\"{a_type} ability\",\r\n mod=ability_mods[a_type],\r\n )\r\n )\r\n\r\n all_abilities.append(\r\n Abilities(\r\n name=\"Ability_Error_NoMod\",\r\n ability_type=\"Error\",\r\n description=\"Error ability\",\r\n )\r\n )\r\n return all_abilities", "def getImmediatelyAddableTypes(self, context=None):\n return self.getLocallyAllowedTypes()", "def listOrganizationTypes(self):\n return self.get_json('/organizationType')", "def get_measurement_types():\n\n all_measures = ['temperature', 'humidity', 'pressure']\n\n ####################\n return all_measures\n ####################", "def list_mission_types(self):\n return self._missions_service.list_mission_types()", "def amenities(self):\n G, mapping = self.network()\n waste = []\n resources = []\n intmed_products = []\n\n for nd in G:\n # if nd[0] != \"r\":\n if not isinstance(nd, int):\n if not G.in_edges(nd):\n resources.append(nd)\n elif not G.out_edges(nd):\n if nd != self.commodity:\n waste.append(nd)\n else:\n intmed_products.append(nd)\n\n return waste, resources, intmed_products", "def get_occupant(self):\n\t\tpass", "def getLocallyAllowedTypes(self):\n portal_types = api.portal.get_tool('portal_types')\n my_type = portal_types.getTypeInfo(self)\n result = portal_types.listTypeInfo()\n return [t for t in result if my_type.allowType(t.getId()) and\n t.isConstructionAllowed(self)]", "def get_type_of_habit(self,obj):\n type_of_habit = obj.habit.type_of_habit\n return type_of_habit", "def get_occupant(self):\n\t\treturn self.occupant", "def _inferred_type_levels(self) -> list[str]:\n return [i.inferred_type for i in self.levels]", "def attack_sets(self):\n # TODO These should be component queries\n attack_sets = []\n if self.host.equipment:\n attack_sets.append(\n attacks.AttackSet(\n attacks.WeaponAttack, amount=len(self.host.equipment.get_wielded_grasp_slots())\n )\n )\n\n if self.host.body:\n attack_sets.extend(self.host.body.get_attacks())\n\n if self.host.monster and self.host.monster.base_monster.attack_sets:\n attack_sets.extend(\n self.host.monster.base_monster.attack_sets\n )\n\n return attack_sets", "def etypes(self) -> Sequence[str]:\n\n return [can_etype[1] for can_etype in self.canonical_etypes]", "def get_robot_occupancy(self): \n occupancy = np.zeros(self.no_robots)\n for i in range(self.no_robots):\n status_topic = '/robot_' + str(i) + '/move_base/status'\n msg = 
rospy.wait_for_message(status_topic, GoalStatusArray)\n msg_list = msg.status_list\n if msg_list == []:\n occupancy[i] = 0\n else:\n if len(msg_list) > 1:\n robot_status = msg_list[-1].status\n else:\n robot_status = msg_list[0].status\n\n if (robot_status == 1) or (robot_status == 0) or (robot_status == 7): # BUG pazi tuki je lahko se kaksna fora ker je teh statusov like 10\n occupancy[i] = 1 # robot on move\n else:\n occupancy[i] = 0 # robot on goal\n return occupancy" ]
[ "0.6243142", "0.56909764", "0.5659106", "0.56577206", "0.544552", "0.5411192", "0.51990503", "0.5154871", "0.51183385", "0.5096307", "0.50672597", "0.50460696", "0.49878705", "0.49688363", "0.49500138", "0.49298483", "0.4927684", "0.49132818", "0.4906834", "0.4894242", "0.48886076", "0.48859927", "0.4818391", "0.48064232", "0.47882268", "0.47743315", "0.47422883", "0.47396016", "0.47337124", "0.47289324" ]
0.82086855
0
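A minimal sketch of the Hut.get_occupant_type() counterpart that the comprehension above relies on. The constructor signature and the is_acquired flag match the surrounding records; the unit_type attribute and the exact status strings are assumptions for illustration:

class Hut:
    """One village hut; occupant is a friend/enemy unit or None."""

    def __init__(self, number, occupant):
        self.number = number
        self.occupant = occupant
        self.is_acquired = False   # flips to True once the player takes the hut

    def get_occupant_type(self):
        """Return 'enemy'/'friend', or the hut status when there is no unit."""
        if self.is_acquired:
            return 'ACQUIRED'              # assumed status label
        if self.occupant is None:
            return 'unoccupied'
        return self.occupant.unit_type     # assumed attribute on the unit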
Print the game mission to the console.
def show_game_mission(self): print_bold("Mission:") print(" 1. Fight with the enemy.") print(" 2. Bring all the huts in the village under your control") print("---------------------------------------------------------\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_mission(self): \n #Download mission from vehicle\n missionlist = self.download_mission()\n \n #Add commands\n for cmd in missionlist:\n commandline=\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (cmd.seq,cmd.current,cmd.frame,cmd.command,cmd.param1,cmd.param2,cmd.param3,cmd.param4,cmd.x,cmd.y,cmd.z,cmd.autocontinue)\n print commandline", "def show_game_mission():\n print_bold(\"任务:\")\n print(\"\\t选择李维可以休息的小屋...\")\n print_bold(\"TIP:\")\n print(\"保持警惕,周围有敌人!\")\n print_dotted_line()", "def show_game_mission():\n print_bold(\"Misija:\")\n print(\"\\tOdaberi kućicu u kojoj se Talion može odmoriti ...\")\n print_bold(\"SAVJET:\")\n print(\"PAZI kako biraš jer neprijatelji su blizu!\")\n print_dotted_line()", "def text_output(self):\n print(self.board)\n print()", "def print_start_game():\n print(HANGMAN_ASCII_ART)\n print(MAX_TRIES)", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")", "def info():\n print(\"Made using the OOP RPG game creator (c) Claire.\\n\")", "def print_out():\n pass", "def titulo():\n print(\"\\n\" + Fore.WHITE + \" \"*70 + \"SAMAN\")\n sleep(1)\n print(\" \"*75 + \"GAMES\\n\" + Fore.RESET)\n sleep(1)\n print(\" \"*71 + Fore.LIGHTBLACK_EX + \"Presenta\\n\" + Fore.RESET)\n sleep(1.25)\n print(\" \"*66 + Fore.LIGHTBLUE_EX + \"███ \" + \"BATTLESHIP\" + \" ███\" + Fore.RESET)\n sleep(1)", "def print_game_over():\n print()\n print(\" _____ __ __ ______ ______ ________ _____ \")\n print(r\" / ____| /\\ | \\/ | ____| / __ \\ \\ / / ____| __ \\ \")\n print(r\" | | __ / \\ | \\ / | |__ | | | \\ \\ / /| |__ | |__) |\")\n print(r\" | | |_ | / /\\ \\ | |\\/| | __| | | | |\\ \\/ / | __| | _ / \")\n print(r\" | |__| |/ ____ \\| | | | |____ | |__| | \\ / | |____| | \\ \\ \")\n print(r\" \\_____/_/ \\_\\_| |_|______| \\____/ \\/ |______|_| \\_\\\\\")\n print()", "def printStartMsg(self):\n\n print(\"\\nSTARING THE GAME\")\n print(\"HAVE FUN!\\n\")", "def print_env_information(step_id, current_time, final_move, current_score, current_reward):\n print(\"Step: {}\".format(step_id))\n print(\"Current Time: {}\".format(current_time))\n print(\"Action: {}\".format(final_move))\n print(\"Current scenario score: {} \\nCurrent reward: {}\\n\".format(current_score, current_reward))", "def printInstructions(self):\n print(\"\"\"•\tAim of the Game is to be the first to lose all of your chips\n•\tPlayers are put in order of the lowest to \nhighest based on their first roll\n(This is done automatically when you enter your name)\n• You start out with 5 chips.\n• When it is your turn you roll the die.\n\\t•\tIf the space with the same number as the die is empty (value of 0),\n\\t\\tput a chip there.\n\\t•\tbut if there already is a chip there (value of 1), you must take it.\n\\t•\tIf you roll a 6, you always put one of your chips on the space number 6 – \n\\t\\tregardless of how many chips are there already. 
\n\\t\\tChips on space number 6 are out of the game,\n\\t\\tand you never pick these up again.\n\"\"\")", "def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))", "def print_problem(self):\n print('\\n*****************')\n print('PROBLEM: ' + self.problem)\n print('OBJECTS: ' + str(self.objects))\n print('INIT: ' + str(self.init))\n print('GOAL: ' + str(self.goal))\n print('AGENTS: ' + str(self.agents))\n print('****************')", "def debug_print(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')", "def print_action(player):\n [print(str(i)+': '+player.available_actions[i]) for i in range(len(player.available_actions))]", "def print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you know and love. \\nTo make a move just type the coordinates of the spot like so - row,column. \\nNo spaces please! Lets go ahead and start! Here is a picuter of the board with some coordinates just in case!\\n')\n\t\tprint('=====================')\n\t\tprint('|| 0,0 | 0,1 | 0,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 1,0 | 1,1 | 1,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 2,0 | 2,1 | 2,2 ||')\n\t\tprint('=====================')\n\t\tprint('\\n==========================================================================')\n\t\tprint('==========================================================================\\n\\n')", "def print_fight_status():\n printmessage(\"You're fighting with a %s\" % ITEMS[0], 3, RED, 0)\n printmessage(\"You feel like you're %s\" % get_strength_text(STRENGTHVAL), 4, GREEN, 0)\n printmessage(\"The bear looks like he is %s\" % get_strength_text(BEARSTRENGTHVAL), 5, MAGENTA, 0)\n printmessage(\"Your food supply is %s\" % get_hunger_text(HUNGERVAL), 6, YELLOW, 0)", "def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")", "def show_game_status(self, game, diff, step):\n if self.verbose:\n print('========== Step {} =========='.format(step))\n print('Time cost ===> {:.3f}s'.format(diff))\n game.print_game()", "def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))", "def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')", "def _do_outputs(self):\n self._puzzle.display_revealed_puzzle()\n hint = self._puzzle.get_hint()\n self._console.write(hint)\n print(\"\")\n self._jumper.draw_jumper()\n 
print(\"\")\n\n # These ifs end the game\n if self._puzzle.is_solved():\n self._keep_playing = False\n self._puzzle.display_win_screen()\n \n if self._puzzle.incorrect_guesses >= 4:\n self._keep_playing = False\n self._puzzle.display_loss_screen()", "def printTurn(self,board,tile):\n if tile == board.BLACK:\n print \"\\n\\nBlack turn 'O'\"\n else:\n print \"\\n\\nWhite turn 'X'\"", "def do_print(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.print_to_console(\n cmd_args.get('target'), \n cmd_args.get('filters')\n )\n if success:\n self.console_print(\"There, you asked for it!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()", "def display_hangman(self):\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(self.progress + Style.RESET_ALL)\n print('\\n')", "def instructions():\n\t\n\tprint \\\n\t\"\"\"\n\tToday we will play the perennial favorite game of...\n\tRock! Paper!! Scissors!!!.\n\tThe objective of the game is to outthink your opponent (in this case me) and defeat.\n\tThe rules are very simple\n\t1. Paper covers the Rock\n\t2. Rock breaks the Scissors\n\t3. Scissors cut the Paper\n\t\n\tChoose your move from the following:\n\t1. Paper (p)\n\t2. Rock (r)\n\t3. Scissors (s)\n\t\n\tAre you ready? Alright then, let's play...\n\t\"\"\"" ]
[ "0.8074101", "0.7802414", "0.7799927", "0.6782357", "0.65915364", "0.6574719", "0.6516535", "0.6389265", "0.6313511", "0.6305886", "0.6258949", "0.62241596", "0.6221208", "0.619874", "0.61928886", "0.6130327", "0.61183006", "0.61097413", "0.608984", "0.60363644", "0.60332423", "0.6016326", "0.60144436", "0.5996172", "0.5983194", "0.5976371", "0.5943957", "0.592934", "0.5917232", "0.5916978" ]
0.80750346
0
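show_game_mission above (like several snippets in this record) calls a print_bold helper that is not shown. A plausible minimal sketch, assuming Python 3 and ANSI escape support; the original implementation may differ:

def print_bold(msg, end='\n'):
    """Assumed helper: print msg in bold using ANSI escape codes."""
    print('\033[1m' + msg + '\033[0m', end=end)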
Process the user input for choice of hut to enter. Returns the hut number to enter based on the user input. This method makes sure that the hut number the user has entered is valid. If not, it prompts the user to re-enter this information.
def _process_user_choice(self): verifying_choice = True idx = 0 print("Current occupants: %s" % self.get_occupants()) while verifying_choice: user_choice = raw_input("Choose a hut number to enter (1-5): ") # -------------------------------------------------------------- # try...except illustration for chapter on exception handling. # (Attack Of The Orcs v1.1.0) # -------------------------------------------------------------- try: idx = int(user_choice) except ValueError as e: print("Invalid input, args: %s \n" % e.args) continue try: if self.huts[idx-1].is_acquired: print("You have already acquired this hut. Try again." "<INFO: You can NOT get healed in already acquired hut.>") else: verifying_choice = False except IndexError: print("Invalid input : ", idx) print("Number should be in the range 1-5. Try again") continue return idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getHenhouseDisplayMenuChoice ():\r\n while True :\r\n try :\r\n choice = int(input('Select an option: '))\r\n if 0 <= choice <= 2 :\r\n break \r\n else :\r\n print('Please enter a valid option')\r\n except ValueError :\r\n print('Please enter a valid option')\r\n return(choice)", "def manageHenhouse () :\r\n print('Welcome to henhouse management')\r\n print()\r\n while True :\r\n henhouseDisplayMenu ()\r\n option = getHenhouseDisplayMenuChoice ()\r\n print()\r\n DefaultHenHouse = henhouse(0,0,0,0,0,0)\r\n if option == 1 :\r\n while True :\r\n try:\r\n numberOfChicken = int(input('Enter number of chicken: '))\r\n break\r\n except ValueError :\r\n print('Value not valid.Please enter number of chicken')\r\n while True :\r\n try:\r\n numberOfChicks = int(input('How many of them are chicks (younger than 24 weeks)? '))\r\n break\r\n except ValueError :\r\n print('Value not valid.Please enter number of chicks')\r\n while True :\r\n try:\r\n numberOfٌRoasters = int(input('How many of the mature chicken are roasters? '))\r\n break\r\n except ValueError :\r\n print('Value not valid.Please enter number of roasters') \r\n while True :\r\n try:\r\n numberOfDucks = int(input('Enter number of ducks: '))\r\n break\r\n except ValueError :\r\n print('Value not valid.Please enter number of ducks')\r\n while True :\r\n try:\r\n numberOfyoungDucks = int(input('How many of them are yound ducks (younger than 24 weeks)? '))\r\n break\r\n except ValueError :\r\n print('Value not valid.Please enter number of young ducks')\r\n while True :\r\n try:\r\n numberOfٌMaleDucks = int(input('How many of the mature ducs are males? '))\r\n break\r\n except ValueError :\r\n print('Value not valid.Please enter number of mail ducks') \r\n while True :\r\n try:\r\n numberOfDays = int(input('Enter number of days: '))\r\n break\r\n except ValueError :\r\n print('Value not valid.Please enter number of days')\r\n henHouseToManage = henhouse(numberOfChicken,numberOfChicks,numberOfDucks,numberOfyoungDucks,numberOfٌRoasters,numberOfٌMaleDucks)\r\n print()\r\n print(henHouseToManage.eggProduction (numberOfDays))\r\n print()\r\n elif option == 2 :\r\n try :\r\n print(henHouseToManage.needs ())\r\n except :\r\n print(DefaultHenHouse.needs ())\r\n elif option == 0 :\r\n break\r\n print()\r\n print('Thank you for using the henhouse management program')", "def handle_menu(self, text) : \r\n\r\n choose = \"\"\r\n \r\n while choose not in [1,2,3] :\r\n \r\n \r\n try :\r\n \r\n choose = int(input(text))\r\n\r\n if ( choose not in [1,2,3] ) :\r\n print(\"number not recognize\")\r\n\r\n except ValueError: #dans le cas ou il tape autre chose qu'un INT \r\n\r\n print(\"you have to choose between 1 , 2 and 3\")\r\n\r\n\r\n return choose", "def display_menu():\n print(\"Press 1 to purchase stocks\\n\")\n print(\"\\nPress 2 to visualize the total prices of selected stocks over the period of time imported from a json file\\n\")\n print(\"\\nPress 0 to quit\\n\")\n try:\n response = int(input(\"\\nwaiting for Input: \"))\n if response < 0 or response > 2:\n return \"Please input a value between 0 and 2\"\n except:\n print(\"Please enter the numeric values specified in the menu\")\n else:\n return response", "def _take_option(self, options, print_out):\n user_choice = input(\"Please, choose one of the follewing options: \\n \" + print_out \n + \"\\n Your choice: \" )\n try:\n user_option = options[int(user_choice)]\n except KeyError:\n print(\"Please enter a vaild number\")\n self._take_option(options, print_out)\n \n except ValueError:\n 
print(\"Please a enter vaild number, not a string or some signs\")\n self._take_option(options, print_out)\n else:\n return user_option()", "def get_user_input(arg_pair: EviPair):\n global HUMAN_CORRECT_PRED\n\n while True:\n try:\n choice = int(raw_input())\n\n if choice in [1,2]:\n\n if choice == arg_pair.label:\n HUMAN_CORRECT_PRED += 1\n\n break\n else:\n print(WRONG_INPUT)\n except ValueError:\n print(WRONG_INPUT)\n\n return choice", "def get_guess_from_user(self):\n self.guess_number = input(f\"please guess a number between 1 to {self.difficulty}: \\n\")\n while True:\n if not self.guess_number.isnumeric() or \\\n not int(self.guess_number) <= self.difficulty or \\\n not int(self.guess_number) >= 0:\n self.guess_number = input(f\"you input is invalid!! please guess a number between 1 to {self.difficulty}: \\n\")\n else:\n self.guess_number = int(self.guess_number)\n break\n return self.guess_number", "def show_hr_menu():\n no_input = True\n while no_input:\n print('\\nPlease select from the following options:\\n')\n print('1. View / approve pending applications')\n print('2. View approved applications')\n print('3. View rejected applications\\n')\n choice = input('Please enter 1, 2 or 3 or Q to quit \\n')\n if choice in ('1', '2', '3'):\n no_input = False\n return choice\n elif choice.lower() == 'q':\n logout()\n is_invalid()", "def get_input(self):\n option = input(\"Enter the number of your choice: \")\n return option", "def process_player_input(self,guess):\r\n # Step 1 - Catch faulty input, this is not topic of week 2\r\n\r\n # Tell the player the secret number :-)\r\n if (guess == \"Cheat\"):\r\n return \"Secret number = %d\" % (self.secret_number)\r\n \r\n # Step 2 - Verify player's input.\r\n user_input = self.verify_input(guess, self.num_range)\r\n if (type(user_input) != type(0)):\r\n # Verify_input() detected faulty input\r\n # Let's leave here with the error message\r\n return user_input\r\n\r\n # Decrease the number of still available tries\r\n if (self.remaining_guesses>0):\r\n self.remaining_guesses -= 1\r\n print \"Remaining number of tries = \", self.remaining_guesses\r\n \r\n # Step 3 - Give the player a hint for next guess\r\n if ((user_input > self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Lower!\"\r\n elif ((user_input < self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Higher!\"\r\n elif (user_input == self.secret_number):\r\n result_message = self.correctguess_message\r\n else:\r\n # As the guess was wrong and there is no further try anymore,\r\n # tell the player that he/she lost\r\n result_message = \"You tried too often than necessary, You lost!\"\r\n return result_message", "def user(num):\n while True:\n print(\"Option: {}\".format(num))\n\n line = input()\n\n try:\n if line[0] == 'h':\n print(help_message)\n else:\n white, black = map(int, line.split())\n return white, black\n except:\n print(invalid_option.format(line))", "def user_choice():\n number_choice=50 #for enter in a loop\n while number_choice < 0 or number_choice > 49:\n try:\n number_choice=int(input(\"enter number between 0 and 49 :\")) #ask user a number and convert it in integer\n except ValueError: # if number_choice not a number\n print(\"your enter is not a number\") #display error message\n number_choice = 50 #return in a loop\n if number_choice < 0 or number_choice >49:\n print(\"your enter is not included in range\") 
#display error message if number is out of range\n return number_choice", "def user_input():\n user_number = input(\"Guess a number: \")\n try:\n user_number = int(user_number)\n except:\n print(\"Please ender a valid digit!\")\n return user_input()\n else:\n if 1 <= user_number <= 25:\n return user_number\n else:\n print(\"You need to enter a digit between 0 and 50\")\n return user_input()", "def get_int(self):\n while True:\n try:\n choice = int(input(\"Choose: \"))\n if 1 <= choice <= len(self.menu):\n return choice\n print(\"Invalid choice.\")\n except (NameError,ValueError, TypeError,SyntaxError):\n print(\"That was not a number, genious.... :(\")", "def getNumber(prompt):\n output = input(prompt)\n if output.lower() == 'exit':\n return -1\n while output.isdigit() == False or int(output) > 9 or int(output) < 1:\n output = input(prompt)\n return int(output)", "def user_input():\n #Error messages\n num_invalid = \"Invalid input, please insert a valid number\"\n str_invalid = \"Invalid input, please try again following the input conventions requested\"\n\n #Model Type\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n while model_type not in {'ib', 'sc'}:\n print(str_invalid)\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n\n #Compound\n compound = input(\"What compound or drug are you using?\")\n \n #Dose Type\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n while dose_type not in {\"c\",\"i\",\"r\"}:\n print(str_invalid)\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n\n if dose_type == 'c':\n while True:\n try:\n dose = float(input(\"What is the dose of \" + compound + \" that you want to test? (units in ng per hour): \"))\n break\n except:\n print(num_invalid)\n dose_mass = None\n time_dose = None\n num_dose = None\n \n elif dose_type == 'i':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? (units in ng): \"))\n break\n except:\n print(num_invalid)\n dose = None\n time_dose = None\n num_dose = None\n\n elif dose_type == 'r':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? (units in ng): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n time_dose = float(input(\"What time period are the doses given over? (units in hours): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n num_dose = float(input(\"How many doses are given? - this program assumes that doses are evenly spaced throughout the time period: \"))\n break\n except:\n print(num_invalid)\n dose = None\n \n #Length of simulation time\n while True:\n try:\n len_assay = float(input(\"What time period would you like to simluate the model? (units in hours): \"))\n break\n except:\n\t print(num_invalid)\n \n #Interval times\n while True:\n try:\n len_interval = float(input(\"What interval time would you like in the simulation? (units in hours): \"))\n break\n except:\n print(num_invalid)\n\n #clearance\n while True:\n try:\n clearance = float(input(\"What is the clearance rate? 
(units in ng/hour): \"))\n break\n except:\n print(num_invalid)\n\n \n #compartments\n compartments = []\n\n if model_type == \"ib\":\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n\n while True:\n try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n\n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart = input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n\n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n\n compart_list = None\n \n elif model_type == \"sc\":\n while True:\n try:\n sub_compart = input(\"Enter volume (L), transition rate (ng/hour) for the sub compartment (all seperated by spaces - eg: 5 25 ): \")\n sub_compart_split = sub_compart.split()\n sub_compart_split = [float(i) for i in sub_compart_split]\n break\n except:\n print(str_invalid)\n\n sub_compart_split.append(str(\"Sub\"))\n compartments.append(sub_compart_split)\n\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n while True:\n try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n \n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart = input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n \n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n compart_list = None\n\n #visualisation\n vis = input(\"Would you like to generate a graph? (Y/N): \")\n while vis not in {'Y','y','N','n'}:\n print(str_invalid)\n vis = input(\"Would you like to generate a graph? (Y/N): \") \n\n #unix timestamp\n curr_datetime = time.time()\n curr_datetime = str(curr_datetime)\n\n\n print(\"Thank you! 
Building model, please wait...\")\n\n\n return {\n 'model_type': model_type,\n 'compound': compound,\n 'dose_type': dose_type,\n 'dose':dose,\n 'dose_mass': dose_mass,\n 'time_dose': time_dose,\n 'num_dose': num_dose,\n 'len_assay':len_assay,\n 'len_interval':len_interval,\n 'clearance':clearance,\n 'compartments':compartments,\n 'vis':vis,\n 'curr_datetime':curr_datetime\n }", "def ask_number(low, high, tries):\n the_number = None\n while the_number not in range(low, high):\n the_number = int(input(\"Enter a number between 1-100: \"))\n return the_number\n print(\"The computer has\", tries, \"tries to guess your number\\n\")", "def enter_time_spent():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'time_spent': ''}\n\n while not valid_data:\n input_data['time_spent'] = input(\"Time spent on task (rounded minutes) : \")\n if re.match('\\d+', input_data['time_spent']):\n valid_data = True\n clean_scr()\n\n return input_data['time_spent']", "def hours_studied(self):\n value = input(\"Enter value (or 'exit')>>>\")\n while not self.is_float(value):\n value = input(\"Enter value (or 'exit')>>>\")\n\n # Escape command\n if value == 'exit':\n return value\n\n return float(value)", "def prompt_number(self):\r\n self.area_code = int(input(\"Area Code: \"))\r\n self.prefix = int(input(\"Prefix: \"))\r\n self.suffix = int(input(\"Suffix: \"))", "def difficulty():\n\n clear()\n print(logo)\n\n choice = \"\"\n level_dict = {\n \"easy\": 10,\n \"hard\": 5,\n \"lottery\":1,\n }\n \n choice = input('''Please choose a level:\n \"easy\" = 10 tries\n \"hard\" = 5 tries\n \"lottery\" = 1 try\n > ''').lower()\n\n while choice not in level_dict:\n choice = input(\"Please enter a valid level: \").lower()\n\n choice = level_dict[choice] \n return choice", "def NumberPick():\n hilo = 'lowest'\n num_list = []\n High = None\n while True:\n if len(num_list) == 2:\n break\n if High:\n hilo = 'highest'\n vanilla = f'Type in the {hilo} number: '\n nums = input(vanilla)\n if nums.isdigit():\n num_list.append(int(nums))\n High = True\n else:\n print('Enter only numbers.')\n print(\"The chosen number is... \" + str(randint(min(num_list), max(num_list))) + \"!\")\n start = input(\"Start again? 
\").lower()\n if start.startswith('y'):\n NumberPick()\n elif start.startswith('n') or QuBa(start):\n return", "def concertTicket():\n\n\n p =(input (\"What is the ticket price?\"))\n price = float (p)\n\n w = (input (\"What is your hourly wage?\"))\n wage = float (w)\n\n h = price/wage\n hours = round(h,2)\n\n print (\"You need to work\", hours, \"hours to buy your\",\n \"ticket\")", "def user_prompt():\n\n # JSON VARIABLES FROM MENU\n espresso_water = MENU[\"espresso\"][\"ingredients\"][\"water\"]\n espresso_coffee = MENU[\"espresso\"][\"ingredients\"][\"coffee\"]\n espresso_cost = MENU[\"latte\"][\"cost\"]\n\n latte_water = MENU[\"latte\"][\"ingredients\"][\"water\"]\n latte_coffee = MENU[\"latte\"][\"ingredients\"][\"coffee\"]\n latte_milk = MENU[\"latte\"][\"ingredients\"][\"milk\"]\n latte_cost = MENU[\"latte\"][\"cost\"]\n\n cappuccino_water = MENU[\"cappuccino\"][\"ingredients\"][\"water\"]\n cappuccino_coffee = MENU[\"cappuccino\"][\"ingredients\"][\"coffee\"]\n cappuccino_milk = MENU[\"cappuccino\"][\"ingredients\"][\"milk\"]\n cappuccino_cost = MENU[\"cappuccino\"][\"cost\"]\n\n total_water = espresso_water + latte_water + cappuccino_water\n total_coffee = espresso_coffee + latte_coffee + cappuccino_coffee\n total_milk = latte_milk + cappuccino_milk\n total_cost = espresso_cost + latte_cost + cappuccino_cost\n\n acrued_money = 0\n\n repeat = True\n while repeat:\n choice = user_question()\n if choice == \"espresso\":\n if espresso_water > total_water:\n print(\"Sorry there's not enough water\")\n elif espresso_coffee > total_coffee:\n print(\"Sorry there's not enough coffee\")\n else:\n total_pennies = collect_money()\n if total_pennies > espresso_cost or total_pennies == espresso_cost:\n acrued_money += espresso_cost\n change = float(total_pennies - espresso_cost)\n change_dec = \"%.2f\" % change\n total_water -= espresso_water\n total_coffee -= espresso_coffee\n print(f\"Here is ${change_dec} in change\")\n print(\"Here's your order of ☕ Espresso\")\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n elif choice == 'latte':\n if latte_water > total_water:\n print(\"Sorry there's not enough water\")\n elif latte_coffee > total_coffee:\n print(\"Sorry there's not enough coffee\")\n elif latte_milk > total_milk:\n print(\"Sorry there's not enough milk\")\n else:\n total_pennies = collect_money()\n if total_pennies > latte_cost or total_pennies == latte_cost:\n acrued_money += latte_cost\n change = float(total_pennies - latte_cost)\n change_dec = \"%.2f\" % change\n total_water -= latte_water\n total_coffee -= latte_coffee\n total_milk -= latte_milk\n print(f\"Here is ${change_dec} in change\")\n print(\"Here's your order of ☕ Latte\")\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n\n elif choice == 'cappuccino':\n if cappuccino_water > total_water:\n print(\"Sorry there's not enough water\")\n elif cappuccino_coffee > total_coffee:\n print(\"Sorry there's not enough coffee\")\n elif cappuccino_milk > total_milk:\n print(\"Sorry there's not enough milk\")\n else:\n total_pennies = collect_money()\n if total_pennies > latte_cost or total_pennies == cappuccino_cost:\n acrued_money += cappuccino_cost\n change = float(total_pennies - cappuccino_cost)\n change_dec = \"%.2f\" % change\n total_water -= cappuccino_water\n total_coffee -= cappuccino_coffee\n total_milk -= cappuccino_milk\n print(f\"Here is ${change_dec} in change\")\n print(\"Here's your order of ☕ Cappuccino\")\n else:\n print(\"Sorry that's not enough money. 
Money refunded.\")\n\n elif choice == 'report':\n print(f\"Water: {total_water}ml\\nMilk: {total_milk}ml\\nCoffee: {total_coffee}g\\nMoney: ${acrued_money}\")\n elif choice == 'off':\n repeat = False\n else:\n print(\"Wrong Input, Start Application again\")\n repeat = False", "def prompt():\r\n inpt = -1\r\n valid_choices = ['1','2','3','4','5']\r\n while inpt not in valid_choices:\r\n inpt = input(\"\\nPlease select the number of the operation you wish \"\r\n \"to complete:\\n\" +\r\n \"1. Run file mover\\n2. Add directories\"\r\n \"\\n3. Remove directory\\n4. View saved directories\\n5. Quit\\n\").strip()\r\n if inpt not in valid_choices:\r\n print(\"\\n*** Invalid choice ***\")\r\n return inpt", "def amount_entered():\n while True: #Run until a suitable input is passed.\n try:\n amt = int(input(\"Enter value you wish to trade >>> \"))\n if amt <= 0:\n raise Exception\n return amt\n except ValueError: #if a string is entered\n print(\"Please enter an integer\")\n except Exception: #if a negative digit is entered\n print(\"Value cannot be less than or equal to 0\")", "def human_input(marbles_left):\n\twhile True:\n\t\ttry:\n\t\t\thuman_choice = int(input('Your turn: How many marbles will you remove (1-3)? '))\n\t\texcept:\n\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\treturn 0\n\t\telse:\n\t\t\tif human_choice not in range(1, 4):\n\t\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\t\treturn 0\n\t\t\telif human_choice > marbles_left:\n\t\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\tprint('You removed {} marbles.'.format(human_choice))\n\t\t\t\treturn human_choice", "def get_user_input(prompt):\n while True:\n user_input = input(prompt)\n try:\n tmp = int(user_input)\n return tmp\n except ValueError:\n print('Not a number')", "def user_input_height():\n # Variable to use outcome globally in other functions\n global height\n # User height input\n height = input(f\"\\n\\033[1;32;10mHi {name.capitalize()} {enter_height}:\\n\")\n # If no characters in input -> message to user and repeat this function\n if is_char(height) is True:\n print(\"\\033[1;31;10mNo characters please type your height\")\n user_input_height()\n # Is special symbol -> message to user and repeat this function\n elif is_special_char(height) is True:\n print(\"\\033[1;31;10mPlease do not include special symbols.\")\n user_input_height()\n # Else if characters is letter -> message to user and repeat this function\n elif is_letter(height) is True:\n print(\"\\n\\033[1;31;10mPlease do not include letter in your height.\")\n user_input_height()\n # Else return height\n return height", "def get_user_input(self):\r\n try:\r\n user_input = input('Guess a letter: ')\r\n print('\\n')\r\n if user_input.lower() in self.already_guessed:\r\n raise ValueError(YELLOW + 'You already guessed '\r\n f'{user_input.lower()}.\\n' + END)\r\n if len(user_input) == 0:\r\n raise ValueError(YELLOW + 'You didn\\'t enter a letter. '\r\n 'Please enter a letter between A-Z\\n' + END)\r\n if not user_input.isalpha():\r\n raise ValueError(YELLOW + 'You entered a number. 
'\r\n 'Please enter a letter between A-Z.\\n' + END)\r\n if len(user_input) > 1:\r\n raise ValueError(YELLOW + 'Please enter one letter.\\n' + END)\r\n except ValueError as error:\r\n print(error)\r\n self.get_user_input()\r\n else:\r\n if len(self.already_guessed) > 0: # prints previous guesses\r\n self.print_previous_guesses()\r\n if user_input.lower() in [letter.original.lower() for letter in\r\n self.active_phrase if letter != ' ']:\r\n for letter in self.active_phrase:\r\n if letter != ' ':\r\n letter.compare_guess(user_input) # checks guess\r\n self.active_phrase.print_phrase()\r\n else:\r\n self.lives -= 1\r\n print(f'You have {self.lives} out of 5 lives remaining!\\n')\r\n if user_input.lower() not in self.already_guessed:\r\n self.already_guessed.append(user_input.lower())\r\n self.active_phrase.print_phrase()" ]
[ "0.64109653", "0.62263894", "0.6194485", "0.6172862", "0.5975994", "0.5952109", "0.5927273", "0.589624", "0.5894847", "0.5838126", "0.57868975", "0.5775824", "0.57646203", "0.57619727", "0.57246244", "0.56762254", "0.56759924", "0.56673324", "0.5646656", "0.5640004", "0.5619711", "0.56171703", "0.560174", "0.55982786", "0.5594173", "0.55927116", "0.5587039", "0.5585782", "0.557727", "0.5574761" ]
0.72301716
0
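The validation loop in _process_user_choice above reduces to a reusable prompt-until-valid pattern. A minimal standalone sketch (the name read_int_in_range is illustrative; input() stands in for Python 2's raw_input()):

def read_int_in_range(prompt, low, high):
    """Keep prompting until the user types an integer in [low, high]."""
    while True:
        raw = input(prompt)
        try:
            value = int(raw)
        except ValueError as e:
            print("Invalid input, args: %s \n" % e.args)
            continue
        if low <= value <= high:
            return value
        print("Number should be in the range %d-%d. Try again" % (low, high))

# Usage: idx = read_int_in_range("Choose a hut number to enter (1-5): ", 1, 5)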
Randomly occupy the huts with one of: friend, enemy, or 'None'.
def _occupy_huts(self): for i in range(5): choice_lst = ['enemy', 'friend', None] computer_choice = random.choice(choice_lst) if computer_choice == 'enemy': name = 'enemy-' + str(i+1) self.huts.append(Hut(i+1, OrcRider(name))) elif computer_choice == 'friend': name = 'knight-' + str(i+1) self.huts.append(Hut(i+1, Knight(name))) else: self.huts.append(Hut(i+1, computer_choice))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def occupy_huts():\n huts = []\n occupants = ['enemy', 'friend', 'unoccupied']\n while len(huts) < 5:\n computer_choice = random.choice(occupants)\n huts.append(computer_choice)\n return huts", "def randomHelmet():\n return random.choice(HELMETS)", "def get_red():\n # return name of actor, movement speed\n zombies = ['Zombie-1','Zombie-2','Zombie-3']\n return choice(zombies), randint(1,4)", "def enemychoice(self):\n choice = random.randint(0, 10)\n # when choice is 0 to 3, the enemy will attack\n # when choice is 4 to 7, the enemy will defend\n # when choice is 8, the enemy will do nothing\n # when choice is 9, the enemy will do a super move\n\n #Create the Booleans first, they will also automatically 'reset' them\n self.enemyattack = False\n self.enemydefend = False\n self.enemysuper = False\n\n if choice == 1 or choice == 2 or choice == 3:\n self.enemyattack = True\n elif choice == 4 or choice == 5 or choice == 6:\n self.enemydefend = True\n elif choice == 7 or choice == 8:\n None\n else:\n self.enemysuper = True", "def randomly_spawn_mothership(self) -> None:\n return", "def place_healing(self, probability=0.1):\r\n number = int((self.__nx * self.__ny) * probability) # probability of having a pit\r\n for i in range(number):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n if self.__maze[x][y] != self.entrance_room() and \\\r\n self.__maze[x][y] != self.exit_room():\r\n self.__maze[x][y].set_healing_potion(True)", "def testrandom(self):\n for i in range(100):\n WeaponAbility()", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. 
Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def etoile():\n x, y = random.randint(0, MAXW), random.randint(0, MAXH)\n cercle(x, y, 2, 'red')", "def fight(who_fight=None):\r\n global monsters_defeated\r\n \r\n if isinstance(who_fight,helpful.Being):\r\n ###specific monster\r\n enemy = who_fight\r\n\r\n elif isinstance(who_fight,list):\r\n ###list of categories\r\n enemy = items_lists.random_monster(random.choice(who_fight))\r\n\r\n else:\r\n ###else picks a monster at random, not boss though\r\n enemy = items_lists.random_monster()\r\n \r\n\r\n\r\n # print 'fighting:\\n' + enemy.advanced_str()\r\n encountered = words.being_adj().capitalize() + ' ' + str(enemy)\r\n raw_input(str(player) + ' encounters a ' + encountered + '!\\n')\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n while choice == 'inventory':\r\n inspect_inventory()\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n if choice == 'yes':\r\n\r\n while enemy.get_health() > 0 and player.get_health() > 0:\r\n #player attacks\r\n item = helpful.pick_item(player.get_inventory(), 'What to use?')\r\n player.use(item)\r\n attack = item.get_damage()\r\n defend = item.get_health()\r\n\r\n if attack > 0:\r\n enemy.hit(item)\r\n raw_input('You dealt ' +str(attack) + ' damage!')\r\n elif defend > 0:\r\n raw_input('You gained ' + str(defend) + ' HP!')\r\n else:\r\n raw_input('That was pretty dumb.\\n')\r\n \r\n if enemy.get_health() > 0: #if the enemy is still alive\r\n\r\n ###enemy attacks, using random item in enemy's inventory\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n raw_input(str(enemy).capitalize() + ' used ' + str(enemy_choice) + '!\\n')\r\n raw_input('You lost ' + str(enemy_choice.get_damage()) + ' health!\\n')\r\n \r\n player.set_health(max(0,player.get_health())) #make health nonnegative\r\n enemy.set_health(max(0,enemy.get_health()))\r\n\r\n print('Player Health: ' + str(player.get_health()) + '\\n')\r\n raw_input(str(enemy) + ' Health: ' + str(enemy.get_health()) + '\\n')\r\n \r\n if enemy.get_health() == 0:\r\n winner = str(player)\r\n raw_input('You looted the following items:\\n' + enemy.get_inv_string())\r\n player.grab_items(enemy.get_inventory())\r\n result = 'win'\r\n monsters_defeated += 1\r\n\r\n if player.get_health() == 0:\r\n winner = str(enemy)\r\n result = 'death'\r\n\r\n print(winner + ' wins!\\n')\r\n\r\n elif choice == 'no':\r\n\r\n ouch = random.randrange(0,2)\r\n if enter_two == config.confus(config.config2):\r\n ouch = 0\r\n global cheated\r\n cheated = True\r\n print '<yolo>'\r\n if ouch:\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n print 'You got away, but were hit by the ' + \\\r\n str(enemy) +\"'s \" + str(enemy_choice) +'!' 
+ '\\n'\r\n raw_input('You sustained ' + str(enemy_choice.get_damage()) +' damage.\\n')\r\n if player.get_health() <= 0:\r\n return 'death'\r\n else:\r\n raw_input('You got away safely!\\n\\nThat was close!\\n')\r\n result = 'lose'\r\n\r\n return result", "def random_legal_move():\n return random.choice(legal_moves())", "def ninja_turn():\r\n\tglobal men\r\n\tl = [chop, fly, firebreath]\r\n\tx = randint(0,3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn ninja.hit(*l[x])\r\n\telse:\r\n\t\tmen += ninja.sleep(*nsleep)\r\n\t\treturn 0", "def testrandom(self):\n for i in range(100):\n AmuletAbility()", "def random_strategy(player, board):\n return random.choice(Othello.legal_moves(player, board))", "async def randping(self, ctx):\r\n while True:\r\n memb = random.choice(ctx.guild.members)\r\n if not memb.bot:\r\n break\r\n memb = memb.mention\r\n await ctx.send(memb)", "def Thunder(robot):\n robot_return = Random_Actuation.myopic(\"Thunder\",robot,0.5,20) # with a %20 chance myopic will be triggered\n robot_return = Random_Actuation.hobbler(\"Thunder\",robot,0.5,20) # with a %20 change hobbler will be triggered\n return robot_return", "def someone():\n return random.choice(list(filter(lambda member: member != ADMIN, CHANNEL_MEMBERS)))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def make_random_move(self):\n s=set()\n for i in range(self.height):\n for j in range(self.width):\n s.add((i,j))\n\n s=s-self.mines-self.moves_made\n if s==set(): return None\n return random.choice(list(s))\n #raise NotImplementedError", "def generate_horror_title():\n d666 = random.randint(1, 666)\n if d666 <= 111:\n #the adj noun\n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 111 and d666 <= 222: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 222 and d666 < 444: \n #the adj noun of verb \n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_verb[random.randint(0, len(horror_verb) - 1)]\n elif d666 >= 444 and d666 < 555: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 >= 555:\n #verb of the adj noun\n return horror_verb[random.randint(0, len(horror_verb) - 1)] + \" of the \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]", "def move_unhappy(self):\n ###your code here###\n n_unhappy = 0\n for person in self.people:\n if person.is_unhappy():\n old_home=person.home\n person.home.occupant = None\n new_home = self.empty_homes.pop(random.randrange(len(self.empty_homes)))\n new_home.occupant = person\n person.home = new_home\n self.empty_homes.append(old_home)\n n_unhappy += 1\n\n return n_unhappy", "async def eat(self, ctx, *, member : str = None):\r\n\r\n authorName = DisplayName.name(ctx.message.author)\r\n\r\n # Check if we're eating nothing\r\n if member == None:\r\n nothingList = [ 'you 
sit quietly and eat *nothing*...',\r\n 'you\\'re *sure* there was something to eat, so you just chew on nothingness...',\r\n 'there comes a time when you need to realize that you\\'re just chewing nothing for the sake of chewing. That time is now.']\r\n\r\n randnum = random.randint(0, len(nothingList)-1)\r\n msg = '*{}*, {}'.format(authorName, nothingList[randnum])\r\n msg = Nullify.clean(msg)\r\n await ctx.channel.send(msg)\r\n return\r\n\r\n # Check if we're eating a member\r\n memberCheck = DisplayName.memberForName(member, ctx.message.guild)\r\n if memberCheck:\r\n # We're eating a member - let's do a bot-check\r\n if memberCheck.id == self.bot.user.id:\r\n # It's me!\r\n memberList = [ 'you try to eat *me* - but unfortunately, I saw it coming - your jaw hangs open as I deftly sidestep.',\r\n 'your mouth hangs open for a brief second before you realize that *I\\'m* eating *you*.',\r\n 'I\\'m a bot. You can\\'t eat me.',\r\n 'your jaw clamps down on... wait... on nothing, because I\\'m *digital!*.',\r\n 'what kind of bot would I be if I let you eat me?']\r\n elif memberCheck.id == ctx.message.author.id:\r\n # We're eating... ourselves?\r\n memberList = [ 'you clamp down on your own forearm - not surprisingly, it hurts.',\r\n 'you place a finger into your mouth, but *just can\\'t* force yourself to bite down.',\r\n 'you happily munch away, but can now only wave with your left hand.',\r\n 'wait - you\\'re not a sandwich!',\r\n 'you might not be the smartest...']\r\n else:\r\n memName = DisplayName.name(memberCheck)\r\n memberList = [ 'you unhinge your jaw and consume *{}* in one bite.'.format(memName),\r\n 'you try to eat *{}*, but you just can\\'t quite do it - you spit them out, the taste of failure hanging in your mouth...'.format(memName),\r\n 'you take a quick bite out of *{}*. They probably didn\\'t even notice.'.format(memName),\r\n 'you sink your teeth into *{}\\'s* shoulder - they turn to face you, eyes wide as you try your best to scurry away and hide.'.format(memName),\r\n 'your jaw clamps down on *{}* - a satisfying *crunch* emanates as you finish your newest meal.'.format(memName)]\r\n randnum = random.randint(0, len(memberList)-1)\r\n msg = '*{}*, {}'.format(authorName, memberList[randnum])\r\n msg = Nullify.clean(msg)\t\t\t\t\r\n await ctx.channel.send(msg)\r\n return\r\n\r\n # Assume we're eating something else\r\n itemList = [ \t'you take a big chunk out of *{}*. *Delicious.*'.format(member),\r\n 'your teeth sink into *{}* - it tastes satisfying.'.format(member),\r\n 'you rip hungrily into *{}*, tearing it to bits!'.format(member),\r\n 'you just can\\'t bring yourself to eat *{}* - so you just hold it for awhile...'.format(member),\r\n 'you attempt to bite into *{}*, but you\\'re clumsier than you remember - and fail...'.format(member),]\r\n\r\n randnum = random.randint(0, len(itemList)-1)\r\n msg = '*{}*, {}'.format(authorName, itemList[randnum])\r\n msg = Nullify.clean(msg)\t\t\t\r\n await ctx.channel.send(msg)\r\n return", "def crit_ai(crit):\n if crit['type'] == 'crawler':\n # Crawlers move at random.\n return random.choice(['left','right','up','down'])\n #if crit['type'] == 'bullet':\n # return crit['dir']\n return None", "def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! 
\", end='')\n show_health(health_meter)", "def generateFood():\n temp = [random.randint(0,7), random.randint(0,7)]\n\n snakePixels = snake.getPixels()\n\n #check that the food does not fall in the snake\n while temp in snakePixels:\n temp = [random.randint(0,7), random.randint(0,7)]\n\n return temp", "def randomLeggings():\n return random.choice(LEGGINGS)", "def make_random_move(self):\n \n\n if len(self.moves_made) == 56:\n return None\n\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n not_safe_moves = self.moves_made | self.mines\n\n while random_move in not_safe_moves:\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n return random_move", "def test_full_setup(n):\n for x in range(n):\n for y in range(n):\n Stitch(x,y)\n Stitch.stitches[(x,y)].vital = True if round(rnd.random()) == 1 else False", "def escolherLider():\n global lider\n lider = random.choice(participantes)" ]
[ "0.66497874", "0.6558114", "0.6469732", "0.64359075", "0.6405437", "0.62771046", "0.6205605", "0.6110183", "0.6102769", "0.60875654", "0.60744935", "0.60478365", "0.6042598", "0.60355145", "0.6011474", "0.5983928", "0.5964419", "0.59475535", "0.59363985", "0.5934032", "0.5933903", "0.5914174", "0.5906659", "0.58979267", "0.58755064", "0.58675873", "0.58566785", "0.58190644", "0.5812063", "0.58050513" ]
0.69136053
0
Add a skin path to the current configuration state. If ``discovery`` is enabled, the path will automatically be monitored for changes. The ``indexes`` argument is an optional list of view registrations with the provided names. The ``request_type`` option decides the request type for which to make the registration.
def register_path(config, spec, discovery=False, indexes=[], request_type=None):
    package_name, path = resolve_asset_spec(spec)
    if package_name is not None:
        path = pkg_resources.resource_filename(package_name, path)
    else:
        path = caller_path(path)
    if package_name is None:
        # absolute filename
        package = config.package
    else:
        __import__(package_name)
        package = sys.modules[package_name]
    context = ConfigurationMachine()
    context.registry = config.registry
    context.autocommit = False
    context.package = package
    context.route_prefix = getattr(config, 'route_prefix', None)
    directive = skins(context, path, discovery, request_type)
    for index in indexes:
        directive.view(config, index)
    for action in directive():
        config.action(*action)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_skin( self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images' ):\n self.skin_name = skin_name\n self.skin_path = skin_path\n\n skins = getToolByName( target, 'portal_skins', None )\n if skins is None:\n return\n\n skin_paths = skins.getSkinPaths()\n\n include = ()\n found = 0\n for id, path in skin_paths:\n if id == self.skin_name:\n paths = path.split(', ')\n include = filter( lambda x, cp=paths: x not in cp, views ) + paths\n found = 1\n\n if not found:\n default_path = self.skin_path.split(', ')\n include = views + default_path\n\n skins.manage_properties( add_skin=1, skinname=self.skin_name, skinpath=join(include, ', ') )\n skins.default_skin = self.skin_name", "def register(self, path, type='js'):\n if not type in ('js', 'css'):\n raise ValueError('Only js or css types are supported.')\n if type == 'js':\n JS_EXTENSIONS.append(path)\n else:\n CSS_EXTENSIONS.append(path)", "def register_class_views(state):\n try:\n prefixes = state.app.request_prefixes\n except AttributeError:\n prefixes = []\n state.app.request_prefixes = prefixes\n prefixes.append(state.url_prefix if state.url_prefix is not None else '')\n # Personal list\n personal_view = PersonalRequests.as_view('personal_requests')\n state.add_url_rule('/personal/', view_func=personal_view)\n state.add_url_rule('/personal/rss.xml', view_func=personal_view)\n state.add_url_rule('/personal/<path:filters>', view_func=personal_view)\n # Payout list\n payout_view = PayoutListing.as_view('list_approved_requests')\n payout_url_stub = '/pay/'\n state.add_url_rule(payout_url_stub, view_func=payout_view)\n state.add_url_rule(payout_url_stub + 'rss.xml', view_func=payout_view)\n state.add_url_rule(payout_url_stub + '<path:filters>',\n view_func=payout_view)\n # Other more generalized listings\n register_perm_request_listing(state, 'list_pending_requests',\n '/pending/', (PermissionType.review, PermissionType.audit),\n ActionType.pending, u'Pending Requests')\n register_perm_request_listing(state, 'list_completed_requests',\n '/completed/', PermissionType.elevated, ActionType.finalized,\n u'Completed Requests')\n # Special all listing, mainly intended for API users\n register_perm_request_listing(state, 'list_all_requests',\n '/all/', PermissionType.elevated, ActionType.statuses,\n u'All Requests')", "def register_view():\n\n icon_set_id = request.args.get(\"iconSetId\")\n ip_address = request.remote_addr\n ip_address_anonymized = anonymize_ip(ip_address)\n\n # Add IP address to corresponding icon set\n if icon_set_id not in view_addresses:\n view_addresses[icon_set_id] = [ip_address_anonymized]\n view_counts[icon_set_id] = 1\n elif ip_address_anonymized not in view_addresses[icon_set_id]:\n view_addresses[icon_set_id].append(ip_address_anonymized)\n view_counts[icon_set_id] += 1\n else:\n return \"\"\n\n with open(path_views, \"w+\") as view_file:\n # Write updated object to file\n json.dump(view_addresses, view_file)\n\n return \"\"", "def register_view( self, target, view ):\n skins = getToolByName( target, 'portal_skins', None )\n write = self.stream.write\n\n if skins._getOb( view, None ) is not None:\n write( \"Failed to register view '%s' (already exists)\\n\" % view )\n return view\n\n found = 0\n dw_path = os.path.join( minimalpath(package_home( globals() )), *view.split('/') )\n dw_path = re.sub(r'\\\\', r'/', dw_path)\n\n for dir_path in DirectoryView.manage_listAvailableDirectories():\n if dir_path.endswith( dw_path ):\n found = 1\n break\n\n if not found:\n write( \"Failed to 
register view '%s' (directory not found)\\n\" % view )\n return view\n\n # TODO: handle paths better\n dw_path = dw_path.replace( '\\\\', '/' )\n DirectoryView.manage_addDirectoryView( skins, dw_path )\n write( \"Registered view '%s' = '%s'\\n\" % ( view, dw_path ) )\n\n return view", "def register_routes(\n config: Configurator,\n route_name_ext: str = \"x-pyramid-route-name\",\n root_factory_ext: str = \"x-pyramid-root-factory\",\n apiname: str = \"pyramid_openapi3\",\n route_prefix: t.Optional[str] = None,\n) -> None:\n\n def action() -> None:\n spec = config.registry.settings[apiname][\"spec\"]\n for pattern, path_item in spec[\"paths\"].items():\n route_name = path_item.get(route_name_ext)\n if route_name:\n root_factory = path_item.get(root_factory_ext)\n config.add_route(\n route_name,\n pattern=route_prefix + pattern\n if route_prefix is not None\n else pattern,\n factory=root_factory or None,\n )\n\n config.action((\"pyramid_openapi3_register_routes\",), action, order=PHASE1_CONFIG)", "def add(app, url = None, path = None, endpoint=None, index='index.html'):\n url = url or app.static_url_path or ''\n path = os.path.abspath(path or app.static_folder or '.')\n endpoint = endpoint or 'static_' + os.path.basename(path)\n\n if path == app.static_folder:\n if url != app.static_url_path:\n raise ValueError('Files in `{}` path are automatically served on `{}` URL by Flask.'\n ' Use different path for serving them at `{}` URL'.format(path, app.static_url_path, url))\n else:\n @app.route(url + '/<path:filename>', endpoint = endpoint)\n def static_files(filename):\n return send_from_directory(path, filename)\n\n if index:\n @app.route(url + '/', endpoint = endpoint + '_index')\n def static_index():\n return send_from_directory(path, index)\n\n if url:\n @app.route(url, endpoint = endpoint + '_index_bare')\n def static_index_bare():\n return send_from_directory(path, index)", "def _add_config_arg(self, type_, content_type, name, default=None,\n required=False, methods=ALL_HTTP_METHODS):\n if methods == '*':\n methods = ALL_HTTP_METHODS\n arg = type_(methods, content_type, name, default, required)\n for method in methods:\n differentiator = (method, content_type)\n if not self.contains(type_, differentiator):\n self.register(type_, Registry(), differentiator)\n registry = self.get(type_, differentiator)\n registry.register(type_, arg, name)", "def add_path_for_monitoring(self, path, prefix):\n pass", "def register_index_view(self, blueprint):\n view = apply_decorators(self.index_view, self.index_decorators)\n blueprint.add_url_rule(self.index_rule, self.index_endpoint, view)", "def test_add_index_settings(self):\n response = self.post('base:add_index', self.form_data(), **self.slugs())\n assert 'base/project.html' in response.template_name\n\n response = self.get('base:index_settings', **self.index_slugs())\n assert 'base/index_update_form.html' in response.template_name", "def add_path(self, adapter: str, wwpn: str):\n self.paths.append((adapter, wwpn))", "def add_catalog_indexes(context, logger):\n if logger is None:\n logger = logging.getLogger('bungenicms.membershipdirectory')\n \n # Run the catalog.xml step as that may have defined new metadata columns. \n # We could instead add <depends name=\"catalog\"/> to the registration of our \n # import step in zcml, but doing it in code makes this method usable as \n # upgrade step as well. 
Note that this silently does nothing when there is \n # no catalog.xml, so it is quite safe.\n setup = getToolByName(context, 'portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'catalog')\n \n catalog = getToolByName(context, 'portal_catalog')\n indexes = catalog.indexes()\n \n # Specify the indexes you want, with ('index_name', 'index_type')\n wanted = (('county', 'FieldIndex'),\n ('constituency', 'FieldIndex'),\n ('priority_number', 'FieldIndex'), \n ('political_party', 'FieldIndex'),\n ('elected_nominated', 'FieldIndex'),\n ('member_status', 'FieldIndex'),\n ('special_interest', 'FieldIndex'),\n ('other_names', 'FieldIndex'),\n ('member_role', 'FieldIndex'),\n ('member_title', 'FieldIndex'),\n ('body_text', 'FieldIndex'),\n ('member_full_names', 'ZCTextIndex'),\n )\n\n indexables = []\n for (name, meta_type) in wanted:\n if meta_type and name not in indexes:\n if meta_type == 'ZCTextIndex':\n item_extras = Empty()\n item_extras.doc_attr = name\n item_extras.index_type = 'Okapi BM25 Rank'\n item_extras.lexicon_id = 'plone_lexicon'\n catalog.addIndex(name, meta_type, item_extras)\n else:\n catalog.addIndex(name, meta_type)\n \n indexables.append(name)\n logger.info('Added %s for field %s.', meta_type, name)\n if len(indexables) > 0:\n logger.info('Indexing new indexes %s.', ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def add_views(self, *args):\n for view in args:\n self.add_view(view)", "def add_data(self, skins, method='common', colours=8):\n if not isinstance(skins, list):\n skins = [skins]\n for skin in skins:\n if method == 'common':\n rgb = ImageColour.get_most_common(skin.get_file_path(self.skin_directory, 'loading'), colours)\n else:\n rgb = ImageColour.get_average(skin.get_file_path(self.skin_directory, 'loading'))\n h, radius, _ = rgb_to_hsv(rgb.r, rgb.g, rgb.b)\n angle = h * 2 * np.pi\n img = Image.open(skin.get_file_path(self.skin_directory, 'tiles'))\n ab = AnnotationBbox(OffsetImage(img, zoom=0.13), (angle, radius), frameon=False)\n self.ax.add_artist(ab)\n self.figure = plt.gcf()", "def includeme(config):\n add_view(config)", "def skinCluster(*args, addInfluence: Union[AnyStr, List[AnyStr]]=\"\", addToSelection: bool=True,\n after: bool=True, afterReference: bool=True, baseShape: Union[AnyStr,\n List[AnyStr]]=\"\", before: bool=True, bindMethod: Union[int, bool]=0,\n deformerTools: bool=True, dropoffRate: Union[float, bool]=0.0, exclusive:\n Union[AnyStr, bool]=\"\", forceNormalizeWeights: bool=True, frontOfChain:\n bool=True, geometry: Union[AnyStr, List[AnyStr], bool]=\"\", geometryIndices:\n bool=True, heatmapFalloff: float=0.0, ignoreBindPose: bool=True,\n ignoreHierarchy: bool=True, ignoreSelected: bool=True, includeHiddenSelections:\n bool=False, influence: Union[AnyStr, bool]=\"\", lockWeights: bool=True,\n maximumInfluences: Union[int, bool]=0, moveJointsMode: bool=True, name:\n AnyStr=\"\", normalizeWeights: Union[int, bool]=0, nurbsSamples: int=0,\n obeyMaxInfluences: bool=True, parallel: bool=True, polySmoothness: float=0.0,\n prune: bool=True, recacheBindMatrices: bool=True, remove: Union[bool,\n List[bool]]=True, removeFromSelection: bool=True, removeInfluence: Union[AnyStr,\n List[AnyStr]]=\"\", removeUnusedInfluence: bool=True, selectInfluenceVerts:\n AnyStr=\"\", skinMethod: Union[int, bool]=1, smoothWeights: float=0.0,\n smoothWeightsMaxIterations: int=2, split: bool=True, toSelectedBones: bool=True,\n toSkeletonAndTransforms: bool=True, unbind: bool=True, unbindKeepHistory:\n bool=True, useGeometry: bool=True, 
volumeBind: float=0.0, volumeType: int=0,\n weight: float=0.0, weightDistribution: Union[int, bool]=1, weightedInfluence:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def register_views(app: Application, base: str):\n cors = aiohttp_cors.setup(app)\n\n for view in views:\n logger.info(\"Registered %s at %s\", view.__name__, base + view.url)\n view.register_route(app, base)\n view.enable_cors(cors)", "def setPath(self, name, value):\n response = self.extendPath(name, value, True, True)\n return response", "def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)", "def add_path(self, path, path_item):\n if path not in self._swagger:\n self._swagger[path] = path_item\n else:\n for method, definition in path_item.items():\n if definition is not None:\n setattr(self._swagger[path], method, definition)", "def setup(config, *args, **kwargs):\n cfg_rest = config.get('rest',{}).get('grids',{})\n db_cfg = cfg_rest.get('database',{})\n\n # add indexes\n db = pymongo.MongoClient(**db_cfg).grids\n if 'grid_id_index' not in db.grids.index_information():\n db.grids.create_index('grid_id', name='grid_id_index', unique=True)\n\n handler_cfg = RESTHandlerSetup(config, *args, **kwargs)\n handler_cfg.update({\n 'database': motor.motor_tornado.MotorClient(**db_cfg).grids,\n })\n\n return [\n (r'/grids', MultiGridsHandler, handler_cfg),\n (r'/grids/(?P<grid_id>\\w+)', GridsHandler, handler_cfg),\n ]", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def add_http_server_index(self, value=u\"index.html index.htm\"):\n path = [u\"http\", u\"server\", u\"index\"]\n self.add_config_item(self._nodeconfig, value, path)", "def add_view( *args, **kwargs ):", "def _append_index_type(self, index_type):\n if index_type is IndexType.HASH:\n self.args.extend([\"ON\", \"HASH\"])\n elif index_type is IndexType.JSON:\n self.args.extend([\"ON\", \"JSON\"])\n elif index_type is not None:\n raise RuntimeError(f\"index_type must be one of {list(IndexType)}\")", "def add_loaders(self, loaders):\n # type: (List[AbstractTemplateLoader]) -> None\n self.runtime_configuration_builder.add_loaders(loaders)", "def register(self, uri: str, *, event_types: Iterable = ('CREATE', 'UPDATE', 'DELETE',)):\n allowed_events = ('CREATE', 'UPDATE', 'DELETE',)\n\n if not uri.startswith('/'):\n raise RuntimeError('every endpoint should start with a forward slash')\n\n def register_wrapper(coro_func):\n if not asyncio.iscoroutinefunction(coro_func):\n raise TypeError(f'Annotated functions should be coroutines. Use \\'async def\\'.')\n \n for event in event_types:\n if event not in allowed_events:\n raise RuntimeError(f'Event {event} not recognized.')\n\n self._registered_uris.append({\n 'uri': uri,\n 'event_types': event_types,\n 'coroutine_or_callable': coro_func\n })\n return coro_func\n return register_wrapper", "def addSpeciesTypeComponentIndex(self, *args):\n return _libsbml.MultiSpeciesType_addSpeciesTypeComponentIndex(self, *args)", "def add(self, config):\n self.__idx(config)" ]
[ "0.52020884", "0.48797432", "0.47036213", "0.47002146", "0.45867845", "0.45276535", "0.4520932", "0.45103517", "0.447265", "0.4450312", "0.44348246", "0.44156086", "0.4405821", "0.43939352", "0.43877882", "0.4384183", "0.43760398", "0.43514234", "0.4349834", "0.43400645", "0.42961922", "0.42879647", "0.42876133", "0.42703658", "0.42676818", "0.42622942", "0.42518768", "0.4233645", "0.41951826", "0.4171769" ]
0.6681504
0
Loads the Sentiment 140 dataset, with preprocessing.
def load_sentiment_140(data_dir="data", num_words=None, num_rows=None, maxlen=None,
                       test_split=0.2, seed=100, simple_classifier=False):
    if not maxlen:
        maxlen = 100
    # Load dataset from file
    file_dir = data_dir + "/sentiment-140/training.1600000.processed.noemoticon.csv"
    sentiment_data = pd.read_csv(file_dir, encoding='ISO-8859-1',
                                 names=["Sentiment", "ID", "Date", "Query", "User", "Text"])
    # Shuffle order of rows
    sentiment_data = sentiment_data.sample(frac=1, random_state=seed)
    # Only grab num_rows rows from data
    # How many rows of data to return. Default all
    if not num_rows:
        num_rows = len(sentiment_data["Sentiment"])
    sentiment_data = sentiment_data.iloc[:num_rows]
    # Split training data
    x_train, x_test, y_train, y_test = train_test_split(sentiment_data["Text"].to_numpy(),
                                                        sentiment_data["Sentiment"].to_numpy(),
                                                        test_size=test_split,
                                                        random_state=seed)
    # Convert labels of 4 to 1
    y_train[y_train == 4] = 1
    y_test[y_test == 4] = 1
    # Apply text preprocessing to training text
    x_train, x_test = preprocess(x_train, x_test, y_train, num_words, maxlen,
                                 simple_classifier=simple_classifier)
    return (x_train, y_train), (x_test, y_test)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_train_data(pos_file_name='train_pos_full.csv', neg_file_name='train_neg_full.csv'):\n pos_path = path.join('..', 'data', 'parsed', pos_file_name)\n neg_path = path.join('..', 'data', 'parsed', neg_file_name)\n\n pos_data = pd.read_csv(pos_path, header=None)\n pos_data.columns = ['text']\n pos_data['sentiment'] = 1\n\n neg_data = pd.read_csv(neg_path, header=None)\n neg_data.columns = ['text']\n neg_data['sentiment'] = -1\n\n train_data = pd.concat([pos_data, neg_data], axis=0)\n return train_data", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def load_data(self, modalities, args):\n print(\"Loading data...\")\n data_dir = os.path.abspath(args.data_dir)\n train_data = SubtitlesDataset(modalities, data_dir, mode='train',\n truncate=True, item_as_dict=True)\n test_data = SubtitlesDataset(modalities, data_dir, mode='test',\n truncate=True, item_as_dict=True)\n print(\"Done.\")\n if len(args.normalize) > 0:\n print(\"Normalizing \", args.normalize, \"...\")\n # Normalize test data using training data as reference\n test_data.normalize_(modalities=args.normalize,\n ref_data=train_data)\n # Normalize training data in-place\n train_data.normalize_(modalities=args.normalize)\n return train_data, test_data", "def load_data_and_labels():\n # Load data from files\n positive_examples = list(\n open(\"./data/rt-polarity.pos\", \"r\", encoding='latin-1').readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(\n open(\"./data/rt-polarity.neg\", \"r\", encoding='latin-1').readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]", "def splits(cls, text_field, label_field, root='.data',\n train='training.1600000.processed.noemoticon.csv', \n test='testdata.manual.2009.06.14.csv', \n neutral = None, **kwargs):\n \n path_train = root + train\n path_test = root + test\n \n if not os.path.exists(root):\n os.mkdir(root)\n \n if not os.path.exists(path_train) or not os.path.exists(path_test):\n path = cls.download(root)\n path_train = path + train\n path_test = path + test\n \n train_dataset = Sentiment140(path_train, text_field, label_field, neutral=neutral, **kwargs)\n test_dataset = Sentiment140(path_test, text_field, label_field, **kwargs)\n \n return train_dataset, test_dataset", "def load_data(nlp, cue_verbs, poly):\n train_dicts, _ = load_quote_authors(nlp)\n author_prediction_dataset = AuthorPredictionDataset(train_dicts, cue_verbs, poly)\n return np.array(train_dicts), author_prediction_dataset", "def load_dataset(fname, nb_lines):\n import os.path\n if os.path.isfile('safe/Amazon-'+str(nb_lines)+'.p'):\n return util.load('safe/Amazon-'+str(nb_lines)+'.p')\n count = 1\n X = []\n y = []\n with open(fname) as f:\n for line in f:\n text, label = read_line(line)\n #print((label, text))\n X.append(text)\n y.append(label)\n if count >= nb_lines:\n break\n count+=1\n\n #load pretrained dictonary\n dico = 
util.load('safe/vocab_gensim.p')\n preprocessor = text_preprocessing.Preprocessor(dico=dico)\n X = preprocessor.preprocess(X)\n #save the loaded dataset in a pickle for speeding up next run\n util.save((X,y), 'safe/Amazon-'+str(nb_lines)+'.p')\n return (X, y)", "def load_classification_dataset(step, do_lower_case,data_type,data_subtype,use_syntetic_data):\n assert step in ['train', 'test']\n binary = False \n undersample_majority = False\n\n paths = ['~/Github/Data/Patient/NIRADS/PET_CT_NIRADS.xlsx', '~/Github/Data/Patient/NIRADS/MR_NIRADS_2018.xlsx','~/Github/Data/Patient/NIRADS/MR_NIRADS.xlsx']\n if data_type == 'ct':\n data_r = pd.read_excel(paths[0])\n else:\n data_r = pd.read_excel(paths[1])\n data_r.append(pd.read_excel(paths[2]), ignore_index = True, sort=False)\n\n data_p,data_n, y_p, y_n = tc.text_cleaning(data_r, None, data_target='section') \n\n if data_subtype == 'primary':\n data = data_p\n y = y_p -1\n else:\n data = data_n\n y = y_n -1\n\n if binary:\n y[y<2]=0\n y[y>0]=1\n\n y_dist = [np.sum(y==x) for x in np.unique(y)]\n print(\"Distribution of all labels: \", y_dist, \"\\n\\n\")\n\n train_text, test_text, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=1)\n\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels: \", y_dist, \"\\n\\n\")\n\n if step =='train':\n if use_syntetic_data:\n data_syntetic = pd.read_csv('~/Github/Data/Patient/NIRADS/PET_CT_NIRADS_syntetic.csv')\n train_text = np.concatenate((train_text,data_syntetic['syntetic_data'].values))\n y_train = np.concatenate((y_train,data_syntetic['syntetic_label'].values-1))\n\n train_text, test_text, y_train, y_test = train_test_split(train_text, y_train, test_size=0.5, random_state=1)\n train_text = np.concatenate((train_text,test_text))\n y_train = np.concatenate((y_train,y_test))\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels after inserting syntetic data: \", y_dist, \"\\n\\n\")\n\n if not undersample_majority:\n data_to_use = train_text.copy()\n y_to_use = y_train.copy()\n else:\n max_label1 = 1000\n data_to_use = []\n y_to_use = []\n y1=0\n for x in range(len(y_train)):\n if y_train[x] !=1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n else:\n if y1 <max_label1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n y1+=1\n\n else:\n data_to_use = test_text.copy()\n y_to_use = y_test.copy()\n\n basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n examples = []\n\n for i, tokens in tqdm(enumerate(data_to_use)):\n label = y_to_use[i]\n examples.append(\n ClassificationExample(\n id=i,\n tokens_a=basic_tokenizer.tokenize(tokens),\n tokens_b=None,\n label=label,\n )\n )\n logging.info('Number of `%s` examples: %d', step, len(examples))\n \n return examples", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def pre_process_data(filepath):\n positive_path = os.path.join(filepath, 'pos')\n negative_path = os.path.join(filepath, 'neg')\n\n pos_label = 1\n neg_label = 0\n\n dataset = []\n\n for filename in glob.glob(os.path.join(positive_path, '*.txt')):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n dataset.append((pos_label, f.read()))\n\n for filename in glob.glob(os.path.join(negative_path, '*.txt')):\n with open(filename, 'r', 
encoding=\"utf-8\") as f:\n dataset.append((neg_label, f.read()))\n\n shuffle(dataset)\n\n return dataset", "def Preprocess_MR(path=\"datasets/raw/rt10662\"):\n\n output_path = \"datasets/preprocessed/MR_Data\"\n\n # load positive and negative data\n with io.open(os.path.join(path, \"rt-polarity.pos\"), encoding='latin-1') as f:\n pos_data = f.readlines()\n pos_data = [sentence.strip() for sentence in pos_data]\n with io.open(os.path.join(path, \"rt-polarity.neg\"), encoding='latin-1') as f:\n neg_data = f.readlines()\n neg_data = [sentence.strip() for sentence in neg_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def load_data():\n # Load and preprocess data\n x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev = load_data_and_labels_without_shuffled()\n\n x_text_train1 = split_sentence(x_text_train1)\n x_text_train2 = split_sentence(x_text_train2)\n x_text_dev1 = split_sentence(x_text_dev1)\n x_text_dev2 = split_sentence(x_text_dev2)\n\n x_text_train1 = pad_sentences(x_text_train1)\n x_text_train2 = pad_sentences(x_text_train2)\n x_text_dev1 = pad_sentences(x_text_dev1)\n x_text_dev2 = pad_sentences(x_text_dev2)\n\n # sentences = x_text_train1 + x_text_train2 + x_text_dev1 + x_text_dev2\n # vocabulary, vocabulary_inv = build_vocab(sentences)\n # x_text_train1 = build_input_data(x_text_train1, vocabulary)\n # x_text_train2 = build_input_data(x_text_train2, vocabulary)\n # x_text_dev1 = build_input_data(x_text_dev1, vocabulary)\n # x_text_dev2 = build_input_data(x_text_dev2, vocabulary)\n\n x_train1 = sentence_word2vec(x_text_train1)\n x_train2 = sentence_word2vec(x_text_train2)\n x_dev1 = sentence_word2vec(x_text_dev1)\n x_dev2 = sentence_word2vec(x_text_dev2)\n\n y_train = np.array(y_train)\n y_dev = np.array(y_dev)\n # return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev, vocabulary, vocabulary_inv]\n\n return [x_train1, x_train2, x_dev1, x_dev2, y_train, y_dev]", "def pre_train(self, dataset, **kwargs):\n\n pass", "def load_text_and_label(data_file):\n # load 
data from file\n\n # splite by word\n dfRaw = pd.read_csv(data_file)\n dfRec = dfRaw[['Review Text', 'Recommended IND']].dropna()\n pos_examples = dfRec[dfRec['Recommended IND'] == 1]['Review Text'].tolist()\n neg_examples = dfRec[dfRec['Recommended IND'] == 0]['Review Text'].tolist()\n\n x_text = pos_examples + neg_examples\n x_text = np.array([clean_str(sentence) for sentence in x_text])\n # generate label (y)\n pos_labels = [[0,1] for _ in pos_examples]\n neg_labels = [[1,0] for _ in neg_examples]\n y = np.array(pos_labels + neg_labels)\n return [x_text, y]", "def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)", "def __loadPreProcessedData(self):\n le = joblib.load(self.le_filename)\n X = np.loadtxt(self.X_filename, delimiter=',').astype(int)\n raw_y = np.loadtxt(self.y_filename, delimiter=',').astype(int)\n y = le.inverse_transform(raw_y)\n ##Initialize atrtribute for this class\n self.le, self.X, self.y = le, X, y", "def preprocess_corpus(train_sents):\n #lexicon_dict['stop_words'] = set(open('stop_words').read().split())\n lexicon_dict['people_name']=set(open('data\\\\lexicon\\\\firstname.5k').read().title().split())\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\lastname.5000').read().title().split()))\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\people.family_name').read().title().split()))\n lexicon_dict['people_person']=set(open('data\\\\lexicon\\\\people.person').read().title().split())\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\people.person.lastnames').read().title().split()))\n \n lexicon_dict['product']=set(open('data\\\\lexicon\\\\product').read().title().split())\n lexicon_dict['business_products']=set(open('data\\\\lexicon\\\\business.consumer_product').read().title().split())\n\n lexicon_dict['sports_team']=set(open('data\\\\lexicon\\\\sports.sports_team').read().title().split())\n\n lexicon_dict['tvprog']=set(open('data\\\\lexicon\\\\tv.tv_program').read().title().split())\n \n lexicon_dict['museum'] = set(open('data\\\\lexicon\\\\architecture.museum').read().title().split())\n lexicon_dict['auto_make']=set(open('data\\\\lexicon\\\\automotive.make').read().title().split())\n lexicon_dict['auto_model']=set(open('data\\\\lexicon\\\\automotive.model').read().title().split())\n lexicon_dict['award']=set(open('data\\\\lexicon\\\\award.award').read().title().split())\n lexicon_dict['fest_ser']=set(open('data\\\\lexicon\\\\base.events.festival_series').read().title().split())\n lexicon_dict['reg_name']=set(open('data\\\\lexicon\\\\bigdict').read().title().split())\n lexicon_dict['newspaper']=set(open('data\\\\lexicon\\\\book.newspaper').read().title().split())\n lexicon_dict['tv_channels']=set(open('data\\\\lexicon\\\\broadcast.tv_channel').read().title().split())\n lexicon_dict['business_brand']=set(open('data\\\\lexicon\\\\business.brand').read().title().split())\n lexicon_dict['business_company']=set(open('data\\\\lexicon\\\\business.brand').read().title().split())\n lexicon_dict['business_brand']=set(open('data\\\\lexicon\\\\business.consumer_company').read().title().split())\n\n lexicon_dict['business_sponsor']=set(open('data\\\\lexicon\\\\business.sponsor').read().title().split())\n lexicon_dict['top10']=set(open('data\\\\lexicon\\\\cap.10').read().title().split())\n lexicon_dict['top100']=set(open('data\\\\lexicon\\\\cap.100').read().title().split())\n lexicon_dict['cap500']=set(open('data\\\\lexicon\\\\cap.500').read().title().split())\n 
lexicon_dict['cap1000']=set(open('data\\\\lexicon\\\\cap.1000').read().title().split())\n lexicon_dict['video_game']=set(open('data\\\\lexicon\\\\cvg.computer_videogame').read().title().split())\n lexicon_dict['cvg_developer']=set(open('data\\\\lexicon\\\\cvg.cvg_developer').read().title().split())\n lexicon_dict['cvg_platform']=set(open('data\\\\lexicon\\\\cvg.cvg_platform').read().title().split())\n #leaving out dictionaries.conf,english.stop,lower.100,lower.500,lower.1000,lower.5000,lower.10000\n lexicon_dict['dictionaries_conf']=set(open('data\\\\lexicon\\\\dictionaries.conf').read().title().split())\n lexicon_dict['english_stop']=set(open('data\\\\lexicon\\\\english.stop').read().title().split())\n lexicon_dict['lower_10000']=set(open('data\\\\lexicon\\\\lower.10000').read().title().split())\n #lexicon_dict['cvg_platform']=set(open('data\\\\lexicon\\\\cvg.cvg_platform').read().title().split())\n \n lexicon_dict['university']=set(open('data\\\\lexicon\\\\education.university').read().title().split())\n lexicon_dict['gov_agency']=set(open('data\\\\lexicon\\\\government.government_agency').read().title().split())\n\n\n lexicon_dict['location']=set(open('data\\\\lexicon\\\\location').read().title().split())\n lexicon_dict['location'].update(set(open('data\\\\lexicon\\\\location.country').read().title().split()))\n lexicon_dict['sports_league']=set(open('data\\\\lexicon\\\\sports.sports_league').read().title().split())\n\n\n lexicon_dict['time_holiday']=set(open('data\\\\lexicon\\\\time.holiday').read().title().split())\n lexicon_dict['time_rec_event']=set(open('data\\\\lexicon\\\\time.recurring_event').read().title().split())\n lexicon_dict['roads']=set(open('data\\\\lexicon\\\\transportation.road').read().title().split())\n lexicon_dict['tvnet']=set(open('data\\\\lexicon\\\\tv.tv_network').read().title().split())\n\n lexicon_dict['ven_company']=set(open('data\\\\lexicon\\\\venture_capital.venture_funded_company').read().title().split())\n lexicon_dict['venues']=set(open('data\\\\lexicon\\\\venues').read().title().split())", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def set_up_data():\r\n \r\n X, Y = pretreatment.import_dataset()\r\n \r\n print('Applying cleansing...')\r\n X = pretreatment.pretreatment(X)\r\n Y = pretreatment.pretreatment(Y)\r\n \r\n indice = [i for i in range(len(X)) if (len(X[i]) > SENTENCE_LENGTH-2 and len(X[i]) < SENTENCE_LENGTH+1 and len(Y[i]) > SENTENCE_LENGTH-2 and len(Y[i]) < SENTENCE_LENGTH+1)]#(len(X[i]) > SENTENCE_LENGTH and len(X[i]) < 2 * SENTENCE_LENGTH and len(Y[i]) > SENTENCE_LENGTH and len(Y[i]) < 2 * SENTENCE_LENGTH)]\r\n X = [X[i] for i in indice]\r\n Y = [Y[i] for i in indice]\r\n \r\n X = pretreatment.standardize_sentence_length(X)\r\n Y = pretreatment.standardize_sentence_length(Y)\r\n \r\n print('Computing the corpus sizes...')\r\n compute_T(X, 'english')\r\n compute_T(Y, 'french')\r\n compute_S(X, 'english')\r\n compute_S(Y, 'french')\r\n compute_N(X, 'french')\r\n compute_N(Y, 'english')\r\n \r\n print('English corpus: %d tokens' % T_ENGLISH)\r\n print('French corpus: %d tokens' % T_FRENCH)\r\n print('English sentence length: %d' % S_ENGLISH)\r\n print('French sentence length: %d' % 
S_FRENCH)\r\n print('Number of sentences (both english and french): %d / %d' % (N_ENGLISH, N_FRENCH))\r\n \r\n print('Converting in one hot vectors')\r\n global CORPUS_ENGLISH, CORPUS_FRENCH\r\n params_ENGLISH = (N_ENGLISH, S_ENGLISH, T_ENGLISH)\r\n params_FRENCH = (N_FRENCH, S_FRENCH, T_FRENCH)\r\n X, CORPUS_ENGLISH= treatment.convert_to_one_hot(X, params_ENGLISH)\r\n Y, CORPUS_FRENCH= treatment.convert_to_one_hot(Y, params_FRENCH)\r\n \r\n return (X, Y)", "def load_data_and_labels(positive_data_file, negative_data_file):\n # Load data from files\n positive_examples = list(open(positive_data_file, \"r\",encoding='utf-8').readlines())\n positive_examples = [s.strip() for s in positive_examples]\n print (\"len of pos\"+positive_data_file, len(positive_examples))\n negative_examples = list(open(negative_data_file, \"r\",encoding='latin-1').readlines())\n negative_examples = [s.strip() for s in negative_examples]\n print (\"len of neg\"+negative_data_file,len(negative_examples))\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]", "def prepare_data(vocabulary_size):\n print(\"Downloading data from \" + _DATA_DIR_ +\"..\")\n getData(_DATA_DIR_)\n print(\"Creating Vocabulary..\")\n create_vocabulary( _VOCAB_DIR_, _RAW_SENTENCES_DIR_, vocabulary_size )\n print(\"Converting sentences to sequences of ids..\")\n data_to_token_ids( _RAW_SENTENCES_DIR_ , _SENTENCES_DIR, _VOCAB_DIR_ )", "def load_data_and_labels(positive_data_file, negative_data_file):\n\n # Load data from files\n positive_examples = list(open(positive_data_file, encoding=\"utf-8\").readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(open(negative_data_file,encoding=\"utf-8\").readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n #x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]", "def __init__(self, data_filename):\n with open(data_filename, 'rb') as data_file:\n loaded_features = pickle.load(data_file)\n self.title_nlp_tfidf_features = loaded_features['title_NLP_TFIDF_features']\n self.other_features = loaded_features['other_features']\n self.category1_features = loaded_features['category1_features']\n self.category2_features = loaded_features['category2_features']\n self.category3_features = loaded_features['category3_features']\n self.material_features = loaded_features['material_features']\n self.who_made_features = loaded_features['whoMade_features']\n self.when_made_features = loaded_features['whenMade_features']\n self.style1_features = loaded_features['style1_features']\n self.style2_features = loaded_features['style2_features']\n self.feature_labels = loaded_features['feature_labels']", "def load_data(path='./data/train'):\n print(\"Loading IMDB Data...\")\n data = []\n\n dir = os.path.dirname(__file__)\n file_list = glob.glob(os.path.join(dir, path + '/pos/*'))\n file_list.extend(glob.glob(os.path.join(dir, path + '/neg/*')))\n print(\"Parsing %s files\" % len(file_list))\n for i, f in enumerate(file_list):\n with open(f, \"r\", 
encoding=\"utf8\") as openf:\n s = openf.read()\n data.append(imp.preprocess(s)) # NOTE: Preprocessing code called here on all reviews\n return data", "def load_data_and_labels(filename, dataset_name,is_train):\n label_count={}\n parameter_file = \"./parameters.json\"\n params = json.loads(open(parameter_file).read())\n if dataset_name == 'ag_news' or dataset_name == 'dbpedia' or dataset_name == 'sogou_news' or dataset_name == 'amazon_review_full' or dataset_name == 'amazon_review_polarity' :\n df = pd.read_csv(filename, names=['label', 'title', 'text'], dtype={'title': object,'text': object})\n selected = ['label', 'title','text','too_short','to_drop']\n\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[2]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df[selected[2]].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df[selected[2]].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n \n elif dataset_name == 'yelp_review_full' or dataset_name == 'yelp_review_polarity':\n df = pd.read_csv(filename, names=['label','text'], dtype={'text': object})\n selected = ['label','text','too_short','to_drop']\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[1]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['text'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['text'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n elif dataset_name == 'yahoo_answers':\n df = pd.read_csv(filename, names=['label', 'title', 'content','answer'], dtype={'title': object,'answer': object,'content': object})\n selected = ['label', 
'title','content','answer','too_short','to_drop'] \n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['temp'] = df[['content','answer']].apply(lambda x: ' '.join(str(v) for v in x), axis=1)\n df['too_short']= df['temp'].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['temp'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['temp'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n return x_raw, y_raw, df, labels", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def load_1dsentiment(labels):\n labels = list(open(labels, \"r\").readlines())\n y_text = np.array([abs(int(re.sub(r\"\\n\", \"\", sent))) for sent in labels])\n \n return y_text", "def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. 
SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples", "def load_data(path_to_dir):\n train_pos = []\n train_neg = []\n test_pos = []\n test_neg = []\n with open(path_to_dir+\"train-pos.txt\", \"r\") as f:\n for i,line in enumerate(f):\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_pos.append(words)\n with open(path_to_dir+\"train-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_neg.append(words)\n with open(path_to_dir+\"test-pos.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_pos.append(words)\n with open(path_to_dir+\"test-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_neg.append(words)\n\n return train_pos, train_neg, test_pos, test_neg" ]
[ "0.6553573", "0.65504044", "0.6413694", "0.6387078", "0.63126004", "0.62980497", "0.62966526", "0.62732714", "0.6251968", "0.62356544", "0.6217227", "0.6212488", "0.61900204", "0.6179025", "0.61564225", "0.61428803", "0.614052", "0.6135629", "0.6135321", "0.6128348", "0.6127551", "0.6088766", "0.60859746", "0.606017", "0.6056866", "0.6042663", "0.6023433", "0.60154206", "0.5992312", "0.59830445" ]
0.7142621
0
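The preprocessing record above leans on a recurring idiom: building a one-hot lookup by pairing the sorted label set with identity-matrix rows (see the np.fill_diagonal calls in its snippets). A minimal, self-contained sketch of that pattern, with illustrative labels borrowed from the same record:

import numpy as np

def build_label_dict(labels):
    # Map each distinct label to one row of an identity matrix.
    labels = sorted(set(labels))                       # stable, deduplicated order
    one_hot = np.zeros((len(labels), len(labels)), int)
    np.fill_diagonal(one_hot, 1)                       # row i encodes label i
    return dict(zip(labels, one_hot))

label_dict = build_label_dict(['DEMO', 'DISE', 'TRMT'])
assert list(label_dict['DISE']) == [0, 1, 0]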
Log into engine as administrator.
def test_system_platform():
    accepted_values = ['windows', 'linux']
    output = sh.system_platform()
    assert output in accepted_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login_as_admin(self, username='admin', password='admin'):\n return self.login(**{'username': username, 'password': password})", "def connectToAdmin():\n connect(url='t3://' + admin_host + ':' + admin_port,\n adminServerName='AdminServer',\n username=admin_username,\n password=admin_password)", "def loginAsManager(self):\n self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = 'root'\n self.browser.getControl('Password').value = 'secret'\n self.browser.getControl('Log in').click()", "def p_makeAdminUser(self):\n\n # If already in database, return\n if self.dbManager.userExists(C_ADMINISTRATOR_USERNAME):\n return\n # Store admin in database\n self.dbManager.createUser(C_ADMINISTRATOR_USERNAME, C_ADMINISTRATOR_PASSWORD, UserRole.ADMIN, defaultPacemakerParameterData)", "def authorize_admin(self, instance):\n\n # Authorize user admin.\n instance.client.post(\n reverse(\"login\"),\n {\"username\": \"admin\", \"password\": \"admin\"},\n )\n return instance.client.get(reverse(\"edit\"))", "def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()", "def do_admin_login():\n user_requested = request.form['email'].lower()\n password_requested = request.form['password']\n\n target_user = User.query.filter_by(mail=user_requested).first()\n if target_user is None:\n return Response(render_template('admin/login.html',\n message=\"Unknown Credentials\"))\n\n if not target_user.check_password(password_requested):\n return Response(render_template('admin/login.html',\n message=\"Unknown Credentials\"))\n\n if not target_user.state == StateType.ACTIVE:\n return Response(render_template('admin/login.html',\n message=\"User account deactivated. 
Cannot login.\"))\n\n resp = Response(render_template('admin/admin.html', user=target_user.name,\n message=\"Login succeeded\"))\n set_access_cookies(resp, create_access_token(identity=target_user.id))\n return resp", "def admin():\n return redirect(url_for(\"user\", name=\"Admin!\"))", "def administrator():\n\n administrator = Administrator.objects.create(name='Michał', surname='Paluch',\n login='Udfsr43', password='Password_3',\n password_repeat='Password_3')\n return administrator", "def admin_con():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n admins_query = Admins.query(ancestor = admin_base).order(-Admins.date)\n admins = admins_query.fetch()\n output = template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins)\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def add_admin():\n email = Config.SITE_ADMIN\n password = input('Enter Admin Password >>> ')\n name = input('Enter Display Name >>> ')\n\n user = User(email, password, name)\n user.confirmed = True\n db.session.add(user)\n db.session.commit()\n print(\"%s has been added to the system as Admin\" % user.username)", "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def _login_as_staff(self):\n self.client.login(username=self.user.username, password=self.password)", "def _login_as_staff(self):\n self.client.login(username=self.user.username, password=self.password)", "def index():\r\n session.admin = True\r\n return dict(message=\"hello from admin.py\")", "def login(self):\n\t\treturn", "def do_admin_login():\n if request.form['password'] == 'admin' and request.form['username'] == 'admin':\n teams = get_team()\n if teams:\n return render_template('team-players.html', teams=teams)\n else:\n return render_template('team-players.html')\n else:\n flash('Invalid username or password. Please try again!')\n return render_template('login.html')", "def admin_login(self, username, password, login_url=\"/admin/\"):\n from selenium.webdriver.common.by import By\n\n self.selenium.get(\"%s%s\" % (self.live_server_url, login_url))\n username_input = self.selenium.find_element(By.NAME, \"username\")\n username_input.send_keys(username)\n password_input = self.selenium.find_element(By.NAME, \"password\")\n password_input.send_keys(password)\n login_text = _(\"Log in\")\n with self.wait_page_loaded():\n self.selenium.find_element(\n By.XPATH, '//input[@value=\"%s\"]' % login_text\n ).click()", "def process_admin_login():\n\n entered_email = request.form.get(\"email\")\n entered_password = request.form.get(\"password\")\n admin = c.get_admin(entered_email, entered_password)\n\n if admin is False:\n flash('Invalid credentials. 
Please click on sign up to create an account!')\n return redirect('/')\n session['current_admin'] = entered_email\n ad_id = admin.admin_id\n flash('Logged in as %s' % entered_email)\n if admin.rescue_id is None:\n return redirect('/admin' + '/' + str(ad_id) + '/rescue-info')\n else:\n return redirect('/admin' + '/' + str(ad_id))", "def ConnectToAdminServer():\n payload = ['%s\\n' % FLAGS.admin_user,\n '%s\\n' % FLAGS.admin_password]\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((FLAGS.host, FLAGS.admin_port))\n RecvAndSleep(s)\n SendPayload(s, payload)\n logging.info(\n 'Connected to the admin console as %s/%s.' % (\n FLAGS.admin_user, FLAGS.admin_password))\n return s", "def make_user_admin(connection,user):\r\n with connection:\r\n connection.execute(MAKE_USER_ADMIN,(user,))", "def test_admin_can_login_to_web_portal(admin):", "def create_admin():\n db.session.add(User(email='[email protected]', password='admin', admin=True))\n db.session.commit()", "def create_admin():\n db.session.add(User(email='[email protected]', password='admin', admin=True))\n db.session.commit()", "def login_permitted_user(self):\n self.grant_permission()\n self.client.login(username=\"john\", password=\"pass\")", "def test_admin_user_login_with_redirect(self):\n self.get_page(\"/\")\n self.at_page(\"/\")\n self.login(\"admin\", \"admin\", \"/admin/\")\n self.at_page(\"/admin/\")\n self.should_see(\"Django administration\")", "def test_admin_user_login(self):\n self.login(\"admin\", \"admin\")\n self.should_see(\"This is your profile, admin.\")", "def set_admin():\n try:\n ctypes.windll.shell32.ShellExecuteW(None, \"runas\", sys.executable, __file__, None, 1)\n except:\n print(\"Could not set the UAC level.\")", "def admin_login():\n account = request.json['account']\n password = request.json['password']\n u = user.User.query.filter(user.User.account == account).first()\n if not u:\n abort(404)\n if u.password == password and u.role == 'admin':\n if u.token is None:\n u.generate_token()\n db.session.merge(u)\n db.session.commit()\n return jsonify(u.to_dict())\n else:\n abort(500)" ]
[ "0.7176565", "0.6739861", "0.6625097", "0.6613401", "0.6605429", "0.65614206", "0.65538454", "0.6474383", "0.6392441", "0.63847953", "0.63789195", "0.63739824", "0.636728", "0.6315065", "0.6315065", "0.6308905", "0.6295946", "0.629345", "0.6289722", "0.62745535", "0.62532014", "0.62227714", "0.6186564", "0.61570656", "0.61570656", "0.6125799", "0.61117566", "0.6100807", "0.6098163", "0.60774463" ]
0.73660433
0
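Every row in this excerpt carries the same objective metadata: a single triplet over (query, document, negatives). A hedged sketch of how a consumer could unpack one row into contrastive training triplets; the abbreviated row below reuses the admin-login record's fields:

def iter_triplets(record):
    # One (anchor, positive, negative) tuple per hard negative in the row.
    anchor = record['query']        # natural-language description
    positive = record['document']   # the matching code snippet
    for negative in record['negatives']:
        yield anchor, positive, negative

row = {
    'query': 'Log into engine as administrator.',
    'document': 'def login_as_admin(): ...',
    'negatives': ["def login_as_admin(self, username='admin', password='admin'): ...",
                  'def connectToAdmin(): ...'],
}
assert len(list(iter_triplets(row))) == len(row['negatives'])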
Start both motors. The `run_direct` command allows motor performance to be varied on the fly by adjusting the `duty_cycle_sp` attribute. The motors don't stop until told to stop. `bias` determines whether one motor is biased to run faster for course correction: the bias is simply added to the speed of the motor selected by the current bias direction (`biasDir`).
def forward(speed, bias, biasDir):
    # todo: check directions for me please
    if biasDir == 1:
        rightMotor.run_direct(duty_cycle_sp=speed + bias)
        leftMotor.run_direct(duty_cycle_sp=speed)
    elif biasDir == -1:
        rightMotor.run_direct(duty_cycle_sp=speed)
        leftMotor.run_direct(duty_cycle_sp=speed + bias)
    else:
        rightMotor.run_direct(duty_cycle_sp=speed)
        leftMotor.run_direct(duty_cycle_sp=speed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def slither(self):\n #writedown where we started\n starting_direction = self.get_heading()\n #start driving forward\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.fwd()\n # throttl down the left motor\n for power in range(self.LEFT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n #throttle up the left while lowring the right\n for power in range(60, self.LEFT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n # throttl down the right motor\n for power in range(self.RIGHT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n #throttle up the right while lowring the right\n for power in range(60, self.RIGHT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n \n #straighten out\n while self.get_heading() != starting_direction:\n #if I need to veer right\n if self.get_heading() < starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 90)\n self.set_motor_power(self.MOTOR_RIGHT, 60)\n #if I need to veer left\n elif self.get_heading() > starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 60)\n self.set_motor_power(self.MOTOR_RIGHT, 90)\n \n time.sleep(.1)\n self.stop()", "def drive(left_motor, right_motor, left_sp, right_sp):\n left_motor.run_forever(speed_sp=left_sp)\n right_motor.run_forever(speed_sp=right_sp)", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def turn(dir, speed, runtime):\n\trightMotor.run_timed(duty_cycle_sp=-dir*speed, time_sp=runtime)\n\tleftMotor.run_timed(duty_cycle_sp=dir*speed, time_sp=runtime)", "def drive_straight(left_motor, right_motor, time_s):\n print(\"Driving straight...\")\n left_motor.run_forever(speed_sp=400)\n right_motor.run_forever(speed_sp=400)\n time.sleep(time_s)\n left_motor.stop(stop_action=\"brake\")\n right_motor.stop(stop_action=\"brake\")\n # This solution uses run_forever then a time delay there is also a run_timed method\n # that could've been used instead if control needs to be returned immediately\n # Typically the solution above is the more common drive pattern.", "def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)", "def forward_button(self, left_speed, right_speed):\n 
self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def setMotors(self, left_dist: int, right_dist: int, speed: int) -> None:\n \"\"\"\n The following is a work-around for a bug in the Neato API. The bug is that the\n robot won't stop instantly if a 0-velocity command is sent - the robot\n could continue moving for up to a second. To work around this bug, the\n first time a 0-velocity is sent in, a velocity of 1,1,1 is sent. Then,\n the zero is sent. This effectively causes the robot to stop instantly.\n \"\"\"\n if (int(left_dist) == 0 and int(right_dist) == 0 and int(speed) == 0):\n if (not self._stopped):\n self._stopped = True\n left_dist = 1\n right_dist = 1\n speed = 1\n else:\n self._stopped = False\n\n self._protocol.write_line(\n 'setmotor lwheeldist {} rwheeldist {} speed {}'.format(\n left_dist, right_dist, speed))", "def drive(self,direction, speed=100):\n if direction == 1:\n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def start_drill(self):\n\n # Enable all motors\n # NOTE 1: Order matters!\n # NOTE 2: BFM not energized since it will cause motor to move but it is pushed back a bit to ensure the feed is all the way back.\n self.bfm_startup()\n self.fmt.energize_motor()\n self.fmb.energize_motor()\n self.ym.energize_motor()\n self.pm.energize_motor()\n # NOTE: BQM may need to be enabled right before running the drill\n self.bqm.energize_motor()\n\n # Enabling motors takes time\n # NOTE: This may need to be optimized after all the motors have been tuned to make the process faster\n time.sleep(2)", "def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)", "def turn():\n\n # We want to turn the robot wheels in opposite directions from 1/4 to 3/4\n # of a second. 
Use `random.choice()` to decide which wheel will turn which\n # way.\n power = choice([(1, -1), (-1, 1)])\n t = randint(250, 750)\n\n for m, p in zip(motors, power):\n m.run_timed(speed_sp = p * 750, time_sp = t)\n\n # Wait until both motors are stopped:\n while any(m.state for m in motors):\n sleep(0.1)", "def start():\n for m in motors:\n m.run_direct()", "def arm(self):\n self.set_destination(0, 0, 0, 0)\n\n for _ in range(100):\n self.local_pos_pub.publish(self.waypoint_g)\n rospy.sleep(0.01)\n\n rospy.loginfo(CBLUE2 + \"Arming Drone\" + CEND)\n\n arm_request = CommandBoolRequest(True)\n\n while not rospy.is_shutdown() and not self.current_state_g.armed:\n rospy.sleep(0.1)\n response = self.arming_client(arm_request)\n self.local_pos_pub.publish(self.waypoint_g)\n else:\n if response.success:\n rospy.loginfo(CGREEN2 + \"Arming successful\" + CEND)\n return 0\n else:\n rospy.logerr(CRED2 + \"Arming failed\" + CEND)\n return -1", "def right_forward(self, state, speed):\n if state:\n self.right_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def set_right(self, spd):\n self.r_motor.set(spd)", "def set_control_commands(self, ref_state, ref_ind):\n if not self.at_dest:\n self.commands['speed'] = self.cruising_speed * (5. / self.traffic_level)\n else:\n self.commands['speed'] = 0.0\n dx = ref_state[0] - self.x\n dy = ref_state[1] - self.y\n dx_v = numpy.cos(self.yaw) * dx + numpy.sin(self.yaw) * dy\n\n # To overtake, move to the left a little bit and follow your original traj.\n stay_overtake = False\n if self.overtake:\n self.overtake_begin_ignore += 1\n else:\n self.overtake_begin_ignore = 0\n if self.overtake and len(self.radar_readings[0, :]) > 0:\n stay_overtake = numpy.min(self.radar_readings[0, :]) > 30\n rospy.logerr(self.overtake_begin_ignore)\n if self.overtake_begin_ignore < 3:\n stay_overtake = True\n if not stay_overtake:\n self.overtake = False\n self.overtake_begin_counter = 0\n self.commands['speed'] *= 0\n # rospy.logerr('chcek for stay overtaking: ' + str(stay_overtake))\n else:\n stay_overtake = True\n\n if self.overtake and stay_overtake:\n self.commands['speed'] *= 1.5\n dy_v = -numpy.sin(self.yaw) * dx + numpy.cos(self.yaw) * dy + 7.5\n else:\n dy_v = -numpy.sin(self.yaw) * dx + numpy.cos(self.yaw) * dy\n dyaw_v = ref_state[2] - self.yaw\n # Correct yaw difference. 
dyaw_v 0..pi\n while dyaw_v > numpy.pi:\n dyaw_v -= 2*numpy.pi\n while dyaw_v < -numpy.pi:\n dyaw_v += 2*numpy.pi\n # Calculate steering command from dy_v, dx_v and dyaw_v\n steering_command = dy_v + dyaw_v * 1.5 / (1 + dx_v)\n # Compare with max steering angle\n if steering_command > 0.5:\n steering_command = 0.5\n elif steering_command < -0.5:\n steering_command = -0.5\n self.commands['steering_angle'] = steering_command", "def move(self, direction, speed):\n self.motor_A(direction, speed)\n self.motor_B(direction, speed)", "def right(self, speed=1):\n self.left_motor.forward(speed)\n self.right_motor.backward(speed)", "def _switch_motors(self, state):\n # Relay control was moved to its own package\n if self.relayExists:\n if not self._SwitchingMotors: # Prevent overlapping runs\n self._SwitchingMotors = True\n # Switch \"on\" to \"off\" if not safe to operate,\n # then we can just pass state to arlobot_usbrelay\n if not self._SafeToOperate:\n state = False\n rospy.wait_for_service('/arlobot_usbrelay/toggle_relay')\n rospy.loginfo(\"Switching motors.\")\n try:\n toggle_relay = rospy.ServiceProxy('/arlobot_usbrelay/toggle_relay', ToggleRelay)\n left_relay_result = toggle_relay(self.usbLeftMotorRelayLabel, state)\n right_relay_result = toggle_relay(self.usbRightMotorRelayLabel, state)\n if left_relay_result.toggleSuccess and right_relay_result.toggleSuccess:\n self._motorsOn = True\n else:\n self._motorsOn = False\n except rospy.ServiceException as e:\n rospy.loginfo(\"Service call failed: %s\" % e)\n self._SwitchingMotors = False\n else: # If no automated motor control exists, just set the state blindly.\n self._motorsOn = state", "def reverse_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=-int(left_speed))\n self.right_motor.run_forever(speed_sp=-int(right_speed))", "def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def set_control_commands(self, ref_state, ref_ind):\n super(DummyVehicle, self).set_control_commands(ref_state, ref_ind)\n safety_distance = 20.\n full_stop_distance = 15.\n\n\n self.check_if_overtake_is_finished()\n\n # Only continue from this point if there are some radar sensings\n if not numpy.any(self.radar_readings[0, :]):\n return\n\n\n min_dist = numpy.min(self.radar_readings[0, :])\n # Set speed.\n if min_dist < full_stop_distance:\n desired_speed = 0.\n self.overtake_begin_counter = 0\n\n elif min_dist < safety_distance:\n desired_speed = self.cruising_speed * min_dist / safety_distance\n else:\n desired_speed = self.cruising_speed\n\n # Every subclass can\n if not self.overtake:\n if self.check_if_overtake(min_dist):\n if self.check_if_safe_to_overtake():\n rospy.logwarn(str(self.vehicle_id) + ' start overtaking')\n self.overtake = True\n\n self.commands['speed'] = desired_speed" ]
[ "0.6224331", "0.5952258", "0.590249", "0.5720424", "0.5404056", "0.537829", "0.5353262", "0.5336252", "0.5292444", "0.52533853", "0.5243494", "0.52409536", "0.52345574", "0.5230445", "0.51993316", "0.5184321", "0.51791024", "0.5114285", "0.5100415", "0.50611645", "0.5027267", "0.49937862", "0.4986073", "0.49840745", "0.49837455", "0.49821302", "0.49346486", "0.49071613", "0.49071613", "0.4891825" ]
0.70682174
0
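The positive document above repeats the same two run_direct calls in three branches. A pure-function sketch of the same bias logic, which can be unit-tested without EV3 hardware (the motor objects themselves are assumed and not shown):

def duty_cycles(speed, bias, bias_dir):
    # Returns (left, right); the bias is added to whichever side bias_dir selects.
    left = speed + bias if bias_dir == -1 else speed
    right = speed + bias if bias_dir == 1 else speed
    return left, right

# Mirrors the three branches of forward() above:
assert duty_cycles(50, 10, 1) == (50, 60)    # right motor corrected
assert duty_cycles(50, 10, -1) == (60, 50)   # left motor corrected
assert duty_cycles(50, 10, 0) == (50, 50)    # no bias applied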
Verify the output of the 'system_platform' function
def test_system_platform():
    accepted_values = ['windows', 'linux']
    output = sh.system_platform()
    assert output in accepted_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_os_system(self):\n self.assertEqual(self.settings.OS_SYSTEM, platform.system())", "def check_platform(target_platform):\n if target_platform == PLATFORM_LINUX:\n pass\n elif target_platform == PLATFORM_WINDOWS:\n # requires wine\n try:\n subprocess.run([\"wine\", \"--help\"], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n except:\n log_error(\"wine needs to be installed\")\n else:\n log_error(f\"something is strange with the platform type '{target_platform}'\")", "def test_get_system(self):\n pass", "def test_check_system_python_api(self):\n\n errors, successes = check_system.check_system()\n self.assertTrue(len(errors) + len(successes) >= 4)", "def usefulFunction():\n print(platform.uname()) #displayed this computer's specifications", "def is_system(self) -> bool:", "def is_system(self) -> undefined.UndefinedOr[bool]:", "def check_platform():\n system = platform.system()\n distro = platform.platform()\n is_raspberry_pi = False\n try:\n info = open(\"/proc/cpuinfo\").read()\n except FileNotFoundError:\n is_raspberry_pi = False\n else:\n # bcm2708: Raspberry Pi 1\n # bcm2709: Raspberry Pi 2\n # bcm2710: Raspberry Pi 3\n is_raspberry_pi = 'BCM27' in info or 'ODROID' in info\n\n return system == \"Linux\" and (\n os.path.isfile('/proc/device-tree/hat/uuid') or\n 'boot2docker' in distro.lower() or\n is_raspberry_pi or\n os.path.isfile('/sys/hypervisor/uuid') or\n os.path.isdir('/var/lib/digitalocean')\n )", "def usefulFunction():\n print(platform.uname()) # Yay it told me about my computer - no idea what it means but thats cool", "def platform():\n return ['linux']", "def test_os_machine(self):\n self.assertEqual(self.settings.OS_MACHINE, platform.machine())", "def get_platform():\r\n platforms = [\r\n \"Android\",\r\n \"Linux.RaspberryPi\",\r\n \"Linux\",\r\n \"XBOX\",\r\n \"Windows\",\r\n \"ATV2\",\r\n \"IOS\",\r\n \"OSX\",\r\n \"Darwin\",\r\n ]\r\n\r\n for platform in platforms:\r\n if xbmc.getCondVisibility('System.Platform.%s' % platform):\r\n return platform\r\n return \"Unknown\"", "def identify_system() -> str:\n system = platform.system()\n if system not in [\"Linux\", \"Darwin\"]:\n raise ValueError(f\"Unsupported system {system}\")\n return system", "def this_host():\n host_os = platform.system()\n print('This platform OS is: ', host_os)\n return", "def get_platform():\n platforms = [\n \"Android\",\n \"Linux.RaspberryPi\",\n \"Linux\",\n \"XBOX\",\n \"Windows\",\n \"ATV2\",\n \"IOS\",\n \"OSX\",\n \"Darwin\",\n ]\n\n for platform in platforms:\n if xbmc.getCondVisibility('System.Platform.'+platform):\n return platform\n return \"Unknown\"", "def check_platform():\n if os.getcwd() != os.path.dirname(os.path.abspath(__file__)):\n error = \"must be ran in the directory it's located at\"\n if os.path.sep != '/':\n error = \"a unix-like operating system is required\"\n elif not shutil.which('dpkg-deb'):\n error = \"cannot find dpkg-deb\"\n elif os.getuid() != 0:\n error = \"must be ran as root (or with fakeroot)\"\n else:\n return\n sys.exit(\"{}: error: {}\".format(sys.argv[0], error))", "def test_check_system_cmd_line(self):\n\n intro = \"Checking your system, this may take a few seconds...\"\n\n cmd = ['pydroid', 'check']\n p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n out = p.communicate()[0]\n self.assertIn(intro, out)\n self.assertTrue('Success' in out or 'Fix' in out)", "def test_os_processor(self):\n self.assertEqual(self.settings.OS_PROCESSOR, platform.processor())", "def _validate_os(module):\n rc, out, 
err = module.run_command(['cat', '/etc/os-release'])\n\n # Validate for a BSD string in output\n if 'BSD' not in out:\n msg_err = 'Error: Unsupported OS. This can only be used on BSD systems.'\n module.fail_json(msg=msg_err)", "def usefulFunction():\n# I think the uname platform is a func. for findout out the information of the computer\n print(platform.uname())", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()" ]
[ "0.7402488", "0.73765874", "0.7269435", "0.7166334", "0.706433", "0.7028891", "0.6924635", "0.6917631", "0.6908076", "0.67770875", "0.6775674", "0.67477757", "0.672745", "0.6721273", "0.6716984", "0.6715146", "0.6706232", "0.6676223", "0.66573375", "0.66256523", "0.66131", "0.66131", "0.66131", "0.66131", "0.66131", "0.66131", "0.66131", "0.66131", "0.66131", "0.66131" ]
0.84509456
0
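The sh module under test never appears in this excerpt, so the following is only a plausible reconstruction of system_platform() that the test above would accept, lowercasing the OS name reported by the standard library:

import platform

def system_platform():
    # Assumed behavior: 'Linux' -> 'linux', 'Windows' -> 'windows'.
    return platform.system().lower()

print(system_platform())  # e.g. 'linux' on a Linux host

Note that on macOS this reconstruction would return 'darwin', which the test's accepted values reject, so the tool presumably targets Windows and Linux only.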
Verify the output of the 'format_resource' function
def test_format_resource():
    mock_name = "rg-001"
    output = sh.format_resource(mock_name)
    assert output == "rg-001"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_valid_resource():\n mock_name = \"rg-001\"\n output = sh.is_valid_resource(mock_name)\n assert output is True", "def test_workload_get_command_human_readable(\n workload_get_success, workload_get_success_hr\n):\n hr_output = prepare_workload_get_output(workload_get_success)\n assert hr_output == workload_get_success_hr", "def test_str(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected result of the str function\n self.assertEqual(str(resource), (\"ID: 1 \\nTitle: White Noise \"\\\n \"\\nCreator: Don DeLillo \\nSummary: Delillo's White Noise follows \"\\\n \"narrator Jack Gladney, a professor at a \\nsmall Liberal Arts \"\\\n \"college and describes an academic year. Jack teaches \\nat ... \"\\\n \"\\nGenre: sci-fi \\nLanguage: English \\nYear: 1985 \"\\\n \"\\nCountry: US \\nLength: 326p \\nType: book \"\\\n \"\\nKeywords: culture, life, society, survival\"))", "def check_output(out: Union[str, bytes], fmt: str) -> None:\n if fmt in [\"png\", \"pdf\"]:\n assert isinstance(out, bytes)\n elif fmt in [\"vega\", \"vega-lite\"]:\n assert isinstance(out, str)\n dct = json.loads(out)\n assert len(dct) > 0\n else:\n assert isinstance(out, str)\n assert len(out) > 0", "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def format_resource(resource, permissions=None, basic_info=False):\n def fmt_res(res, perms, info):\n result = {\n u\"resource_name\": str(res.resource_name),\n u\"resource_display_name\": str(res.resource_display_name or res.resource_name),\n u\"resource_type\": str(res.resource_type),\n u\"resource_id\": res.resource_id\n }\n if not info:\n result.update({\n u\"parent_id\": res.parent_id,\n u\"root_service_id\": res.root_service_id,\n u\"children\": {},\n u\"permission_names\": list() if perms is None else format_permissions(perms)\n })\n return result\n\n return evaluate_call(\n lambda: fmt_res(resource, permissions, basic_info),\n httpError=HTTPInternalServerError,\n msgOnFail=\"Failed to format resource.\",\n content={u\"resource\": repr(resource), u\"permissions\": repr(permissions), u\"basic_info\": str(basic_info)}\n )", "def test_object_provision_command_when_valid_arguments_provided_human_readable(\n mock_client, object_provision_success, object_provision_success_hr\n):\n resp = prepare_object_provision_output(object_provision_success)\n assert resp == object_provision_success_hr", "def testGetStringResourceInfo(self):\n stringResource = self._createSampleResource(Tag.string)\n stringTagInfo = self.converter._getInfoFromResourceTag(stringResource)\n 
self.assertTrue(len(stringTagInfo) == 1)\n self.assertEqual(stringTagInfo[0][self.converter.TAG],\n stringResource.tag)\n self.assertEqual(stringTagInfo[0][self.converter.TEXT],\n stringResource.text)\n self.assertEqual(stringTagInfo[0][self.converter.NAME_FLAG],\n stringResource.attrib['name'])\n self.assertEqual(stringTagInfo[0][self.converter.TRANSLATABLE_FLAG],\n stringResource.attrib['translatable'])", "def test_virtual_service_create_command_for_human_readable(\n virtual_service_create_success, virtual_service_success_hr\n):\n resp = prepare_virtual_service_output(virtual_service_create_success)\n assert resp == virtual_service_success_hr", "def test_repr(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\", \n [\"culture\", \"survival\", \"life\", \"society\"])\n \n \n # Assert expected result of the repr function\n self.assertEqual(repr(resource), (\"Resource(1, 'White Noise', \"\\\n \"Name('Don', '', 'DeLillo'), \"\\\n \"'Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.', 'sci-fi', 'English', \"\\\n \"1985, 'US', 326, 'book', \"\\\n \"'['culture', 'survival', 'life', 'society']')\"))", "def test_validation_get_valid_formats(self):\n self.assertIsInstance(api.validation.fetch_formats(), dict)", "def test_str(self):\n \n # Create a Resource object\n book = Book(\"Penguin Group\", \"New York\", \"fiction\", 1, \"White Noise\", \n Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. 
Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected result of the str function\n self.assertEqual(str(book), (\"ID: 1 \\nTitle: White Noise \"\\\n \"\\nCreator: Don DeLillo \\nSummary: Delillo's White Noise follows \"\\\n \"narrator Jack Gladney, a professor at a \\nsmall Liberal Arts \"\\\n \"college and describes an academic year. Jack teaches \\nat ... \"\\\n \"\\nGenre: sci-fi \\nLanguage: English \\nYear: 1985 \"\\\n \"\\nCountry: US \\nLength: 326p \\nType: book \"\\\n \"\\nKeywords: culture, life, society, survival\\nPublisher: \"\\\n \"Penguin Group \\nCity: New York \\nCategory: fiction\"))", "def test_nonstandard_resource(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['resources']['scalar'].append({'name': 'chocolate', 'value': 1.0 })\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'NONSTANDARD_RESOURCE')", "def test_format_variations(test_format, expect_format):\n format_schema = sd.DeploymentFormat()\n try:\n result_format = format_schema.deserialize(test_format)\n result_format.pop(\"$schema\", None)\n assert result_format == expect_format\n except colander.Invalid:\n pytest.fail(f\"Expected format to be valid: [{test_format}]\")", "def test_formatResult(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x.formatResult(3), '3')", "def test_enforcement_boundary_create_command_human_readable(\n enforcement_boundary_success, enforcement_boundary_success_hr\n):\n hr_output = prepare_enforcement_boundary_create_output(enforcement_boundary_success)\n\n assert hr_output == enforcement_boundary_success_hr", "def test_workloads_list_command_human_readable(\n workloads_list_success, workloads_list_success_hr\n):\n hr_output = prepare_workloads_list_output(workloads_list_success)\n assert hr_output == workloads_list_success_hr", "def test_make_json_simple(self):\n resources = get_test_resources()\n output, filename = make_json(**resources)\n output = json.loads(output)\n self.assertEqual(\n output[\"text\"][\"0\"][\"0\"][\"0\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Text passages should be parsed correctly\"\n )\n self.assertEqual(\n output[\"text\"][\"1\"][\"0\"][\"0\"], \"Qui tecum cupis esse meos ubicumque libellos \",\n \"Text passages should be parsed correctly\"\n )\n\n self.assertEqual(\n output[\"text\"][\"1\"][\"0\"][\"1\"], \"Et comites longae quaeris habere viae, Something\",\n \"Text passages should be parsed correctly and note kept\"\n )\n self.assertEqual(\n output[\"text\"][\"1\"][\"1\"][\"3\"], \"Crede slug. mihi, nimium Martia turba sapit. 
\",\n \"Text passages should be parsed correctly and abbr kept\"\n )\n self.assertEqual(\n filename, \"textgroup__work__lat.json\",\n \"Filename should be created in a stable and understandable manner\"\n )\n self.assertEqual(\n output[\"original-urn\"], \"urn:cts:latinLit:textgroup.work.version-lat1\",\n \"Original URN should be fed\"\n )\n self.assertEqual(\n output[\"urn\"], \"urn:cts:latinLit:textgroup.work.version-lat1-simple\",\n \"CLTK URN should be suffixed\"\n )\n self.assertEqual(\n output[\"credit\"], \"\",\n \"Credit should be empty by default\"\n )\n self.assertEqual(\n output[\"meta\"], \"book-poem-line\",\n \"meta should reflect the citation scheme\"\n )\n self.assertEqual(\n output[\"author\"], \"textgroup\",\n \"Author name should be the English textgroup name\"\n )\n self.assertEqual(\n output[\"work\"], \"work\",\n \"Work name should be the English work name\"\n )\n self.assertEqual(\n output[\"edition\"], \"description\",\n \"We should have the English description\"\n )", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def print_resource():\n logging.info(\"__package__: %s\", __package__)\n logging.info(\"__name__: %s\", __name__)\n logging.info(\"JSON_RESOURCE: %s\", JSON_RESOURCE)\n logging.info(\"JSON_PATH: %s\", JSON_PATH)", "def test_repr(self):\n \n # Create a Resource object\n book = Book(\"Penguin Group\", \"New York\", \"fiction\", 1, \"White Noise\", \n Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n \n # Assert expected result of the repr function\n self.assertEqual(repr(book), (\"Book(1, 'White Noise', \"\\\n \"Name('Don', '', 'DeLillo'), \"\\\n \"'Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. 
Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.', 'sci-fi', 'English', \"\\\n \"1985, 'US', 326, 'book', \"\\\n \"'['culture', 'survival', 'life', 'society']', \"\\\n \"'Penguin Group', 'New York', 'fiction')\"))", "def test_output_invalid(self):\n assert (\n self.route.output_invalid(hug_core.output_format.json).route[\"output_invalid\"]\n == hug_core.output_format.json\n )", "def validate_format(self):\n raise NotImplementedError()", "def test_colorify(self):\n\n # pylint: disable=protected-access\n\n expected = False\n actual = self.file_instance.exists()\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is not designed for colorify\n expected = self.to_print[\"basic_string\"]\n actual = Prints(None, \"Hehehe\", output_file=None, only_on_file=False)._colorify(\n self.to_print[\"basic_string\"]\n )\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is designed for colorify + Status is UP\n expected = Fore.BLACK + Back.GREEN + self.to_print[\"basic_string\"]\n actual = Prints(\n [\"This is a test\", PyFunceble.STATUS.official.up],\n \"Generic\",\n output_file=None,\n only_on_file=False,\n )._colorify(self.to_print[\"basic_string\"])\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is designed for colorify + Status is DOWN\n expected = Fore.BLACK + Back.RED + self.to_print[\"basic_string\"]\n actual = Prints(\n [\"This is a test\", PyFunceble.STATUS.official.down],\n \"Generic\",\n output_file=None,\n only_on_file=False,\n )._colorify(self.to_print[\"basic_string\"])\n\n self.assertEqual(expected, actual)\n\n # Test with a template that is designed for colorify + Status is\n # UNKNOWN or INVALID\n expected = Fore.BLACK + Back.CYAN + self.to_print[\"basic_string\"]\n actual = Prints(\n [\"This is a test\", PyFunceble.STATUS.official.invalid],\n \"Generic\",\n output_file=None,\n only_on_file=False,\n )._colorify(self.to_print[\"basic_string\"])\n\n self.assertEqual(expected, actual)", "def test_ip_lists_get_command_human_readable(ip_lists_success, ip_lists_success_hr):\n hr_output = prepare_ip_lists_get_output(ip_lists_success)\n assert hr_output == ip_lists_success_hr", "def _test_output_formatting_func(self, sample: Any):\n try:\n if not type(sample) == iter:\n self._formatting_func_return_types(format=sample)\n return True\n except Exception:\n raise ValueError(\n f\"formatting_func must return {self._formatting_func_return_types.__annotations__['format']}, not {type(sample)}\"\n )", "def test_get_resource_string(self):\n # pylint: disable=protected-access\n student_view_html = self.xblock.student_view().content\n test_result = AdaptiveNumericInput.get_resource_string('view.html')\n test_result = test_result.format(\n self=self,\n attempts_message=self.xblock.get_attempts_message(),\n display_name=self.xblock.display_name,\n feedback_label='',\n feedback_message='',\n hint_message='',\n hintdisplay_class=self.xblock.get_css_hint_button_display(),\n hide_submit_class=self.xblock.get_css_hide_submit(),\n indicator_class=self.xblock.get_css_indicator(),\n indicator_visibility_class=self.xblock.get_css_indicator_hidden(),\n progress_message=self.xblock.get_progress_message(),\n prompt=self.xblock.prompt,\n saved_message='',\n student_answer=self.xblock.student_answer,\n submitted_message='',\n )\n self.assertEquals(student_view_html, test_result)", "def PrintResource(resource):\n print resource.resource_id.text, resource.GetResourceType()", "def 
check_empty_desc_file(out):\n return MISSING_RESOURCE in out.lower()", "def test_format():\n device = \"/dev/sdX1\"\n mock = MagicMock(return_value=0)\n with patch.dict(disk.__salt__, {\"cmd.retcode\": mock}), patch(\n \"salt.utils.path.which\", MagicMock(return_value=True)\n ):\n assert disk.format_(device) is True" ]
[ "0.64937514", "0.6144749", "0.60763", "0.6063581", "0.59908366", "0.5959676", "0.59036434", "0.58681643", "0.58234644", "0.58226424", "0.58003753", "0.5777817", "0.5730031", "0.57299966", "0.57008356", "0.56684434", "0.5653117", "0.5652297", "0.5613386", "0.5596623", "0.55935276", "0.55754024", "0.5567154", "0.55592537", "0.5557003", "0.5555431", "0.5547428", "0.55448437", "0.55443937", "0.5532806" ]
0.76800674
0
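format_resource is pinned down by a single identity case ('rg-001' formats to itself), so any richer behavior is a guess. One sketch consistent with that test, assuming the formatter is a whitespace-trimming, lowercasing normalizer:

def format_resource(name):
    # Assumed normalization: strip surrounding whitespace, lowercase.
    return name.strip().lower()

assert format_resource('rg-001') == 'rg-001'     # the documented case
assert format_resource('  RG-001 ') == 'rg-001'  # assumed behavior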
Verify the output of the 'is_valid_resource' function
def test_is_valid_resource():
    mock_name = "rg-001"
    output = sh.is_valid_resource(mock_name)
    assert output is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def _resource_name_check(self, resource_name):\n return self._name_check(resource_name, 'resources')", "def test_nonstandard_resource(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['resources']['scalar'].append({'name': 'chocolate', 'value': 1.0 })\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'NONSTANDARD_RESOURCE')", "def _check_r_res(res, o_exp, g_exp, p_exp):\n print green(\"Checking %s owner, group, permissions. \"\n \"Expected: %s, %s, %s\" % (res, o_exp, g_exp, p_exp))\n if exists(res):\n resp = run('stat -c %%U,%%G,%%a %s' % res)\n o_act, g_act, p_act = map(str.strip, resp.split(','))\n if o_act != o_exp or g_act != g_exp or p_act != p_exp:\n abort(red(\"Resource %s params: %s. Expected: %s\" % (\n res, (o_act, g_act, p_act), (o_exp, g_exp, p_exp))))\n print green(\"Resource %s checking passed\" % res)\n else:\n abort(red(\"Resource %s is not exists\" % res))", "def check_validity(self):", "def validate():", "def validate(self):\n # Validate all mandatory keys are present\n if not self.mandatory_keys.issubset(set(self.resource)):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] miss a \"\n \"mandatory key. Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource does not contains extra keys\n if not set(self.resource).issubset(self.keys):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] contains \"\n \"extra keys. 
Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource value type\n for key, value in self.resource.items():\n if not isinstance(value, self.__class__.MODEL[key][0]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data type (expected: %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][0]))\n # For str type validate the content as according the regex\n if self.__class__.MODEL[key][0] is str:\n if not re.match(self.__class__.MODEL[key][1], value):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))\n # For list type validate the content as according the regex\n if self.__class__.MODEL[key][0] is list:\n if not all([re.match(self.__class__.MODEL[key][1], v)\n for v in value]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))", "def __validateResourceStateEntry(self, resource: Dict[str, str]):\n if AZ_RESOURCE_ID not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' % (\n self.fullName, AZ_RESOURCE_ID))\n if SID not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' % (self.fullName, SID))\n if ARM_TYPE not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' % (self.fullName, ARM_TYPE))", "def mex_validation(resource):\n resource_name = [n for n in list(resource._fields) if getattr(resource,n) != '']\n for name in list(resource_name):\n url = getattr(resource,name)\n log.debug(\"resource: %s\" % url)\n try:\n o = urlparse.urlsplit(url)\n url_path = o.path\n log.debug('url_path :%s' % url_path)\n m = re.match('\\/(?P<service>[\\w-]+)\\/(image[s]?\\/|)(?P<id>[\\w-]+)', url_path)\n if m is not None:\n if m.group('service') == 'image_service' or m.group('service') == 'data_service': #check for data_service\n if 'pixels' not in url_path: #if false requires a redirect\n ident = m.group('id') #seaching a plan image_service or data_service url\n if check_access(ident) is True:\n continue #check next resource\n\n# # Try to route internally through bisque\n# resp = request_internally(url)\n# if resp.status_int < 400:\n# if resp.status_int == 302:\n# #reset the url to the redirected url\n# redirect_url = resp.headers.get('Location')\n# if redirect_url is not None: #did not find the redirect\n# log.debug('Redirect Url: %s' % redirect_url)\n# resource = resource._replace(**{name:redirect_url})\n# continue\n# else:\n# continue\n\n # Try to route externally\n resp = request_externally(url)\n if resp.status_code < 400:\n if resp.status_code == 302:\n #reset the url to the redirected url\n redirect_url = resp.headers.get('Location')\n if redirect_url is not None: #did not find the redirect\n log.debug('Redirect Url: %s' % redirect_url)\n resource = resource._replace(**{name:redirect_url})\n continue\n else:\n continue\n\n raise InvalidResourceError(resource_url=url, error_code=403, error_message='Resource: %s Not Found' % url)\n\n except StandardError:\n log.exception (\"While retrieving URL %s\" %str(resource))\n raise InvalidResourceError(resource_url=url, error_code=403, error_message='Resource: %s Not Found' % url)\n\n return resource", "def test(self, resource):\n 
return resource.meta.fields[self.name].present(resource)", "def test__try_run_rest(self):\n\n with self.assertRaises(ValueError) as error:\n self.client.data_object.reference._try_run_rest(\"\", \"\", \"\", \"validate\")\n check_error_message(self, error, \"'validate' not supported!\")", "def _check_validity(self):\n pass", "def does_resource_exist(resource):\n try:\n resource.load()\n return True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'ValidationError':\n return False\n else:\n raise e", "def test_validate_get_single_resource(client):\n response = client.get('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def IsOk(self):\r\n \r\n return True", "def test_format_resource():\n mock_name = \"rg-001\"\n output = sh.format_resource(mock_name)\n assert output == \"rg-001\"", "def is_resource(space, w_obj):\n return space.wrap(space.is_resource(w_obj))", "def validate(self, response):\n return response[\"status_code\"] == 1", "def test_custom_resource():\n data = {\n 'name': 'Wort wort',\n 'slug': 'sluggy',\n 'not_valid': 'nooo'\n }\n instance = PeopleResource(**data)\n # We should have this attribute\n assert hasattr(instance, 'name')\n # But this one is missing\n assert not hasattr(instance, 'another_thing')\n # and this one is not valid\n assert not hasattr(instance, 'not_valid')\n assert instance.__str__() == '<People | Wort wort>'\n # It should also have parent Meta attributes\n assert hasattr(instance.Meta, 'valid_status_codes')", "def test_resource_err(self, integrationtest, k8sconfig):\n # Fixtures.\n config = self.k8sconfig(integrationtest, k8sconfig)\n err_resp = (K8sResource(\"\", \"\", \"\", False, \"\"), True)\n MM = MetaManifest\n\n # Sanity check: ask for a valid StatefulSet.\n _, err = k8s.resource(config, MM(\"apps/v1\", \"StatefulSet\", \"ns\", \"name\"))\n assert not err\n\n # Ask for a StatefulSet on a bogus API endpoint.\n assert k8s.resource(config, MM(\"bogus\", \"StatefulSet\", \"ns\", \"name\")) == err_resp\n\n # Ask for a bogus K8s kind.\n assert k8s.resource(config, MM(\"v1\", \"Bogus\", \"ns\", \"name\")) == err_resp\n assert k8s.resource(config, MM(\"\", \"Bogus\", \"ns\", \"name\")) == err_resp", "def is_valid(self): # -> bool:\n ...", "def test_is_valid_invalid_resume(self):\n self.assertFalse(resumeschema.is_valid(self.invalid_resume))", "def test_object_provision_command_when_valid_arguments_provided_human_readable(\n mock_client, object_provision_success, object_provision_success_hr\n):\n resp = prepare_object_provision_output(object_provision_success)\n assert resp == object_provision_success_hr", "def is_valid(self):\n return _drafter.check_blueprint(self.content)", "async def validate(self, ctx: Context, argument: str) -> bool:\n return True", "def test_col_resource_status_valid():\n ident = _id()\n status = proj.fetch('test', ident)\n status = proj.status('test', ident)\n assert status == 'complete'", "def check_errors(self) -> None:", "def test_validate(self):\n pass", "def test_has_valid_resource(self):\n ev = self.qctx(Evidence)()\n allEvidence = set(ev.load())\n qualityEvidence = set()\n for evobj in allEvidence:\n ref = evobj.reference()\n if isinstance(ref, Document):\n doi = ref.doi()\n if doi:\n for pat in DOI_REGEXEN:\n if pat.match(doi):\n qualityEvidence.add(evobj)\n break\n else: # no break\n continue\n\n urls = ref.uri.get()\n good_uris = True\n for uri in urls:\n parsed = urlparse(uri)\n if not parsed.scheme or not parsed.netloc:\n 
good_uris = False\n break\n\n if not good_uris:\n continue\n elif isinstance(ref, Website):\n urls = ref.url.get()\n urls = list(urls)\n print(urls)\n good_uris = True\n for uri in urls:\n parsed = urlparse(uri)\n if not parsed.scheme or not parsed.netloc:\n good_uris = False\n break\n\n if not good_uris:\n continue\n qualityEvidence.add(evobj)\n\n self.assertSetEqual(allEvidence, qualityEvidence,\n msg='\\n'.join(str(x.reference()) for x in (allEvidence - qualityEvidence)))", "def validate_result(self):\n raise NotImplementedError" ]
[ "0.70926225", "0.6632987", "0.6630718", "0.6436435", "0.6400924", "0.6346754", "0.63392484", "0.6337061", "0.62401545", "0.6238799", "0.6135413", "0.6118114", "0.6117826", "0.60815805", "0.6058132", "0.6018527", "0.6007674", "0.6001032", "0.59417886", "0.5941082", "0.59347427", "0.5927204", "0.5922923", "0.5890479", "0.58760464", "0.5871992", "0.58706194", "0.5857708", "0.5855135", "0.5852116" ]
0.7810775
0
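Similarly, is_valid_resource is only observed returning True for 'rg-001'. A hedged validator sketch that accepts that shape; the exact naming rule (lowercase alphanumeric segments joined by single hyphens) is an assumption:

import re

_RESOURCE_RE = re.compile(r'^[a-z0-9]+(-[a-z0-9]+)*$')

def is_valid_resource(name):
    # True iff the name matches the assumed naming rule.
    return bool(_RESOURCE_RE.match(name))

assert is_valid_resource('rg-001') is True    # the documented case
assert is_valid_resource('rg--001') is False  # assumed: no empty segments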
Verify the output of the 'random_password' function
def test_random_password():
    output = sh.random_password()
    assert isinstance(output, str) is True
    assert len(output) == 16
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def verify_password(entered_password):\n return PASSWORD_RE.match(entered_password)", "def anypassword():\n\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n password = ''.join(random.choice(characters) for x in range(size))\n\n return password", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def randompassword():\n\n chars = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n password = ''.join(random.choice(chars) for x in range(size))\n\n return password", "def check_password(pw):\n if (pw == password):\n print('welcome password match')\n\n else:\n print('Wrong password')", "def passwordGen() :\n\treturn __randomString(12)", "def password():\n chars = \"abcdefghijklmnopqsrtuvwxyzABCDEFGHIJKLMNOPQSRTUVWXYZ\"\\\n \"123456890!#%&-_*<>+=()\"\n return ''.join(random.sample(chars, 15))", "def verify_password(saved_password, password):\n return check_password_hash(saved_password, password)", "def verify_match(password, verify):\n return password == verify", "def random_password():\n pass_len = secrets.choice(range(32, 49))\n return ''.join(secrets.choice(string.printable)\n for _ in range(pass_len))", "def test_password_salts_are_random(self):\n self.user.password = '123456'\n self.user2.password = '123456'\n self.assertTrue(self.user.password_hash != self.user2.password_hash)", "def verify_password(stored_password, provided_password):\n #print(provided_password)\n salt = stored_password[:64]\n stored_password = stored_password[64:]\n pwdhash = hashlib.pbkdf2_hmac('sha512', \n provided_password.encode('utf-8'), \n salt.encode('ascii'), \n 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n #print(pwdhash)\n return pwdhash == stored_password", "def check_credentials(input_password, real_password):\n return pwd_context.verify(input_password, real_password)", "def generate_password():\n selection = string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def randompassword():\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n return ''.join(random.choice(characters) for x in range(size))", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def verify_password(stored_passwd, provided_passwd):\n salt = stored_passwd[:64]\n stored_password 
= stored_passwd[64:]\n pwdhash = hashlib.pbkdf2_hmac(\n 'sha512', provided_passwd.encode('utf-8'), salt.encode('ascii'), 100000\n )\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def matches_password_verify(password, verify):\n if password and not password == verify:\n return \"Your passwords didn't match.\"\n else:\n return \"\"", "def test_hash_password(matrix, mock_check_output):\n result = matrix.hash_password(\"testpassword\")\n assert result == \"mocked-output\"", "def test_invalid_password(self):\n pass", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def password(self) -> str:", "def verify_password(self, password):\n return self.PASS == password", "def password_generator(password_lenght):\r\n password = \"\"\r\n\r\n try:\r\n if password_lenght >=1:\r\n for i in range(password_lenght):\r\n choice = random.choice(symbols)\r\n password += str(choice)\r\n print(f\"Your password is: {password} \\nTnank you!\")\r\n return password\r\n else:\r\n return 0\r\n except Exception:\r\n pass", "def verify_password(stored_password, provided_password):\r\n salt = stored_password[:64]\r\n stored_password = stored_password[64:]\r\n pwdhash = hashlib.pbkdf2_hmac('sha512', \r\n provided_password.encode('utf-8'), \r\n salt.encode('ascii'), \r\n 100000)\r\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\r\n return pwdhash == stored_password", "def check_password(self, author, password):\n return author.hashed_password == generate_hashed_passwd(password, author.hashed_password)" ]
[ "0.74017847", "0.72623706", "0.72584105", "0.7210661", "0.720449", "0.71868634", "0.71583515", "0.7146903", "0.70125735", "0.6999681", "0.6986031", "0.69559073", "0.6949859", "0.6937893", "0.693696", "0.6924991", "0.6912025", "0.6911347", "0.6883031", "0.6875243", "0.68527275", "0.6836567", "0.6797564", "0.6751813", "0.6744038", "0.6736064", "0.67315954", "0.6722271", "0.6713627", "0.67122436" ]
0.8123155
0
Verify the output of the 'list_differences' function
def test_list_differences():
    mock_list_a = ['a', 'b', 'c', 'd', 'e']
    mock_list_b = ['a', 'b', 'c']
    output = sh.list_differences(mock_list_a, mock_list_b)
    assert output == ['d', 'e']
    output = sh.list_differences(mock_list_b, mock_list_a)
    assert output == []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_difference_between_two_lists():\n # same lists, no error\n list1 = list2 = [0, 1, 2]\n util.check_difference_between_two_lists(list1, list2, name=\"same case\")\n\n # diff lists with same unique numbers\n list_1 = [0, 1, 2]\n list_2 = [1, 2, 0]\n with pytest.raises(ValueError) as err_info:\n util.check_difference_between_two_lists(list_1, list_2, name=\"diff case\")\n assert \"diff case are not identical\" in str(err_info.value)\n\n # totally diff lists\n list_1 = [0, 1, 2]\n list_2 = [3, 4, 5]\n with pytest.raises(ValueError) as err_info:\n util.check_difference_between_two_lists(list_1, list_2, name=\"diff case\")\n assert \"diff case are not identical\" in str(err_info.value)", "def test_find_diff(self):\n\n # Ensure lists and sets are handled appropriately\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets([3, 2], [2, 3, 2]))\n self.assertEqual([[1], [2, 3], [4]],\n utils.find_diff_of_lists_and_sets([1, 2, 3], [2, 3, 4]))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets({3, 2}, {2, 3}))\n self.assertEqual([[1], [2, 3], [4]],\n utils.find_diff_of_lists_and_sets({1, 2, 3}, {2, 3, 4}))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets({2, 3}, [2, 3]))\n self.assertEqual([[1], [2, 3], [4]],\n utils.find_diff_of_lists_and_sets([1, 2, 3], {2, 3, 4}))\n self.assertEqual([None, {1, 2}],\n utils.find_diff_of_lists_and_sets(None, {1, 2}))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_lists_and_sets(None, None))\n\n # Ensure ints and floats are handled appropriately\n self.assertEqual(1, utils.find_diff_of_numbers(5, 4))\n self.assertEqual(1.0, utils.find_diff_of_numbers(5.0, 4.0))\n self.assertEqual(1.0, utils.find_diff_of_numbers(5.0, 4))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(5.0, 5.0))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(5, 5.0))\n self.assertEqual([4, None],\n utils.find_diff_of_numbers(4, None))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(None, None))\n\n # Ensure strings are handled appropriately\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_strings_and_bools(\"Hello\", \"Hello\"))\n self.assertEqual([\"Hello\", \"team\"],\n utils.find_diff_of_strings_and_bools(\"Hello\", \"team\"))\n self.assertEqual(\"unchanged\",\n utils.find_diff_of_strings_and_bools(None, None))\n\n # Ensure dates are handled appropriately\n a = datetime(2021, 6, 28)\n b = datetime(2021, 6, 27, 1)\n self.assertEqual(\"unchanged\", utils.find_diff_of_dates(a, a))\n self.assertEqual(\"+23:00:00\", utils.find_diff_of_dates(a, b))\n self.assertEqual(\"-23:00:00\", utils.find_diff_of_dates(b, a))\n self.assertEqual([\"06/28/21 00:00:00\", None], utils.find_diff_of_dates(a, None))\n self.assertEqual(\"unchanged\", utils.find_diff_of_numbers(None, None))\n\n # Ensure that differencing dictionaries is handled appropriately\n dict1 = {\n \"a\": 0.25,\n \"b\": 0.0,\n \"c\": [1, 2],\n \"d\": datetime(2021, 6, 28),\n \"e\": \"hi\",\n \"f\": \"hi2\"\n }\n dict2 = {\n \"a\": 0.25,\n \"b\": 0.01,\n \"c\": [2, 3],\n \"d\": datetime(2021, 6, 27, 1),\n \"e\": \"hihi\",\n \"g\": 15\n }\n expected_diff = {\n \"a\": \"unchanged\",\n \"b\": -0.01,\n \"c\": [[1], [2], [3]],\n \"d\": \"+23:00:00\",\n \"e\": [\"hi\", \"hihi\"],\n \"f\": [\"hi2\", None],\n \"g\": [None, 15]\n }\n self.assertDictEqual(expected_diff, utils.find_diff_of_dicts(dict1, dict2))", "def _compare_list(self, name, actual, expect):\n self.op_test.assertListEqual(\n actual.recursive_sequence_lengths(),\n 
expect[1],\n \"Output (\" + name + \") has different lod at \" + str(place),\n )", "def _compare_list(self, name, actual, expect):\n with fluid.dygraph.base.guard(place=place):\n self.op_test.assertListEqual(\n actual.value()\n .get_tensor()\n .recursive_sequence_lengths(),\n expect[1],\n \"Operator (\"\n + self.op_type\n + \") Output (\"\n + name\n + \") has different lod at \"\n + str(place)\n + \" in dygraph mode\",\n )", "def differences(data: list) -> list:\n differences = []\n iterable, copy = tee(data)\n next(copy) # adjusts copy of my iterable up 1 element\n for x, y in zip(iterable, copy):\n differences.append(abs(x - y))\n\n return differences", "def differences(input_list):\n output_list = []\n for x in range(1,len(input_list)):\n output_list.append(input_list[x]-input_list[x-1])\n return output_list", "def test_difference(self, client):\n\n expected = {\n 'a': [0,2,4,6,8],\n 'b': [4,6,8,10,12,14,16],\n 'result': [0, 2]\n }\n\n res = client.post('/api/v1/difference', json={'a': expected['a'], 'b': expected['b'] })\n assert res.status_code == 200\n assert res.json['data'] == expected['result']\n assert res.json['status'] == 2000", "def test_ddiff_v2(self):\n print \"\\n\"\n for d in ddiff_v2(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def unorderable_list_difference(expected, actual, ignore_duplicate=False):\r\n missing = []\r\n unexpected = []\r\n while expected:\r\n item = expected.pop()\r\n try:\r\n actual.remove(item)\r\n except ValueError:\r\n missing.append(item)\r\n if ignore_duplicate:\r\n for lst in expected, actual:\r\n try:\r\n while True:\r\n lst.remove(item)\r\n except ValueError:\r\n pass\r\n if ignore_duplicate:\r\n while actual:\r\n item = actual.pop()\r\n unexpected.append(item)\r\n try:\r\n while True:\r\n actual.remove(item)\r\n except ValueError:\r\n pass\r\n return missing, unexpected\r\n\r\n # anything left in actual is unexpected\r\n return missing, actual", "def list_difference(list1, list2):\r\n diff_list = []\r\n for item in list1:\r\n if not item in list2:\r\n diff_list.append(item)\r\n else:\r\n if list2.count(item) != list1.count(item) and not item in diff_list:\r\n diff_list.append(item) \r\n return diff_list", "def significantly_different(\n self, list_a, list_b,\n significance_level=SIGNIFICANCE_LEVEL): # pragma: no cover\n step_result = self.api.m.python(\n 'Checking sample difference',\n self.api.resource('significantly_different.py'),\n [json.dumps(list_a), json.dumps(list_b), str(significance_level)],\n stdout=self.api.m.json.output())\n results = step_result.stdout\n if results is None:\n assert self.dummy_builds\n return True\n significantly_different = results['significantly_different']\n step_result.presentation.logs[str(significantly_different)] = [\n 'See json.output for details']\n return significantly_different", "def checkdifferences(oldfile, changelist, num):\n if num == 1: #combining the unique values of a list & file into 1 list\n newcontent = changelist\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n combined = combinelists(currentcontent, newcontent)\n return combined\n if num == 2: # combine the unique values of 2 files into 1 list\n currentcontent = csv_read('{}.csv'.format(changelist)) #clientlist\n combined = []\n for each in currentcontent:\n # for elk in each:\n combined + each\n newlst = combinelists(currentcontent, combined)\n return newlst\n if num == 3: # removing the doubles from each list\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n changecontent = changelist\n 
newlist = dividelists(currentcontent, changecontent)\n return newlist", "def get_list_diff(list1, list2):\n\n list3 = list(np.setdiff1d(list1,list2))\n return(list3)", "def test_ddiff_v1(self):\n print \"\\n\"\n for d in ddiff_v1(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def test_ndiff(self):\n print \"\\n\"\n for d in ndiff(a, b): print d", "def sorted_list_difference(expected, actual):\r\n i = j = 0\r\n missing = []\r\n unexpected = []\r\n while True:\r\n try:\r\n e = expected[i]\r\n a = actual[j]\r\n if e < a:\r\n missing.append(e)\r\n i += 1\r\n while expected[i] == e:\r\n i += 1\r\n elif e > a:\r\n unexpected.append(a)\r\n j += 1\r\n while actual[j] == a:\r\n j += 1\r\n else:\r\n i += 1\r\n try:\r\n while expected[i] == e:\r\n i += 1\r\n finally:\r\n j += 1\r\n while actual[j] == a:\r\n j += 1\r\n except IndexError:\r\n missing.extend(expected[i:])\r\n unexpected.extend(actual[j:])\r\n break\r\n return missing, unexpected", "def difference(list1, list2):\n new_list = []\n for rule1 in list1:\n in_list2 = False\n literals1 = [x.string() for x in rule1]\n for rule2 in list2:\n literals2 = [x.string() for x in rule2]\n if literals1 == literals2:\n in_list2 = True\n if not in_list2:\n new_list.append(rule1)\n return new_list", "def listops_difference(list_a,list_b):\r\n\r\n retlist = []\r\n for item in list_a:\r\n if item not in list_b:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def reverse_difference():", "def get_reverse_complement_unit_tests():\n # list of [input, expected output]\n data_list = [\n [\"ATGCCCGCTTT\", \"AAAGCGGGCAT\"],\n [\"AAAGCGGGCAT\", \"ATGCCCGCTTT\"],\n [\"CCGCGTTCA\", \"TGAACGCGG\"],\n [\"AAAA\", \"TTTT\"],\n [\"TTTT\", \"AAAA\"],\n [\"CCCC\", \"GGGG\"],\n [\"GGGG\", \"CCCC\"],\n [\"ATGGGAATGA\", \"TCATTCCCAT\"],\n [\"TCATTCCCAT\", \"ATGGGAATGA\"],\n [\"TTTAAAGGGCCC\", \"GGGCCCTTTAAA\"],\n [\"GGGCCCTTTAAA\", \"TTTAAAGGGCCC\"],\n [\"ATCGATCG\", \"CGATCGAT\"],\n [\"CGATCGAT\", \"ATCGATCG\"],\n ]\n for data in data_list:\n if len(data) == 2:\n print \"input: \" + str(data[0]) + \",\" ,\n print \"expected output: \" + str(data[1]) + \",\",CTAATGATGCCCCAT\n o = get_reverse_complement(data[0])\n print \"actual output: \" + str(o)\n if o != data[1]:\n print \"## Test Fail Here!\"", "def check_difference_between_two_lists(list1: list, list2: list):\n\n list1_unique = sorted(set(list1) - set(list2))\n list2_unique = sorted(set(list2) - set(list1))\n if len(list2_unique) != 0 or len(list1_unique) != 0:\n raise ValueError(\n \"two lists are not identical\\n\"\n \"list1 has unique elements {}\\n\"\n \"list2 has unique elements {}\\n\".format(list1_unique, list2_unique)\n )", "def pytest_assertrepr_compare(op: str, left: Any, right: Any) -> List[str]: # noqa: U100\n output = [\"Compare Result:\"]\n\n for line in list(dictdiffer.diff(left, right)):\n output.extend(pp.pformat(line).split(\"\\n\"))\n\n return output", "def test_diff(self):\n self.assertEqual(self.RNA(\"UGCUGCUC\").diff(\"\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").diff(\"U\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").diff(\"UCCCCCUC\"), 3)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").diff(\"CCCCC\"), 5)\n # raises TypeError if other not iterable\n self.assertRaises(TypeError, self.RNA(\"AAAAA\").diff, 5)", "def diff(list1, list2):\n\tc = set(list1).union(set(list2))\n\td = set(list1).intersection(set(list2))\n\treturn len(list(c - d))", "def differences(self):\n return self._differences", "def 
list_difference(l1: List[Any], l2: List[Any]) -> List[Any]:\n return [item for item in l1 if item not in l2]", "def test_list(l):\n temp = [str(t) for t in l]\n temp2 = ' '.join(temp)\n print temp2\n s = subprocess.Popen(\"./clifford\", stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n myinput = str(order)+'\\n'+temp2\n myoutput = s.communicate(input=myinput)\n print myinput\n print myoutput\n if \"lag\" in myoutput[0]:\n print\n print \"Flag found!\"\n print myoutput\n exit()", "def list_difference(list1, list2):\n diff_list = []\n for item in list1:\n if not item in list2:\n diff_list.append(item)\n return diff_list", "def diff(before: list, after: list) -> (list, list):\n additions = [item for item in after if item not in before]\n removals = [item for item in before if item not in after]\n return additions, removals", "def test_printdiff(self):\n\n # Testing different string input options\n assert printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\")) is None\n assert (\n printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\"), ext=0) is None\n )\n assert (\n printdiff(\n self.data(\"o4sp040b0_raw.fits\"),\n self.data(\"o4sp040b0_raw.fits\"),\n extname=\"sci\",\n )\n is None\n )\n\n # This may seem weird, but check printdiff to see, need to test\n # incorrect second file\n with pytest.raises(OSError):\n printdiff(\"o4sp040b0_raw.fits\", \"fakefile.fits\", extname=\"sci\")\n\n # Test HDU object inputs\n with fits.open(self.data(\"stddata.fits\"), mode=\"readonly\") as in1:\n with fits.open(self.data(\"checksum.fits\"), mode=\"readonly\") as in2:\n assert printdiff(in1[0], in2[0]) is None\n\n with pytest.raises(ValueError):\n printdiff(in1[0], in2[0], ext=0)\n\n assert printdiff(in1, in2) is None\n\n with pytest.raises(NotImplementedError):\n printdiff(in1, in2, 0)" ]
[ "0.73093635", "0.71023244", "0.7012757", "0.6704099", "0.6681043", "0.6674949", "0.66015595", "0.6591573", "0.65760916", "0.6567296", "0.65616626", "0.6558527", "0.6547126", "0.6513799", "0.65075797", "0.64695394", "0.64616483", "0.6440873", "0.63981354", "0.638816", "0.6374399", "0.63058245", "0.6301726", "0.6298475", "0.6272127", "0.6204838", "0.6190178", "0.6164322", "0.6157786", "0.6116884" ]
0.81505835
0
Verify the output of the 'process_id' function
def test_process_id():
    output = sh.process_id()
    assert isinstance(output, int) and output > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_processid(self):\n self.assertTrue(\n int(self.ospf.parse_state(\n pattern='processid',\n cmd_key='sh_ospf_ints')) == 1, 'OSPF Interface: process ID not found')", "def check_process(dbcur, process_id):\n sql = \"\"\"select * from process where id = '\"\"\" + process_id + \"\"\"'\"\"\"\n dbcur.execute(sql)\n result = dbcur.fetchall()\n\n return len(result) == 0", "def test_process_parent_id():\n output = sh.process_parent_id()\n assert isinstance(output, int) and output > 0", "def getpid(command):\n try:\n _pidof = executeCommand(command)\n except Exception as er:\n print (\" not able to get pid\")\n return False\n return _pidof", "def check_if_sa_running(self, process):\n try:\n err, pid, _ = self.connection.execute(\"pgrep -f %s\" % process)\n # strip whitespace\n return err, pid.strip()\n except OSError as e:\n if e.errno in {errno.ECONNRESET}:\n # if we can't connect to check, then we won't be able to connect to stop it\n LOG.exception(\"can't connect to host to check collectd status\")\n return 1, None\n raise", "def check_process(self, instance, process):\n\n instance = self.get_instance(instance)\n output = \"\"\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n output = subprocess.check_output([\"ssh\", key, username, 'ps', 'aux', '|', 'grep', process]).decode(\n \"utf-8\")\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n output = subprocess.check_output(\n [\"ssh\", '-i', key, username, 'ps', 'aux', '|', 'grep', process]).decode(\"utf-8\")\n return output\n except:\n return \"Faile to access the instance\"", "def check_process_exist(process_name): \n returncode = '' \n try:\n p=os.popen('tasklist /FI \"IMAGENAME eq %s\"' % process_name) \n returncode = p.read().count(process_name) \n if returncode:\n initlog('%s exists' % process_name)\n except Exception, e:\n initlog(str(e)) \n return returncode", "def test_different_pid(self):\n testcase = self.root.find('./testcase[@classname=\"support.PassingTest\"]')\n systemout = testcase.find('system-out')\n test_pid = systemout.text.replace('pid: ', '').replace('\\n', '')\n self.assertNotEqual(str(os.getpid()), test_pid)", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def test_get_pid_and_server():\n assert re.match(\"^[0-9]+@[a-zA-Z0-9_]+\", util.get_pid_and_server())", "def test_003_pid(self):\n HEADING()\n pid = self.db.pid()\n print (pid)\n assert True", "def process_id(self):\n return self._process_id", "def process_id(self):\n return self._process_id", "def get_process_id():\n process_id = os.environ[\"WS_PROCESS_ID\"]\n return process_id", "def is_pid_valid(pid):\n return get_pid_status(pid)[1]", "async def get_process_id() -> int:\n print(\"Await sleep\")\n logger = getLogger(__name__)\n logger.debug(\"Get process id\")\n # Wait for starting subprocess\n # otherwise, time.sleep() will block starting subprocess.\n 
current_process = psutil.Process(os.getpid())\n while len(current_process.children()) < 2:\n print(len(current_process.children()))\n await asyncio.sleep(0.01)\n logger.debug(\"Start sleep\")\n time.sleep(SECOND_SLEEP_FOR_TEST_LONG)\n print(\"Kill all processes in this window.\")\n return 0", "def process_id(job_id):\n pass # Not implemented yet", "def _get_pid(split_data, sensor):\n prot, ip_dst, port_dst, timestamp = split_data\n prot = prot.lower()\n\n if not sanitizer.check_get_pid_params(prot, ip_dst, port_dst, timestamp):\n return '-1,error checking input'\n\n return sensor.search_process(prot, ip_dst, port_dst, timestamp)", "def ppid(self):", "def check_process_for_pid(pid, process_name):\n pid = int(pid)\n proc = psutil.Process(pid)\n return proc.name() == process_name", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def generate_fwan_process_id() -> str:\n return str(uuid.uuid4())", "def pid():\n return 0x0204", "def pid():\n return 0x0204", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True", "def pidExists(self, pid):\n\n prochash = self.getHash( 'datahash' ) # safely get copy of process dict\n\n try:\n prochash[pid]\n return 1\n except KeyError:\n return 0", "def pidExists(self, pid):\n\n prochash = self.getHash( 'datahash' ) # safely get copy of process dict\n\n try:\n prochash[pid]\n return 1\n except KeyError:\n return 0", "def check_openvpn_pid():\n return call_command('ps aux')[0].split('\\n')", "def check_pid(pid):\n result = None\n try:\n s = os.stat('/proc/' + pid)\n if s.st_uid == our_uid:\n cwd = os.path.realpath('/proc/' + pid + '/cwd')\n if cwd == kill_dir and int(pid) != our_pid:\n f = open('/proc/' + pid + '/cmdline')\n cmdline = f.read().split('\\x00')[:-1]\n f.close()\n result = cmdline\n except OSError:\n # We can't read all our processes; that's ok\n pass\n return result" ]
[ "0.74825627", "0.6971474", "0.6877758", "0.6502754", "0.640636", "0.6317062", "0.63126904", "0.6285647", "0.62607557", "0.62258863", "0.6213967", "0.61965364", "0.61815655", "0.61815655", "0.61018986", "0.60734487", "0.59861445", "0.5978445", "0.59657335", "0.5962974", "0.59304786", "0.58688456", "0.5853568", "0.58358055", "0.58358055", "0.58047944", "0.5792544", "0.5792544", "0.5777521", "0.57754683" ]
0.81981176
0
Verify the output of the 'process_parent_id' function
def test_process_parent_id():
    output = sh.process_parent_id()
    assert isinstance(output, int) and output > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0", "def get_parent_pid(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlDialectInspection,SqlNoDataSourceInspection\n parent_pids = wmi.ExecQuery(\n 'SELECT ParentProcessID FROM Win32_Process WHERE ProcessID=%s' % pid\n )\n if not parent_pids:\n return None\n return only(parent_pids).Properties_('ParentProcessID').Value", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def wait_for_parent():\n wait_for_pid(os.getppid())", "def parent_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"parent_id\")", "async def _check_parent():\n try:\n curr_proc = psutil.Process()\n parent_death_cnt = 0\n while True:\n parent = curr_proc.parent()\n # If the parent is dead, it is None.\n parent_gone = parent is None\n init_assigned_for_parent = False\n parent_changed = False\n\n if parent:\n # Sometimes, the parent is changed to the `init` process.\n # In this case, the parent.pid is 1.\n init_assigned_for_parent = parent.pid == 1\n # Sometimes, the parent is dead, and the pid is reused\n # by other processes. In this case, this condition is triggered.\n parent_changed = self.ppid != parent.pid\n\n if parent_gone or init_assigned_for_parent or parent_changed:\n parent_death_cnt += 1\n logger.warning(\n f\"Raylet is considered dead {parent_death_cnt} X. \"\n f\"If it reaches to {_PARENT_DEATH_THREASHOLD}, the agent \"\n f\"will kill itself. Parent: {parent}, \"\n f\"parent_gone: {parent_gone}, \"\n f\"init_assigned_for_parent: {init_assigned_for_parent}, \"\n f\"parent_changed: {parent_changed}.\"\n )\n if parent_death_cnt < _PARENT_DEATH_THREASHOLD:\n await asyncio.sleep(\n dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_S\n )\n continue\n\n log_path = os.path.join(self.log_dir, \"raylet.out\")\n error = False\n msg = f\"Raylet is terminated: ip={self.ip}, id={self.node_id}. \"\n try:\n with open(log_path, \"r\", encoding=\"utf-8\") as f:\n # Seek to _RAYLET_LOG_MAX_TAIL_SIZE from the end if the\n # file is larger than that.\n f.seek(0, io.SEEK_END)\n pos = max(0, f.tell() - _RAYLET_LOG_MAX_TAIL_SIZE)\n f.seek(pos, io.SEEK_SET)\n # Read remaining logs by lines.\n raylet_logs = f.readlines()\n # Assume the SIGTERM message must exist within the last\n # _RAYLET_LOG_MAX_TAIL_SIZE of the log file.\n if any(\n \"Raylet received SIGTERM\" in line\n for line in raylet_logs\n ):\n msg += \"Termination is graceful.\"\n logger.info(msg)\n else:\n msg += (\n \"Termination is unexpected. Possible reasons \"\n \"include: (1) SIGKILL by the user or system \"\n \"OOM killer, (2) Invalid memory access from \"\n \"Raylet causing SIGSEGV or SIGBUS, \"\n \"(3) Other termination signals. 
\"\n f\"Last {_RAYLET_LOG_MAX_PUBLISH_LINES} lines \"\n \"of the Raylet logs:\\n\"\n )\n msg += \" \" + \" \".join(\n raylet_logs[-_RAYLET_LOG_MAX_PUBLISH_LINES:]\n )\n error = True\n except Exception as e:\n msg += f\"Failed to read Raylet logs at {log_path}: {e}!\"\n logger.exception(msg)\n error = True\n if error:\n logger.error(msg)\n # TODO: switch to async if necessary.\n ray._private.utils.publish_error_to_driver(\n ray_constants.RAYLET_DIED_ERROR,\n msg,\n gcs_publisher=ray._raylet.GcsPublisher(\n address=self.gcs_address\n ),\n )\n else:\n logger.info(msg)\n sys.exit(0)\n else:\n parent_death_cnt = 0\n await asyncio.sleep(\n dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_S\n )\n except Exception:\n logger.exception(\"Failed to check parent PID, exiting.\")\n sys.exit(1)", "def get_parent_id():\n return getattr(threadlocal, \"parent_id\", None)", "def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warning(\"Parent process is terminated abnormally. Process exits.\")\n cur_process.kill()", "def mempool_assert_my_parent_id(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:\n if unspent.coin.parent_coin_info != condition.vars[0]:\n return Err.ASSERT_MY_PARENT_ID_FAILED\n return None", "def context_parent_id(self) -> str | None:\n return bytes_to_ulid_or_none(self.context_parent_id_bin)", "def parent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parent_id\")", "def parent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parent_id\")", "def test_different_pid(self):\n testcase = self.root.find('./testcase[@classname=\"support.PassingTest\"]')\n systemout = testcase.find('system-out')\n test_pid = systemout.text.replace('pid: ', '').replace('\\n', '')\n self.assertNotEqual(str(os.getpid()), test_pid)", "async def get_process_id() -> int:\n print(\"Await sleep\")\n logger = getLogger(__name__)\n logger.debug(\"Get process id\")\n # Wait for starting subprocess\n # otherwise, time.sleep() will block starting subprocess.\n current_process = psutil.Process(os.getpid())\n while len(current_process.children()) < 2:\n print(len(current_process.children()))\n await asyncio.sleep(0.01)\n logger.debug(\"Start sleep\")\n time.sleep(SECOND_SLEEP_FOR_TEST_LONG)\n print(\"Kill all processes in this window.\")\n return 0", "def test_processid(self):\n self.assertTrue(\n int(self.ospf.parse_state(\n pattern='processid',\n cmd_key='sh_ospf_ints')) == 1, 'OSPF Interface: process ID not found')", "def get_parent_id(task_dict: dict) -> int | None:\n return task_dict['parent']['id'] \\\n if task_dict['parent'] and 'id' in task_dict['parent'] else None", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def test_subprocess_fork_pid0(self, mocker):\n mocker.stopall()\n\n test_command = [\"who\", \"-b\"]\n test_name = \"test_who\"\n test_fork = True\n pid = 0\n\n # mock\n mock_logging_debug = mocker.MagicMock(name=\"mock_logging_debug\")\n mock_os_fork = mocker.MagicMock(name=\"mock_os_fork\", return_value=pid)\n mock_sys_exit = mocker.MagicMock(name=\"mock_sys_exit\")\n mock_os_chdir = mocker.MagicMock(name=\"mock_os_chdir\")\n mock_os_setsid = mocker.MagicMock(name=\"mock_os_setsid\")\n mock_os_umask = mocker.MagicMock(name=\"mock_os_umask\")\n\n # patch\n mocker.patch.object(\n 
scarlett_os.subprocess.logging.Logger, \"debug\", mock_logging_debug\n )\n mocker.patch.object(scarlett_os.subprocess.os, \"fork\", mock_os_fork)\n mocker.patch.object(scarlett_os.subprocess.sys, \"exit\", mock_sys_exit)\n mocker.patch.object(scarlett_os.subprocess.os, \"chdir\", mock_os_chdir)\n mocker.patch.object(scarlett_os.subprocess.os, \"setsid\", mock_os_setsid)\n mocker.patch.object(scarlett_os.subprocess.os, \"umask\", mock_os_umask)\n\n scarlett_os.subprocess.Subprocess(test_command, name=test_name, fork=test_fork)\n\n assert mock_sys_exit.call_count == 0\n\n mocker.stopall()", "def get_parent_id_from_trace_id():\n trace_id = get_trace_id()\n return trace_id.parent_id", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def check_pid(pid):\n result = None\n try:\n s = os.stat('/proc/' + pid)\n if s.st_uid == our_uid:\n cwd = os.path.realpath('/proc/' + pid + '/cwd')\n if cwd == kill_dir and int(pid) != our_pid:\n f = open('/proc/' + pid + '/cmdline')\n cmdline = f.read().split('\\x00')[:-1]\n f.close()\n result = cmdline\n except OSError:\n # We can't read all our processes; that's ok\n pass\n return result", "def ppid(self):", "def getpid(command):\n try:\n _pidof = executeCommand(command)\n except Exception as er:\n print (\" not able to get pid\")\n return False\n return _pidof", "def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )", "def test_003_pid(self):\n HEADING()\n pid = self.db.pid()\n print (pid)\n assert True", "def parent_custom_ip_prefix_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"parent_custom_ip_prefix_id\")", "def process_id(self):\n return self._process_id", "def process_id(self):\n return self._process_id", "def check_exec_parentchild(modname, dataobjs, xsectname, xsectdict, indent=''):\n #pylint: disable=unbalanced-tuple-unpacking\n # assumes check_exec_input and check_exec_output have already been executed so there are entries in dataobjs\n\n cnts = [0] * NUMCNTS\n if pfwdefs.SW_PARENTCHILD in xsectdict:\n print \"%sChecking %s %s...\" % (indent, xsectname, pfwdefs.SW_PARENTCHILD)\n indent += ' '\n #print \"%sxsectdict[pfwdefs.SW_PARENTCHILD] = %s\" % (indent, xsectdict[pfwdefs.SW_PARENTCHILD])\n #print \"%sdataobjs[pfwdefs.SW_INPUTS] = %s\" % (indent, dataobjs[pfwdefs.SW_INPUTS])\n #print \"%sdataobjs[pfwdefs.SW_OUTPUTS] = %s\" % (indent, dataobjs[pfwdefs.SW_OUTPUTS])\n #print \"%sfsplit = %s\" % (indent, miscutils.fwsplit(xsectdict[pfwdefs.SW_PARENTCHILD], ',') )\n\n msginfo = \"module %s, %s, %s\" % \\\n (modname, xsectname, pfwdefs.SW_PARENTCHILD)\n for pair in miscutils.fwsplit(xsectdict[pfwdefs.SW_PARENTCHILD], ','):\n pair = pair.lower()\n if ':' in pair:\n (parent, child) = miscutils.fwsplit(pair, ':')\n if '.' in parent:\n if parent not in dataobjs[pfwdefs.SW_INPUTS]:\n error(indent, \"%s - parent %s not listed in %s\" % \\\n (msginfo, parent, pfwdefs.SW_INPUTS))\n cnts[ERRCNT_POS] += 1\n else:\n error(indent, \"%s - parent %s missing section label\" % \\\n (msginfo, parent))\n cnts[ERRCNT_POS] += 1\n\n if '.' 
in child:\n if child not in dataobjs[pfwdefs.SW_OUTPUTS]:\n error(indent, \"%s - child %s not listed in %s\" % \\\n (msginfo, child, pfwdefs.SW_OUTPUTS))\n cnts[ERRCNT_POS] += 1\n else:\n error(indent, \"%s - child %s missing section label\" % \\\n (msginfo, child))\n cnts[ERRCNT_POS] += 1\n else:\n error(indent, \"%s - Invalid parent/child pair (%s). Missing colon.\" % \\\n (msginfo, pair))\n cnts[ERRCNT_POS] += 1\n elif pfwdefs.SW_INPUTS in xsectdict and pfwdefs.SW_OUTPUTS in xsectdict:\n msginfo = \"module %s, %s\" % \\\n (modname, xsectname)\n warning(indent, \"%s - has %s and %s, but not %s\" % \\\n (msginfo, pfwdefs.SW_INPUTS, pfwdefs.SW_OUTPUTS, pfwdefs.SW_PARENTCHILD))\n cnts[WARNCNT_POS] += 1\n\n return cnts", "def testParentage(self):\n self.assertEqual(\n self.cd,\n self.media_ref.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )" ]
[ "0.69360155", "0.68315536", "0.66632384", "0.65708196", "0.6495518", "0.6494205", "0.6362053", "0.6315289", "0.61751837", "0.6169541", "0.61215544", "0.61215544", "0.60680795", "0.59970415", "0.5961881", "0.5926512", "0.582809", "0.5824055", "0.58003867", "0.57824874", "0.5769921", "0.5758021", "0.57387626", "0.57351094", "0.5711329", "0.5695072", "0.5681211", "0.5681211", "0.5675889", "0.5658862" ]
0.8797357
0
Verify the output of the 'current_path' function
def test_current_path():
    output = sh.current_path()
    assert isinstance(output, str) and len(output) > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"check_path\")", "def current_dir_ok() :\n\n current_ok = True\n\n current_dir = str(os.environ['PWD'])\n expected_dir = str(os.path.abspath(out_dir))\n\n if not current_dir == expected_dir :\n print \"ERROR current directory (%s) is not the output directory for the ntuples (%s)\"%(current_dir, expected_dir)\n current_ok = False\n\n return current_ok", "def test_path(self):\n self.assertEqual(\n self.log.current_log_path,\n f'{self.path}/.{datetime.now(ET).date().isoformat()}.log'\n )", "def get_current_path(self):\r\n path_2 = self.path.replace('\\\\', '/')\r\n return self.copy_to_clipboard(path_2)", "def check_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"check_path\")", "def check_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"check_path\")", "def getCurrentPath(*p):\n\tfrom os.path import join\n\tfrom os import getcwd\n\tresult = getcwd()\n\tif len(p) > 0:\n\t\treturn join(result, *p)\n\treturn result", "def current_directory (self):\r\n pass", "def GetCurrentDir(self) -> str:\n ...", "def verify_path(path):\n if path is None:\n sys.exit('Program terminated. You must specify a correct path.')\n path = Path(path)\n assert path.exists(), f'The specified path was not found: {path}.'\n return path", "def current():\n result = run(\"ls -ld %(current_dir)s | awk '{print $11}'\" % env)\n return result.split('/')[-1]", "def is_valid_path(input_path):\n if not os.path.exists(input_path):\n print('\\'{}\\' is not a valid path.'.format(input_path))\n exit(1)\n return input_path", "def started_path(self):\n if self.ros_node.get_data('/diff_drive/path_achieved') is None:\n return False\n return not self.ros_node.get_data('/diff_drive/path_achieved')", "def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)", "def test_verify_path_5(self):\n result = basic.verify_path(str(self.test_directory1))\n self.assertTrue(result)", "def test_find_in_current_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n result = steptest.find_project_directory(directory)\n self.assertEqual(directory, result)", "def test_verify_path_1(self):\n result = basic.verify_path(self.test_filepath1, \"file\")\n self.assertTrue(result)", "def isPath(self,pin,head=\"check path exist\",exit_on_error=False,logmsg=False):\n p = os.path.abspath(self.expandvars(pin))\n if os.path.isdir(p):\n if logmsg:\n logger.info(head + \"\\n --> dir exist: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n return p\n #--- error no such file\n logger.error(head + \"\\n --> no such directory: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n if exit_on_error:\n raise SystemError(self.__MSG_CODE_PATH_NOT_EXIST)\n return False", "def _check_local_path(self, local_path):\n if not local_path:\n if self.api.user_info['local_os'] == \"windows\":\n local_path = os.path.join(os.environ[\"USERPROFILE\"],\n \"renderfarm_sdk\")\n else:\n local_path = os.path.join(os.environ[\"HOME\"], \"renderfarm_sdk\")\n return local_path", "def get_current_file_uri(self): # real signature unknown; restored from __doc__\n return \"\"", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)", "def getCurrentURL( 
self, fileName ):\n if fileName:\n if fileName[0] == '/':\n fileName = fileName.lstrip( '/' )\n try:\n fullUrl = '%s/%s' % ( self.cwd, fileName )\n return S_OK( fullUrl )\n except Exception, x:\n errStr = \"Failed to create URL %s\" % x\n return S_ERROR( errStr )", "def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)", "def currentPreviewPath(self):\n logger.debug(\"Func: currentPreviewPath/getter\")\n if self._currentSceneInfo[\"SubProject\"] is not \"None\":\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"Name\"])\n else:\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"SubProject\"], self._currentSceneInfo[\"Name\"])\n return path\n # if os.path.isdir(path):\n # return path\n # else:\n # return \"\"", "def test_get_out_bash_path(): # ***Incomplete test\n ##########################\n # Arrange.\n outdir = \"outdir\"\n\n ##########################\n # Act.\n #x = get_out_bash_path(outdir)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def curdir(self):\n return self.var('getcwd()')", "def displaypath():\n\n import pathlib\n pth = pathlib.Path('./')\n pth.is_dir()\n pth.absolute()", "def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False", "def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)" ]
[ "0.67531914", "0.67101705", "0.6548491", "0.63815373", "0.6233272", "0.6233272", "0.62081504", "0.61196506", "0.60904706", "0.6084025", "0.60738266", "0.60443056", "0.6019613", "0.5900392", "0.58832717", "0.58819306", "0.58666605", "0.58410656", "0.58003384", "0.57874876", "0.57690996", "0.5757863", "0.57523364", "0.5749622", "0.5743949", "0.5725152", "0.5715139", "0.5681255", "0.5662936", "0.5660575" ]
0.79191375
0
Verify the output of the 'path_basename' function
def test_path_basename():
    mock_path = "E:\\Repos\\pc-setup\\powershell\\provision_python.ps1"
    output = sh.path_basename(mock_path)
    assert output == "provision_python.ps1"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basename(path: str) -> str:\n pass", "def basename(path):\r\n return split(path)[1]", "def testBasenamePath(self):\n test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n basename = test_helper.BasenamePath(test_file_path)\n self.assertEqual(basename, 'utmp-linux_libc6')", "def basename(path):\n\n return path.rpartition(\"/\")[2]", "def basename(path):\r\n return path.replace(\"\\\\\", \"/\").split(\"/\")[-1]", "def get_basename(absolute_file_path):\r\n return absolute_file_path.split('/')[-1]", "def basename(file_path):\n return os.path.basename(file_path)", "def purebasename(self):\n return self._getbyspec(\"purebasename\")[0]", "def basename(self):\n return self._getbyspec(\"basename\")[0]", "def basename_sans(path):\n return os.path.splitext(os.path.basename(path))[0]", "def _getFileName(self, filePath):\r\n\t\thead, tail = ntpath.split(filePath)\r\n\t\treturn tail or ntpath.basename(head)", "def path_leaf(path):\n\thead, tail = ntpath.split(path)\n\treturn tail or ntpath.basename(head)", "def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix", "def pathLeaf(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)", "def basename(source_file) :\n if source_file is not None and source_file != '' :\n return os.path.basename(source_file)\n\n return ''", "def get_file_name(path):\n return os.path.basename(path)", "def path_leaf(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)", "def get_name(path):\n return path.rsplit('/',1)[1]", "def get_filename(path):\n return path.split('/')[-1]", "def test_sanitized_filename(self):\n value = \"/absolute/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"\n\n value = \"../relative/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"", "def _check_output_prefix(arg: str) -> str:\n\n if \"/\" in arg:\n prefix = arg.rsplit(\"/\", 1)[0]\n _is_valid_file(prefix)\n return arg", "def check_filename(basename):\n return len(basename) <= MAXIMUM_FILENAME_LENGTH", "def purebasename(self):\n return self.namebase", "def base_name(path):\n return os.path.basename(path)", "def name_from_path(path):\n return path[0:-3]", "def basename(self, t):\n t = self.canon(t)\n if isinstance(t, basestring):\n return t\n elif isinstance(t, Sequence):\n t0 = t\n while not isinstance(t0, basestring):\n t0 = t0[0]\n return t0\n else:\n _raise_type_error(t)", "def getfilename(path):\r\n return path.split('\\\\').pop().split('/').pop().rsplit('.', 1)[0]", "def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path", "def basename(file_path: str, extension: bool = False):\n file_name = os.path.basename(file_path)\n if not extension:\n file_name, *_ = file_name.split(\".\")\n return file_name", "def get_basename(file: Union[str, FileStorage]) -> str:\n filename = _retrieve_filename(file)\n # split will split at the final part of the path(image.jpg) and everything\n # before it is at index 0\n return os.path.split(filename)[1]" ]
[ "0.75387126", "0.7183686", "0.71538955", "0.7139029", "0.7134519", "0.6967882", "0.674081", "0.6674592", "0.64930874", "0.64804405", "0.64639074", "0.64589596", "0.64555883", "0.6445215", "0.63873416", "0.6379699", "0.6366947", "0.6366229", "0.6353301", "0.6350111", "0.63350433", "0.63144684", "0.6273124", "0.6269993", "0.6258439", "0.6251247", "0.62456226", "0.62158203", "0.61694944", "0.611951" ]
0.7647868
0
Craft an AMP response containing as many records as will fit within the size limit. Remaining records are stored as a "continuation", identified by a token that is returned to the client to fetch later via the ContinuationCommand.
def _recordsToResponse(self, records):
    fieldsList = []
    count = 0
    if records:
        size = 0
        while size < self._maxSize:
            try:
                record = records.pop()
            except (KeyError, IndexError):
                # We're done.
                # Note: because records is an iterable (list or set)
                # we're catching both KeyError and IndexError.
                break
            pickled = pickle.dumps(self.recordToDict(record))
            size = size + len(pickled)
            fieldsList.append(pickled)
            count += 1

    response = {"items": fieldsList}

    if records:
        response["continuation"] = self._storeContinuation(records, "records")

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _buffer(self, n=None):\n if self._out_of_scope:\n raise ResultConsumedError(self, _RESULT_OUT_OF_SCOPE_ERROR)\n if self._consumed:\n raise ResultConsumedError(self, _RESULT_CONSUMED_ERROR)\n if n is not None and len(self._record_buffer) >= n:\n return\n record_buffer = deque()\n for record in self:\n record_buffer.append(record)\n if n is not None and len(record_buffer) >= n:\n break\n if n is None:\n self._record_buffer = record_buffer\n else:\n self._record_buffer.extend(record_buffer)\n self._exhausted = not self._record_buffer", "def _itemsToResponse(self, items):\n itemsToSend = []\n count = 0\n if items:\n size = 0\n while size < self._maxSize:\n try:\n item = items.pop()\n except (KeyError, IndexError):\n # We're done.\n # Note: because records is an iterable (list or set)\n # we're catching both KeyError and IndexError.\n break\n size = size + len(item)\n itemsToSend.append(item)\n count += 1\n\n response = {\"items\": itemsToSend}\n\n if items:\n response[\"continuation\"] = self._storeContinuation(items, \"items\")\n\n return response", "def make_data(self, limit: int):", "def response_space():", "def response_chunking(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"response_chunking\")", "def stream_n_messages(n):\n response = get_dict(\"url\", \"args\", \"headers\", \"origin\")\n n = min(n, 100)\n\n def generate_stream():\n for i in range(n):\n response[\"id\"] = i\n yield json.dumps(response) + \"\\n\"\n\n return Response(generate_stream(), headers={\"Content-Type\": \"application/json\"})", "def getSyncChunk(self, authenticationToken, afterUSN, maxEntries, fullSyncOnly):\r\n pass", "def make_more(self,result,con):", "def consume_next(self):\n response = six.next(self._response_iterator)\n self._counter += 1\n\n if self._last_scanned_row_key is None: # first response\n if response.last_scanned_row_key:\n raise InvalidReadRowsResponse()\n\n self._last_scanned_row_key = response.last_scanned_row_key\n\n row = self._row\n cell = self._cell\n\n for chunk in response.chunks:\n\n self._validate_chunk(chunk)\n\n if chunk.reset_row:\n row = self._row = None\n cell = self._cell = self._previous_cell = None\n continue\n\n if row is None:\n row = self._row = PartialRowData(chunk.row_key)\n\n if cell is None:\n cell = self._cell = PartialCellData(\n chunk.row_key,\n chunk.family_name.value,\n chunk.qualifier.value,\n chunk.timestamp_micros,\n chunk.labels,\n chunk.value)\n self._copy_from_previous(cell)\n else:\n cell.append_value(chunk.value)\n\n if chunk.commit_row:\n self._save_current_row()\n row = cell = None\n continue\n\n if chunk.value_size == 0:\n self._save_current_cell()\n cell = None", "def get_response(self):\n res = IODWriteMultipleRes()\n for field in [\"seqNum\", \"ARUUID\", \"API\", \"slotNumber\",\n \"subslotNumber\", \"index\"]:\n res.setfieldval(field, self.getfieldval(field))\n\n # append all block response\n res_blocks = []\n for block in self.getfieldval(\"blocks\"):\n res_blocks.append(block.get_response())\n res.setfieldval(\"blocks\", res_blocks)\n return res", "def limit_result(result_set):\n max_result = MAX_RESULT\n result = []\n if max_result > 0:\n result = result_set[:max_result]\n result.append(\"Total result: {}\".format(len(result_set)))\n return result", "def step_impl(context, size):\n assert len(context.response_json) == int(size)", "def to_poll_response_11(self, in_response_to):\n\n poll_response = tm11.PollResponse(message_id=tm11.generate_message_id(),\n in_response_to=in_response_to,\n 
collection_name=self.result_set.data_collection.name)\n\n if self.exclusive_begin_timestamp_label:\n poll_response.exclusive_begin_timestamp_label = self.exclusive_begin_timestamp_label\n\n if self.inclusive_end_timestamp_label:\n poll_response.inclusive_end_timestamp_label = self.inclusive_end_timestamp_label\n\n if self.result_set.subscription:\n poll_response.subscription_id = self.result_set.subscription.subscription_id\n\n poll_response.record_count = tm11.RecordCount(int(self.result_set.total_content_blocks), False)\n poll_response.more = self.more\n poll_response.result_id = str(self.result_set.pk)\n poll_response.result_part_number = int(self.part_number)\n\n for content_block in self.content_blocks.all():\n cb = content_block.to_content_block_11()\n poll_response.content_blocks.append(cb)\n\n return poll_response", "def more(self):\n if self.done:\n return ''\n else:\n data = self.data_wrapper(\n self.file.read(self.out_buffer_size))\n if not data:\n self.done = 1\n self.close()\n else:\n return data", "def iter_content(response: requests.Response, size):\n buffer = bytearray(size)\n num = 0\n try:\n while num < size:\n content = next(response.iter_content(size - num))\n buffer[num:num + len(content)] = content\n num += len(content)\n except StopIteration:\n raise RuntimeError('Content not long enough')\n return buffer", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def handle_response(self, key, response, resource):\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = (num, m, size)\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n\n elif block == 1:\n if m == 1:\n response.code = defines.responses[\"CONTINUE\"]\n response.block1 = (num, m, size)\n return response", "def __call__(self, tuple):\n del tuple['header']['Content-Length'] # Do NOT include length in \n params = tuple['request'].split('&')\n count = 10\n for param in params:\n namVal = param.split('=')\n if (namVal[0] == \"REPEAT\"):\n count = int(namVal[1])\n break\n fillString = \"-\" * count\n tuple['response'] = self.preamble + fillString + tuple['request'].replace(\"&\", '*')\n return tuple", "def to_chunked_dataframe(\n self, max_chunk_size: int = -1, timeout_sec: int = DEFAULT_TIMEOUT_SEC\n ) -> pd.DataFrame:\n # Max chunk size defined by user\n records = []\n for result in self.result(timeout_sec=timeout_sec):\n result.append(records)\n if len(records) == max_chunk_size:\n df = pd.DataFrame.from_records(records)\n records.clear() # Empty records array\n yield df\n\n # Handle for last chunk that is < max_chunk_size\n if not records:\n yield pd.DataFrame.from_records(records)", "def fetch(self, limit, offset=0):\r\n self.limit = limit\r\n self.offset = offset\r\n return self", "def per_stream_response_statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def chunk(self, count):\n page = 1\n results = self.for_page(page, count).get()\n\n while len(results) > 0:\n yield results\n\n page += 1\n\n results = self.for_page(page, count).get()", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = 
'%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def _read_amt(self, byte_count):\n full_msg = bytearray()\n while len(full_msg) < byte_count:\n block = self.request.recv(byte_count - len(full_msg))\n full_msg.extend(block)\n return full_msg", "def end(response):\n if isinstance(response.response, ClosingIterator):\n return response\n\n diff = time.time() - request.start\n del request.start\n\n if response.response:\n response.response[0] = response.response[0].replace('__EXECUTION_TIME__', '{:.3}'.format(diff))\n response.headers[\"content-length\"] = len(response.response[0])\n\n return response", "def _buffer_to(self, amount):\n if amount > self.lookahead:\n raise Exception(\n 'Cannot extend buffer to {}: '\n 'beyond buffer lookahead {}'.format(\n amount, self.lookahead\n )\n )\n while len(self.buffer) < amount:\n try:\n self.buffer.appendleft(next(self.stream))\n except StopIteration:\n break", "def PagedExpand(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def test_txn_list_paginated_by_max_index(self):\n paging = Mocks.make_paging_response(0, 4)\n self.stream.preset_response(\n head_id='d',\n paging=paging,\n transactions=Mocks.make_txns('d', 'c', 'b'))\n\n response = await self.get_assert_200('/transactions?max=2&count=7')\n controls = Mocks.make_paging_controls(3, start_index=0)\n self.stream.assert_valid_request_sent(paging=controls)\n\n self.assert_has_valid_head(response, 'd')\n self.assert_has_valid_link(response, '/transactions?head=d&max=2&count=7')\n self.assert_has_valid_paging(response, paging,\n '/transactions?head=d&min=3&count=7')\n self.assert_has_valid_data_list(response, 3)\n self.assert_txns_well_formed(response['data'], 'd', 'c', 'b')", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:" ]
[ "0.53557193", "0.5322775", "0.5281807", "0.52735066", "0.524363", "0.5193939", "0.5091012", "0.49231088", "0.49192467", "0.49118719", "0.49110144", "0.4897224", "0.48677567", "0.48475388", "0.483681", "0.482466", "0.48011813", "0.47937223", "0.47919127", "0.4773665", "0.47668964", "0.47470227", "0.4744753", "0.4732712", "0.47260097", "0.4723791", "0.4720598", "0.4710793", "0.47010073", "0.46775597" ]
0.5432548
0
Craft an AMP response containing as many items as will fit within the size limit. Remaining items are stored as a "continuation", identified by a token that is returned to the client to fetch later via the ContinuationCommand.
def _itemsToResponse(self, items):
    itemsToSend = []
    count = 0
    if items:
        size = 0
        while size < self._maxSize:
            try:
                item = items.pop()
            except (KeyError, IndexError):
                # We're done.
                # Note: because records is an iterable (list or set)
                # we're catching both KeyError and IndexError.
                break
            size = size + len(item)
            itemsToSend.append(item)
            count += 1

    response = {"items": itemsToSend}

    if items:
        response["continuation"] = self._storeContinuation(items, "items")

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step_impl(context, size):\n assert len(context.response_json) == int(size)", "def response_space():", "def response_chunking(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"response_chunking\")", "def stream_n_messages(n):\n response = get_dict(\"url\", \"args\", \"headers\", \"origin\")\n n = min(n, 100)\n\n def generate_stream():\n for i in range(n):\n response[\"id\"] = i\n yield json.dumps(response) + \"\\n\"\n\n return Response(generate_stream(), headers={\"Content-Type\": \"application/json\"})", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.ax25_pdu_packer_sptr_set_max_noutput_items(self, m)", "def get_response(self):\n res = IODWriteMultipleRes()\n for field in [\"seqNum\", \"ARUUID\", \"API\", \"slotNumber\",\n \"subslotNumber\", \"index\"]:\n res.setfieldval(field, self.getfieldval(field))\n\n # append all block response\n res_blocks = []\n for block in self.getfieldval(\"blocks\"):\n res_blocks.append(block.get_response())\n res.setfieldval(\"blocks\", res_blocks)\n return res", "def iter_content(response: requests.Response, size):\n buffer = bytearray(size)\n num = 0\n try:\n while num < size:\n content = next(response.iter_content(size - num))\n buffer[num:num + len(content)] = content\n num += len(content)\n except StopIteration:\n raise RuntimeError('Content not long enough')\n return buffer", "def __call__(self, tuple):\n del tuple['header']['Content-Length'] # Do NOT include length in \n params = tuple['request'].split('&')\n count = 10\n for param in params:\n namVal = param.split('=')\n if (namVal[0] == \"REPEAT\"):\n count = int(namVal[1])\n break\n fillString = \"-\" * count\n tuple['response'] = self.preamble + fillString + tuple['request'].replace(\"&\", '*')\n return tuple", "def PagedExpand(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_set_max_noutput_items(self, m)", "def handle_response(self, key, response, resource):\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = (num, m, size)\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n\n elif block == 1:\n if m == 1:\n response.code = defines.responses[\"CONTINUE\"]\n response.block1 = (num, m, size)\n return response", "def queue_response(self, **kwargs):\n self.response_list.append(kwargs)\n self.semaphore.release()", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def getSyncChunk(self, authenticationToken, afterUSN, maxEntries, fullSyncOnly):\r\n pass", "def make_data(self, limit: int):", "def game_sequence(self, upper_limit):\n for i in range(1, upper_limit):\n response = self.build_text_response_for_number_(i)\n yield response if response else i", "def chunked(self, n):\n return imap(self.__class__, chunked(self._bytes, n))", "async def test_txn_list_paginated_by_max_index(self):\n paging = Mocks.make_paging_response(0, 4)\n 
self.stream.preset_response(\n head_id='d',\n paging=paging,\n transactions=Mocks.make_txns('d', 'c', 'b'))\n\n response = await self.get_assert_200('/transactions?max=2&count=7')\n controls = Mocks.make_paging_controls(3, start_index=0)\n self.stream.assert_valid_request_sent(paging=controls)\n\n self.assert_has_valid_head(response, 'd')\n self.assert_has_valid_link(response, '/transactions?head=d&max=2&count=7')\n self.assert_has_valid_paging(response, paging,\n '/transactions?head=d&min=3&count=7')\n self.assert_has_valid_data_list(response, 3)\n self.assert_txns_well_formed(response['data'], 'd', 'c', 'b')", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def step_impl(context, key, size):\n assert len(context.response_json[key]) == int(size)", "def max_noutput_items(self):\n return _spacegrant_swig.ax25_pdu_packer_sptr_max_noutput_items(self)", "def _read_amt(self, byte_count):\n full_msg = bytearray()\n while len(full_msg) < byte_count:\n block = self.request.recv(byte_count - len(full_msg))\n full_msg.extend(block)\n return full_msg", "def compute_response(self, items_to_process):\n pass", "async def active_marquee(bot, context, response):\n\n # Setup text with whitespace padding\n total_length = 40 + len(response.extra)\n text = '{0: ^{1}}'.format(response.extra, total_length)\n for it in range(3):\n for move in range(total_length - 20):\n moving_text = '`|{:.20}|`'.format(text[move:])\n await asyncio.sleep(1) # Evenly distribute ratelimit\n await response.message.edit(content=moving_text)\n\n # When the marquee is done, just display the text\n await asyncio.sleep(1)\n await response.message.edit(content=response.extra)", "def to_poll_response_11(self, in_response_to):\n\n poll_response = tm11.PollResponse(message_id=tm11.generate_message_id(),\n in_response_to=in_response_to,\n collection_name=self.result_set.data_collection.name)\n\n if self.exclusive_begin_timestamp_label:\n poll_response.exclusive_begin_timestamp_label = self.exclusive_begin_timestamp_label\n\n if self.inclusive_end_timestamp_label:\n poll_response.inclusive_end_timestamp_label = self.inclusive_end_timestamp_label\n\n if self.result_set.subscription:\n poll_response.subscription_id = self.result_set.subscription.subscription_id\n\n poll_response.record_count = tm11.RecordCount(int(self.result_set.total_content_blocks), False)\n poll_response.more = self.more\n poll_response.result_id = str(self.result_set.pk)\n poll_response.result_part_number = int(self.part_number)\n\n for content_block in self.content_blocks.all():\n cb = content_block.to_content_block_11()\n poll_response.content_blocks.append(cb)\n\n return poll_response", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_set_max_noutput_items(self, m)", "def limit_result(result_set):\n max_result = MAX_RESULT\n result = []\n if max_result > 0:\n result = result_set[:max_result]\n result.append(\"Total result: {}\".format(len(result_set)))\n return result", "def _buffer_to(self, amount):\n if amount > self.lookahead:\n raise Exception(\n 'Cannot extend buffer to {}: '\n 'beyond buffer lookahead {}'.format(\n amount, self.lookahead\n )\n )\n while len(self.buffer) < amount:\n try:\n self.buffer.appendleft(next(self.stream))\n except StopIteration:\n break", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.hdlc_framer_sptr_set_max_noutput_items(self, m)", "def make_response_paginated(paginator: PaginationBase, op: Operation) -> None:\n status_code, item_schema = 
_find_collection_response(op)\n\n # Switching schema to Output schema\n try:\n new_name = f\"Paged{item_schema.__name__}\"\n except AttributeError:\n new_name = f\"Paged{str(item_schema).replace('.', '_')}\" # typing.Any case\n\n new_schema = type(\n new_name,\n (paginator.Output,),\n {\n \"__annotations__\": {paginator.items_attribute: List[item_schema]}, # type: ignore\n },\n ) # typing: ignore\n\n response = op._create_response_model(new_schema)\n\n # Changing response model to newly created one\n op.response_models[status_code] = response" ]
[ "0.5428353", "0.5276194", "0.50557584", "0.50412405", "0.49756488", "0.49587438", "0.49349442", "0.49303618", "0.49002728", "0.4886534", "0.48853728", "0.48849902", "0.487241", "0.48717028", "0.48281708", "0.48274213", "0.48222074", "0.48172882", "0.4801125", "0.47869977", "0.47784016", "0.47774154", "0.47464025", "0.47412333", "0.47309214", "0.47301534", "0.4724261", "0.47057337", "0.47004306", "0.4693328" ]
0.58718246
0
Turn a record into a dictionary of fields which can be reconstituted within the client
def recordToDict(self, record): fields = {} if record is not None: for field, value in record.fields.iteritems(): # FIXME: need to sort out dealing with enormous groups; we # can ignore these when sending AMP responses because the # client will always fetch members via a members( ) AMP # command. if field.name in (u"memberDNs", u"memberUIDs"): continue valueType = record.service.fieldName.valueType(field) if valueType in (unicode, bool): fields[field.name] = value elif valueType is uuid.UUID: fields[field.name] = str(value) elif issubclass(valueType, (Names, NamedConstant)): fields[field.name] = value.name if value else None return fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_record(record):\n automatic_fields = ['created_at', 'modified_at']\n record = serialize_fields(filter_out_dict_keys(record, automatic_fields))\n\n return record", "def _sanitise_fields(self, record):\n sanitised = {}\n for k, v in record.items():\n new_key = k.replace('(', '_').replace(')', '_')\n sanitised[new_key] = v\n return sanitised", "def _postprocess_record(record, hide=_CONFIDENTIAL_FIELDS):\n record = hide_confidential_fields(record, hide)\n record = unserialize_fields(record, hide)\n\n convert_float_timestamp2str(record)\n\n return record", "def dict_json(self, record):\n json_dict = {}\n\n fields_dict = record.fields_get()\n\n for name, field in fields_dict.items():\n if eval('record.' + name):\n # id and name (if exists) for M2O, O2M, M2M\n if field['type'] == 'many2one':\n json_dict[name] = {\n 'id': eval('record.' + name + '.id')\n }\n sub_fields_dict = eval('record.' + name + \".fields_get()\")\n if 'name' in sub_fields_dict and sub_fields_dict['name']['type'] in ['char', 'text', 'html']:\n json_dict[name]['name'] = eval('record.' + name + '.name')\n elif field['type'] == 'many2many' or field['type'] == 'one2many':\n json_dict[name] = []\n for sub_rec in eval('record.' + name):\n element = {'id': sub_rec.id}\n sub_fields_dict = sub_rec.fields_get()\n if 'name' in sub_fields_dict and sub_fields_dict['name']['type'] in ['char', 'text', 'html']:\n element['name'] = sub_rec.name\n\n json_dict[name].append(element)\n # if binary, change it in string\n elif field['type'] == 'binary':\n json_dict[name] = eval('record.' + name).decode('utf-8') if type(eval('record.' + name)) is bytes else eval('record.' + name)\n # if other, the value\n else:\n json_dict[name] = eval('record.' + name)\n\n return json_dict", "async def transform_record(db_pool, record):\n\n # Before creating the dict, we want to get the stable_id frm the DB\n async with db_pool.acquire(timeout=180) as connection:\n try: \n query = f\"\"\"SELECT stable_id, access_type\n FROM beacon_dataset\n WHERE id={dict(record).pop(\"dataset_id\")};\n \"\"\"\n statement = await connection.prepare(query)\n extra_record = await statement.fetchrow()\n except Exception as e:\n raise BeaconServerError(f'Query metadata (stableID) DB error: {e}') \n\n response = dict(record)\n\n response.pop(\"id\")\n response[\"datasetId\"] = dict(extra_record).pop(\"stable_id\") \n response[\"internalId\"] = response.pop(\"dataset_id\")\n response[\"exists\"] = True\n response[\"variantCount\"] = response.pop(\"variant_cnt\") \n response[\"callCount\"] = response.pop(\"call_cnt\") \n response[\"sampleCount\"] = response.pop(\"sample_cnt\") \n response[\"frequency\"] = 0 if response.get(\"frequency\") is None else float(response.pop(\"frequency\"))\n response[\"numVariants\"] = 0 if response.get(\"num_variants\") is None else response.pop(\"num_variants\")\n response[\"info\"] = {\"access_type\": dict(extra_record).pop(\"access_type\")} \n \n return response", "def record_to_dict(record, ctx):\n return dict(src_ip=record.src_ip,\n dst_ip=record.dst_ip)", "def __convert( source ):\n # Just in case things get this far but we don't know about the record\n if source['recordType'] not in definitions.RECORDS:\n return {\n 'rec_type': source['recordType']\n }\n\n # Create a flat wrapper\n record = estreamer.common.Flatdict( source )\n\n # Transform\n output = __selectWithNewKeys( record )\n\n return output", "def put_record(self, record):\r\n row = [record.get(field) for field in self.fields.names()]\r\n\r\n self.put(row)", "def 
record_to_dict(f_record, key_name: str):\n return_dict = {}\n for record in f_record:\n key = ''\n for f, v in record.items():\n if f == key_name:\n key = v\n else:\n try:\n return_dict[key].update({f: v})\n except KeyError:\n return_dict[key] = {f: v}\n return return_dict", "def mapLogRecord(self, record):\n newrec = record.__dict__\n for p in self.params:\n newrec[p] = self.params[p]\n maxParamLength = 4000\n # truncate and clean the message from non-UTF-8 characters\n try:\n newrec['msg'] = newrec['msg'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n try:\n newrec['message'] = newrec['message'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n return newrec", "def map_record(row: DLCSRecord, solr_client: Solr, config: typing.Dict) -> UrsusRecord: # pylint: disable=too-many-statements\n record: UrsusRecord = {\n field_name: map_field_value(row, field_name, config=config)\n for field_name in mapper.FIELD_MAPPING\n }\n\n # THUMBNAIL\n record[\"thumbnail_url_ss\"] = (\n record.get(\"thumbnail_url_ss\")\n or thumbnail_from_child(record, config=config)\n or thumbnail_from_manifest(record)\n )\n\n # COLLECTION NAME\n if \"Parent ARK\" in row and row[\"Parent ARK\"] in config[\"collection_names\"]:\n dlcs_collection_name = config[\"collection_names\"][row[\"Parent ARK\"]]\n record[\"dlcs_collection_name_tesim\"] = [dlcs_collection_name]\n\n # FIELDS\n record[\"uniform_title_sim\"] = record.get(\"uniform_title_tesim\")\n record[\"architect_sim\"] = record.get(\"architect_tesim\")\n record[\"author_sim\"] = record.get(\"author_tesim\")\n record[\"illuminator_sim\"] = record.get(\"illuminator_tesim\")\n record[\"scribe_sim\"] = record.get(\"scribe_tesim\")\n record[\"rubricator_sim\"] = record.get(\"rubricator_tesim\")\n record[\"commentator_sim\"] = record.get(\"commentator_tesim\")\n record[\"translator_sim\"] = record.get(\"translator_tesim\")\n record[\"lyricist_sim\"] = record.get(\"lyricist_tesim\")\n record[\"composer_sim\"] = record.get(\"composer_tesim\")\n record[\"illustrator_sim\"] = record.get(\"illustrator_tesim\")\n record[\"editor_sim\"] = record.get(\"editor_tesim\")\n record[\"calligrapher_sim\"] = record.get(\"calligrapher_tesim\")\n record[\"engraver_sim\"] = record.get(\"engraver_tesim\")\n record[\"printmaker_sim\"] = record.get(\"printmaker_tesim\")\n record[\"human_readable_language_sim\"] = record.get(\"human_readable_language_tesim\")\n record[\"names_sim\"] = name_fields(record)\n record[\"keywords_sim\"] = keywords_fields(record)\n record[\"collection_sim\"] = record.get(\"collection_ssi\")\n # explicit\n record[\"features_sim\"] = record.get(\"features_tesim\")\n # incipit\n # inscription\n record[\"script_sim\"] = record.get(\"script_tesim\")\n record[\"writing_system_sim\"] = record.get(\"writing_system_tesim\")\n record[\"year_isim\"] = year_parser.integer_years(record.get(\"normalized_date_tesim\"))\n record[\"date_dtsim\"] = solr_transformed_dates(solr_client,\n (date_parser.get_dates(record.get(\"normalized_date_tesim\"))))\n record[\"place_of_origin_sim\"] = record.get(\"place_of_origin_tesim\")\n record[\"associated_name_sim\"] = record.get(\"associated_name_tesim\")\n record[\"form_sim\"] = record.get(\"form_tesim\")\n record[\"support_sim\"] = record.get(\"support_tesim\")\n record[\"genre_sim\"] = record.get(\"genre_tesim\")\n record[\"subject_sim\"] = record.get(\"subject_tesim\")\n record[\"location_sim\"] = record.get(\"location_tesim\")\n record[\"named_subject_sim\"] = 
record.get(\"named_subject_tesim\")\n record[\"human_readable_resource_type_sim\"] = record.get(\"resource_type_tesim\")\n record[\"member_of_collections_ssim\"] = record.get(\"dlcs_collection_name_tesim\")\n\n # SINAI INDEX\n record[\"header_index_tesim\"] = header_fields(record)\n record[\"name_fields_index_tesim\"] = name_fields_index(record)\n\n # SORT FIELDS\n titles = record.get(\"title_tesim\")\n if isinstance(titles, typing.Sequence) and len(titles) >= 1:\n record[\"sort_title_ssort\"] = titles[0]\n\n # used a solr copyfield for shelfmark sorting\n # shelfmarks = record.get(\"shelfmark_ssi\")\n # print(shelfmarks)\n # if isinstance(shelfmarks, typing.Sequence) and len(shelfmarks) >= 1:\n # print(shelfmarks[0])\n # record[\"shelfmark_aplha_numeric_ssort\"] = shelfmarks[0]\n\n# -----------------------------------------------------------------------\n years = record.get(\"year_isim\")\n if isinstance(years, typing.Sequence) and len(years) >= 1:\n record[\"sort_year_isi\"] = min(years)\n\n dates = record.get(\"date_dtsim\")\n if isinstance(dates, typing.Sequence) and len(dates) >= 1:\n record[\"date_dtsort\"] = dates[0]\n return record", "def dump_record(record):\n rec = E.record()\n\n leader = record.get('leader')\n if leader:\n rec.append(E.leader(leader))\n\n if isinstance(record, GroupableOrderedDict):\n items = record.iteritems(with_order=False, repeated=True)\n else:\n items = iteritems(record)\n\n for df, subfields in items:\n # Control fields\n if len(df) == 3:\n if isinstance(subfields, string_types):\n controlfield = E.controlfield(subfields)\n controlfield.attrib['tag'] = df[0:3]\n rec.append(controlfield)\n elif isinstance(subfields, (list, tuple, set)):\n for subfield in subfields:\n controlfield = E.controlfield(subfield)\n controlfield.attrib['tag'] = df[0:3]\n rec.append(controlfield)\n else:\n # Skip leader.\n if df == 'leader':\n continue\n\n if not isinstance(subfields, (list, tuple, set)):\n subfields = (subfields,)\n\n df = df.replace('_', ' ')\n for subfield in subfields:\n if not isinstance(subfield, (list, tuple, set)):\n subfield = [subfield]\n\n for s in subfield:\n datafield = E.datafield()\n datafield.attrib['tag'] = df[0:3]\n datafield.attrib['ind1'] = df[3]\n datafield.attrib['ind2'] = df[4]\n\n if isinstance(s, GroupableOrderedDict):\n items = s.iteritems(with_order=False, repeated=True)\n elif isinstance(s, dict):\n items = iteritems(s)\n else:\n datafield.append(E.subfield(s))\n\n items = tuple()\n\n for code, value in items:\n if not isinstance(value, string_types):\n for v in value:\n datafield.append(E.subfield(v, code=code))\n else:\n datafield.append(E.subfield(value, code=code))\n\n rec.append(datafield)\n return rec", "def _parse_record(self, record, customization=None):\n d = {}\n\n if not record.startswith('@'):\n logger.debug('The record does not start with @. 
Return empty dict.')\n return {}\n\n # if a comment record, add to bib_database.comments\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n\n self.bib_database.comments.append(re.search('\\{(.*)\\}', record, re.DOTALL).group(1))\n\n logger.debug('Return an empty dict')\n return {}\n\n # if a preamble record, add to bib_database.preambles\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n\n self.bib_database.preambles.append(re.search('\\{(.*)\\}', record, re.DOTALL).group(1))\n\n logger.debug('Return an empty dict')\n return {}\n\n # prepare record\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip('\\n')\n # treat the case for which the last line of the record\n # does not have a coma\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug('Missing coma in the last line of the record. Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n\n # if a string record, put it in the replace_dict\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for i in record.split('{', 1)[1].strip('\\n').strip(',').strip('}').split('=')]\n key = key.lower() # key is case insensitive\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower() not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val.lower()]\n logger.debug('Return a dict')\n return d\n\n # for each line in record\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = \"\"\n inval = \"\"\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n # TODO: We may check that the keyword belongs to a known type\n if kv.startswith('@') and not inkey:\n # it is the start of the record - set the bibtype and citekey (id)\n logger.debug('Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book',\n 'booklet',\n 'conference',\n 'inbook',\n 'incollection',\n 'inproceedings',\n 'manual',\n 'mastersthesis',\n 'misc',\n 'phdthesis',\n 'proceedings',\n 'techreport',\n 'unpublished'):\n logger.warning('Entry type %s not standard. 
Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n # it is a line with a key value pair on it\n logger.debug('Line contains a key-pair value and the key is not stored yet.')\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n # if it looks like the value spans lines, store details for next loop\n if (val.count('{') != val.count('}')) or (val.startswith('\"') and not val.replace('}', '').endswith('\"')):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug('Continues the previous line to complete the key pair value...')\n # if this line continues the value from a previous line, append\n inval += ', ' + kv\n # if it looks like this line finishes the value, store it and clear for next loop\n if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('\"') and inval.endswith('\"')):\n logger.debug('This line represents the end of the current key-pair value')\n d[inkey] = self._add_val(inval)\n inkey = \"\"\n inval = \"\"\n else:\n logger.debug('This line does NOT represent the end of the current key-pair value')\n\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n # apply any customizations to the record object then return it\n logger.debug('Apply customizations and return dict')\n return customization(d)", "def sanitize_record(record, table):\n try:\n columns = table.columns\n except AttributeError:\n columns = vars(table)\n return {key: value for key, value in record.items() if key in columns}", "def __build_info(self, obj: Object, record: TNSRecord) -> dict:\n type_id = self.__get_type_id(record)\n redshift = record.redshift\n type_changed = type_id != obj.type_id\n redshift_changed = redshift != obj.redshift\n if type_changed or redshift_changed: # keep history of previous values\n return {\n 'type_id': type_id, 'redshift': redshift, 'aliases': {**obj.aliases, 'iau': record.name},\n 'history': self.__build_history(obj),\n 'data': {**obj.data, 'tns': record.to_json()}\n }\n elif 'iau' not in obj.aliases:\n return {\n 'aliases': {**obj.aliases, 'iau': record.name},\n 'data': {**obj.data, 'tns': record.to_json()}\n }\n else:\n return {}", "def unserialize_fields(record, hide=_CONFIDENTIAL_FIELDS,\n fields=_SERIALIZED_FIELDS):\n keys = list(record.keys())\n keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))\n\n return merge_dicts(record, {\n key: hide_confidential_fields(_unserialize(record[key]), hide)\n for key in keys if record[key]\n })", "def record_dict(self):\n return {p.key: getattr(self, p.key) for p in self.__mapper__.attrs}", "def getrecord_fetcher(record_uuid):\n record = current_oaiserver.record_cls.get_record(record_uuid)\n record_dict = record.dumps()\n record_dict[\"updated\"] = record.updated\n return record_dict", "def updated_full_record(full_record):\n full_record[\"access\"][\"status\"] = \"embargoed\"\n full_record[\"created\"] = \"2023-03-23T00:00:00.000000+00:00\"\n full_record[\"id\"] = \"abcde-fghij\"\n full_record[\"metadata\"][\"resource_type\"][\"id\"] = \"other\"\n\n return full_record", "def customizations(record):\n\n record = type(record)\n # record = author(record)\n record = 
convert_to_unicode(record)\n # record = editor(record)\n # record = journal(record)\n # record = keyword(record)\n # record = link(record)\n # record = page_double_hyphen(record)\n # record = doi(record)\n return record", "def extra_from_record(self, record):\n return {\n attr_name: record.__dict__[attr_name]\n for attr_name in record.__dict__\n if attr_name not in BUILTIN_ATTRS\n }", "def serialize_fields(record, fields=_SERIALIZED_FIELDS):\n keys = list(record.keys())\n keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))\n\n return merge_dicts(record, {\n key: _serialize(record[key])\n for key in keys if record[key] is not None\n })", "def field_wrapper(field):\n return {'field': field}", "def get_record_specific_answer_fields(rr):\n if rr.rdtype == rdatatype.A or rr.rdtype == rdatatype.AAAA:\n return {\"Address\": rr.address}\n\n if (rr.rdtype == rdatatype.CNAME or\n rr.rdtype == rdatatype.PTR or\n rr.rdtype == rdatatype.NS):\n return {\"Target\": str(rr.target)}\n\n if rr.rdtype == rdatatype.MX:\n return {\n \"Preference\": rr.preference,\n \"MailExchanger\": str(rr.exchange)\n }\n\n if rr.rdtype == rdatatype.SOA:\n return {\n \"MasterServerName\": str(rr.mname),\n \"MaintainerName\": str(rr.rname),\n \"Serial\": rr.serial,\n \"Refresh\": rr.refresh,\n \"Retry\": rr.retry,\n \"Expire\": rr.expire,\n \"NegativeTtl\": rr.minimum # Note: keyname changes in JSON RFC\n }\n\n if rr.rdtype == rdatatype.TXT:\n # TXT was not described in the JSON RFC\n return {\n \"TxtData\": str(rr),\n }\n\n if rr.rdtype == rdatatype.NAPTR:\n return {\n \"Flags\": rr.flags,\n \"Order\": rr.order,\n \"Service\": rr.service,\n \"Preference\": rr.preference,\n \"Regexp\": rr.regexp,\n \"Replacement\": str(rr.replacement)\n }\n\n if rr.rdtype == rdatatype.LOC:\n return {\n \"Altitude\": rr.altitude / 100, # .altitude is in centimeters\n \"Longitude\": rr.longitude,\n \"Latitude\": rr.latitude\n }\n\n return {}", "def _reconstruct_metadata(metadata_record):\n rec = metadata_record[\"m\"]\n return {key: val for key, val in rec.items() if key != \"state\"}", "def parse_record(self, in_rec):\n \n self.metadata = {}\n for k, v in in_rec.items():\n if k == 'parameters':\n for m, mv in v.items():\n self.metadata[m] = mv\n else:\n self.metadata[k] = v\n \n if self.image is not None:\n self.metadata['imageUrl'] = self.image.get_metadata(\\\n 'thisRecordUrl')\n self.metadata['imageMetadata'] = self.image.get_metadata(\\\n 'metadataUrl')\n self.metadata['imageStartDate'] = self.image.get_date()\n \n if 'dateRapiOrdered' not in self.metadata.keys():\n self.metadata['dateRapiOrdered'] = self.image.get_metadata(\\\n 'dateRapiOrdered')\n self.metadata['orderSubmitted'] = self.image.get_metadata(\\\n 'orderSubmitted')", "def Result(row, schema):\r\n return dict(zip(schema.fields(), row))", "def parse_record(self, in_rec):\n \n geo_util = geo.Geo()\n \n self.metadata = {}\n for k, v in in_rec.items():\n if k == 'metadata2': continue\n elif k == 'geometry':\n self.metadata['geometry'] = v\n coords = v['coordinates']\n self.metadata['wkt'] = geo_util.convert_imageGeom(\\\n coords, 'wkt')\n elif k == 'metadata':\n for m in v:\n key = to_camelCase(m[0])\n self.metadata[key] = m[1]\n else:\n self.metadata[k] = v", "def _extract_subdict(self, rec, keys):\n d = {}\n d['msg_id'] = rec['msg_id']\n for key in keys:\n d[key] = rec[key]\n return deepcopy(d)", "def convert_fields(fields, _fields):\n mapper = {\n \"id\": \"local_id\",\n \"local_id\": \"id\"\n }\n fields = deepcopy(fields)\n for field in fields:\n if 
field['name'] in _fields:\n field['name'] = mapper[field['name']]\n return fields" ]
[ "0.75376725", "0.6702061", "0.6572023", "0.65429646", "0.64269215", "0.6398658", "0.6394913", "0.63934547", "0.6369725", "0.6368727", "0.63439614", "0.62543195", "0.62466395", "0.61455506", "0.61025786", "0.6092736", "0.60618514", "0.6054564", "0.6033646", "0.6030067", "0.6024942", "0.60058755", "0.5983611", "0.5980088", "0.5959179", "0.5929218", "0.5906088", "0.5900864", "0.58783257", "0.5872588" ]
0.7432842
1
Coerce the given C{val} to the type of C{configDict[key]}
def _coerceOption(self, configDict, key, value): if key in configDict: if isinstance(configDict[key], bool): value = value == "True" elif isinstance(configDict[key], (int, float, long)): value = type(configDict[key])(value) elif isinstance(configDict[key], (list, tuple)): value = value.split(',') elif isinstance(configDict[key], dict): raise UsageError( "Dict options not supported on the command line" ) elif value == 'None': value = None return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coerceOption(configDict, key, value):\n if key in configDict:\n if isinstance(configDict[key], bool):\n value = value == \"True\"\n\n elif isinstance(configDict[key], (int, float, long)):\n value = type(configDict[key])(value)\n\n elif isinstance(configDict[key], (list, tuple)):\n value = value.split(\",\")\n\n elif isinstance(configDict[key], dict):\n raise UsageError(\n \"Dict options not supported on the command line\"\n )\n\n elif value == \"None\":\n value = None\n\n return value", "def _str_to_val(self, value):\n kind, value = value.split(': ', 1)\n\n # Lists and dictionaries are special case\n if kind in ('L', 'D'):\n return eval(value)\n\n if kind in TYPE_MAPPING.keys():\n if kind == 'B':\n if value != 'True':\n return False\n\n value = TYPE_MAPPING[kind](value)\n\n return value\n else:\n raise ValueError(\"An Unknown type of setting was found!\")", "def _get_config_val(self, config: dict, config_key: str):\n config_val = config.get(config_key, {})\n if isinstance(config_val, list) and isinstance(config_val[0], dict):\n config_val = dict(ChainMap(*config_val))\n return config_val", "def convert_val(val_str, val):\n if val is bool:\n if 'true' in val_str.lower(): val_str = 'true' \n else: val_str = '' # otherwise set to false\n val_type = val\n try:\n return val_type(val_str)\n except ValueError:\n # Can it be a float ?\n return val_type(float(val_str))", "def _conversion(self, val):\n if (self.__set_type == \"str\"):\n return val\n else:\n try:\n return ast.literal_eval(val)\n except ValueError:\n return None", "def __convert_value(\n key: str,\n value: any,\n data_type: type\n) -> any:\n\n if value is None:\n return None\n\n if isinstance(value, data_type):\n return value\n\n # convert any integers if a float is expected. This can happen during\n # JSON encoding and decoding.\n if data_type == float and isinstance(value, int):\n return float(value)\n\n # datetime objects are supplied as a JSON (JavaScript) string.\n if data_type == datetime and isinstance(value, str):\n return parse_time(value)\n\n # enumerations are supplied as strings\n if issubclass(data_type, NebEnum) and isinstance(value, str):\n return getattr(data_type, \"parse\")(value)\n\n # dicts are interpreted as objects, so we instantiate a new object from\n # the provided dictionary. 
This may fail if the supplied data_type does\n # not have a constructor that accepts a dict.\n if isinstance(value, dict):\n return data_type(value)\n\n # if we got to this place an invalid data type was supplied and we raise\n # a TypeError.\n error = f\"{key} of invalid type {data_type}, got {value.__class__}\"\n raise TypeError(error)", "def castInputToBuiltInType(key, value):\n\n try:\n if key in ['bind_npi', 'dst_npi', 'src_npi']:\n return addr_npi_value_map[value]\n elif key in ['bind_ton', 'dst_ton', 'src_ton']:\n return addr_ton_value_map[value]\n elif key == 'ripf':\n return replace_if_present_flap_value_map[value]\n elif key == 'priority':\n return priority_flag_value_map[value]\n elif key in ['con_fail_retry', 'con_loss_retry', 'ssl']:\n if value == 'yes':\n return True\n elif value == 'no':\n return False\n else:\n raise KeyError('Boolean value must be expressed by yes or no.')\n elif (key == 'loglevel' and\n value not in [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]):\n raise KeyError('loglevel must be numeric value of 10, 20, 30, 40 or 50.')\n elif isinstance(value, str) and value.lower() == 'none':\n value = None\n except KeyError:\n raise UnknownValue('Unknown value for key %s: %s' % (key, value))\n\n return value", "def subcfg2instance(\n cfg: DictConfig,\n keyname: str,\n valueidx: int,\n namespace: list = [],\n **kwargs: dict\n):\n # kwargs to variables\n for k,v in kwargs.items():\n globs = globals()\n locs = locals()\n exec(f'{k} = v', globs, locs)\n # multiple or one option\n if type(cfg[keyname]) == ListConfig:\n assert type(valueidx) == int, f'{keyname} has multiple options, but no index is givin.'\n c = cfg[keyname][valueidx]\n else:\n raise ValueError(f'expected ListConfig, but got {type(cfg[keyname])} in cfg {keyname} value')\n # if int, return directly\n if type(c) == int or type(c) == float:\n return c\n elif type(c) == ListConfig:\n return list(c)\n # if str enclosed in \", return as str\n elif type(c) == str and c.startswith(\"'\") and c.endswith(\"'\"):\n return c[1:-1]\n # if str, regarded as classname\n elif type(c) == str:\n classname = c\n arguments = {}\n elif type(c) == DictConfig:\n classname = list(c.keys())[0]\n assert type(classname) == str, f'expected str, but got {type(classname)} in {keyname} classname'\n arguments = list(c.values())[0]\n assert type(arguments) == DictConfig, f'expected DictConfig, but got {type(arguments)} in {keyname} arguments'\n globs = globals()\n locs = locals()\n tmp = {}\n for k,v in arguments.items():\n assert type(k) == str, f'expected str, but got {type(k)} in {keyname} argument key: {k}'\n if type(v) == int or type(v) == float or type(v) == bool:\n tmp[k] = v\n elif type(v) == str and v.startswith(\"'\") and v.endswith(\"'\"):\n tmp[k] = v[1:-1]\n elif type(v) == str:\n tmp[k] = eval(v, globs, locs)\n else:\n raise ValueError(f'expected ListConfig, DictConfig or str, but got {type(v)} in {keyname} argument value: {v}')\n arguments = tmp\n else:\n raise ValueError(f'expected DictConfig or str, but got {type(c)} in {c}')\n # get the operation corresponding to the class name\n return _classname2instance(classname, arguments, namespace)", "def __setitem__(self, key, value):\n if key not in self._type_converter.keys():\n self._dict[key] = value\n self._type_converter[key] = to_type_converter(value)\n else:\n self._dict[key] = self._type_converter[key](value)", "def _convert_dtype_value(val):\n\n convert_dtype_map = {\n 21: \"int8\",\n 20: \"uint8\",\n 6: \"float64\",\n 5: \"float32\",\n 
4: \"float16\",\n 3: \"int64\",\n 2: \"int32\",\n 1: \"int16\",\n 0: \"bool\",\n }\n if val not in convert_dtype_map:\n msg = f\"Paddle data type value {val} is not handled yet.\"\n raise NotImplementedError(msg)\n return convert_dtype_map[val]", "def convert_value_to_type_of_default_type(\n key: ConfigKey, v: ConfigValue, default_value: ConfigValue\n) -> ConfigValue:\n if default_value is not None:\n if type(default_value) == type(True) and type(v) != type(True):\n allowed_values = [\"true\", \"false\", \"1\", \"0\", \"yes\", \"no\", \"y\", \"n\"]\n if str(v).lower() not in allowed_values:\n msg = (\n f\"Provided configuration \"\n f'\"{key}={v}\" is invalid. Allowed values for {key} are {allowed_values}'\n )\n raise VdkConfigurationError(msg)\n v = str(v).lower() in [\"true\", \"1\", \"yes\", \"y\"]\n else:\n try:\n v = type(default_value)(v) # cast to type of default_value:\n except ValueError:\n msg = (\n f'Provided configuration \"{key}={v}\" is invalid. '\n f'Cannot cast \"{v}\" to {str(type(default_value).__name__)}'\n )\n raise VdkConfigurationError(msg)\n return v", "def __init__(self, val: Dict[str, Any]):\n self.val = val", "def _convert(self, val):\n if isinstance(val, dict) and not isinstance(val, DotDict):\n return DotDict(val), True\n elif isinstance(val, list) and not isinstance(val, DotList):\n return DotList(val), True\n\n return val, False", "def _adaptConfigurationValue (cls, value : String) -> Object:\n\n Logging.trace(\">>: %r\", value)\n uppercasedValue = value.upper()\n\n if uppercasedValue in cls._validBooleanValueNames:\n result = (uppercasedValue in cls._trueBooleanValueNames)\n elif (cls._integerRegExp.match(value)\n or cls._hexIntegerRegExp.match(value)): \n result = int(value)\n elif cls._realRegExp.match(value):\n result = float(value)\n else:\n result = value\n \n Logging.trace(\"<<: %r\", result)\n return result", "def override(self, config_dict_or_str):\n if isinstance(config_dict_or_str, str):\n config_dict = self.parse_from_str(config_dict_or_str)\n elif isinstance(config_dict_or_str, dict):\n config_dict = config_dict_or_str\n else:\n raise ValueError('Unknown value type: {}'.format(config_dict_or_str))\n\n self._update(config_dict, allow_new_keys=False)", "def _val_to_str(self, value):\n for k, v in TYPE_MAPPING.iteritems():\n if v == type(value):\n if v == list:\n return k + \": \" + repr(value)\n else:\n return k + \": \" + str(value)\n\n raise ValueError(\"We don't know how to store that \"\n \"kind of setting: \", type(value))", "def _getVal(tmpval):\n aval = None\n if type(tmpval) == objc.pyobjc_unicode:\n aval = tmpval.lstrip('_$!<').rstrip('>!$_')\n elif issubclass(tmpval.__class__, NSDate):\n aval = tmpval.description()\n elif type(tmpval) == NSCFDictionary:\n aval = dict([(k.lower(), tmpval[k]) for k in tmpval.keys()])\n return aval", "def from_config(key,val,globals,locals,path):\n try:\n if hasattr(val,'_result'):\n if superdebug:\n if isinstance(val,str):\n _logger.debug(f'{path}: evaluate _result() of a {val!r}')\n else:\n _logger.debug(f'{path}: evaluate _result() of a {type(val).__name__}')\n result=val._result(globals,locals)\n if superdebug and hasattr(result,'_path'):\n _logger.debug(f'{path}: result is at path {result._path}')\n return from_config(key,result,globals,locals,path)\n return val\n except(CROWException) as ce:\n _logger.error(f'{path}: {type(ce).__name__} error {str(ce)[:80]}')\n raise\n except(KeyError,NameError,AttributeError) as ae:\n _logger.error(f'{path}: {type(ae).__name__} error {str(ae)[:80]}')\n raise 
CalcKeyError(f'{path}: {type(val).__name__} {str(val)[0:80]} - '\n f'{type(ae).__name__} {str(ae)} --in-- '\n f'{{{\", \".join([ k for k in locals.keys() ])}}}')\n except(SyntaxError,TypeError,IndexError) as ke:\n if 'f-string: unterminated string' in str(ke):\n _logger.error(f'{path}: {type(ke).__name__} f string error {str(ke)[:80]}')\n# raise CalcKeyError(f'{path}: {type(val).__name__} \n raise CalcKeyError(f'''{path}: {type(val).__name__}: probable unbalanced parentheses ([{{\"''\"}}]) in {str(val)[0:80]} {str(ke)[:80]}''')\n _logger.error(f'{path}: {type(ke).__name__} error {str(ke)[:80]}')\n raise CalcKeyError(f'{path}: {type(val).__name__} {str(val)[0:80]} - '\n f'{type(ke).__name__} {str(ke)[:80]}')\n except RecursionError as re:\n raise CalcRecursionTooDeep(\n f'{path}: !{key} {type(val).__name__}')", "def LoadConfigFromString(str_val, config_class=None, factory=None):\r\n if factory is None:\r\n factory = resources.DefaultFactory()\r\n if config_class is None:\r\n config_class = Config\r\n config_dict = yaml.load(str_val)\r\n if not type(config_dict) == dict:\r\n raise errors.ConfigError(\"Config must be a YAML dictionary\")\r\n return config_class.FromDict(config_dict, factory)", "def _convert_settings_to_dagster_config(d: dict) -> None:\n for k, v in d.items():\n if isinstance(v, dict):\n _convert_settings_to_dagster_config(v)\n else:\n try:\n d[k] = Field(type(v), default_value=v)\n except DagsterInvalidDefinitionError:\n # Dagster config accepts a valid dagster types.\n # Most of our settings object properties are valid types\n # except for fields like taxonomy which are the AnyHttpUrl type.\n d[k] = Field(Any, default_value=v)", "def _map_config_values(config, fn):\n if isinstance(config, dict):\n return {key: _map_config_values(value, fn) for key, value in config.items()}\n elif isinstance(config, list):\n return [_map_config_values(elem, fn) for elem in config]\n else:\n return fn(config)", "def transform_to_config(inputdic):\n result = {}\n for key, value in inputdic.iteritems():\n if key in OPT_CONFIG_MAP:\n result[OPT_CONFIG_MAP[key]] = value\n return result", "def set_config(param, value):\n _config = loadConfig()\n _paramField = rgetattr(_config, param)\n # define types that can be cast from command line input\n primitive = (int, str, bool)\n\n def is_primitiveType(_type):\n return _type in primitive\n\n # cast type\n if type(_paramField) == type(Union) and is_primitiveType(type(_paramField).__args__[0]):\n value = type(_paramField).__args__[0](value)\n elif is_primitiveType(type(_paramField)):\n value = type(_paramField)(value)\n\n try:\n rsetattr(_config, param, value)\n except TypeError as err:\n click.echo(err)\n saveConfig(_config)", "def _coerce_mapping(\n self, value: Any, origin: Type, annotation: Mapping[Any, Any]\n ) -> Mapping:\n args = self.get_args(annotation)\n value = self._coerce_builtin(value, origin)\n if args:\n key_type, value_type = args\n return type(value)(\n (self.coerce_value(x, key_type), self.coerce_value(y, value_type))\n for x, y in value.items()\n )\n\n return value", "def test_config_types() -> None:\n config = Config(\n environ={\"STR\": \"some_str_value\", \"STR_CAST\": \"some_str_value\", \"BOOL\": \"true\"}\n )\n\n assert_type(config(\"STR\"), str)\n assert_type(config(\"STR_DEFAULT\", default=\"\"), str)\n assert_type(config(\"STR_CAST\", cast=str), str)\n assert_type(config(\"STR_NONE\", default=None), Optional[str])\n assert_type(config(\"STR_CAST_NONE\", cast=str, default=None), Optional[str])\n 
assert_type(config(\"STR_CAST_STR\", cast=str, default=\"\"), str)\n\n assert_type(config(\"BOOL\", cast=bool), bool)\n assert_type(config(\"BOOL_DEFAULT\", cast=bool, default=False), bool)\n assert_type(config(\"BOOL_NONE\", cast=bool, default=None), Optional[bool])\n\n def cast_to_int(v: Any) -> int:\n return int(v)\n\n # our type annotations allow these `cast` and `default` configurations, but\n # the code will error at runtime.\n with pytest.raises(ValueError):\n config(\"INT_CAST_DEFAULT_STR\", cast=cast_to_int, default=\"true\")\n with pytest.raises(ValueError):\n config(\"INT_DEFAULT_STR\", cast=int, default=\"true\")", "def test_dictionary_coerce():\n\n @type_checked\n def _run_test(something:{int: str}):\n for key, value in something.items():\n assert isinstance(key, int)\n assert isinstance(value, str)\n\n _run_test(something={123: \"abc\", 2314: 12312, \"123\": \"abc\"})", "def TranslateKeyValue(key, value):\n key = SETTINGS.get(key, key)\n if key not in SETTINGS_INVERSE:\n raise Exception(\"Didn't understand key %s\" % key)\n\n value = str(value)\n valueMap = VALUES.get(key, {})\n if valueMap:\n value = valueMap.get(value, value)\n if not value.isdigit() or int(value) < 0 or int(value) >= len(valueMap):\n raise Exception(\"Didn't understand value %s for key %s\" % (value, key))\n\n else:\n parts = (value[1:] if value.startswith('-') else value).split('.')\n error = None\n if len(parts) is 0:\n error = 'Empty'\n elif len(parts) > 2:\n error = 'Too many . in'\n elif not parts[0].isdigit():\n error = 'Non-digit in'\n elif len(parts) is 2 and not parts[1].isdigit():\n error = 'Non-digit in'\n\n if error:\n raise 'Exception: %s number %s for key %s' % (value, key)\n\n return key, value", "def __setitem__(\n self,\n key: str,\n val: ValidKVs,\n ) -> None:\n str_val = conv_kv(val)\n key_fold = key.casefold()\n for k in self._keys:\n if k.casefold() == key_fold:\n # Check case-insensitively for this key first\n orig_val = self._keys.get(k)\n self._keys[k] = str_val\n key = k\n break\n else:\n orig_val = self._keys.get(key)\n self._keys[key] = str_val\n\n # TODO: if 'mapversion' is passed and self is self.map.spawn, update version there.\n\n # Update the by_class/target dicts with our new value\n if key_fold == 'classname':\n _remove_copyset(self.map.by_class, orig_val or '', self)\n self.map.by_class[str_val].add(self)\n elif key_fold == 'targetname':\n _remove_copyset(self.map.by_target, orig_val, self)\n self.map.by_target[str_val].add(self)\n elif key_fold == 'nodeid':\n try:\n node_id = int(orig_val) # type: ignore # Using as a cast\n except (TypeError, ValueError):\n pass\n else:\n self.map.node_id.discard(node_id)\n try:\n node_id = int(val) # type: ignore # Using as a cast\n except (TypeError, ValueError):\n pass\n else:\n self._keys[key] = str(self.map.node_id.get_id(node_id))", "def catchInput(inDict, inKey, defaultVal):\n\n\t# TODO: correct error handling if default type is not recognized\n\t# TODO: check against lowercase'd strings so that inputs are not case sensitive. 
Do this for True/False too\n\t# TODO: instead of trusting user for NoneType, could also use NaN/Inf to indicate int/float defaults without passing a numerical default\n\t# \t\tor could just pass the actual default type lol, that'd be easier\n\n\tdefaultType = type(defaultVal)\n\ttry:\n\t\t# if NoneType passed as default, trust user\n\t\tif (defaultType == type(None)):\n\t\t\toutVal = inDict[inKey]\n\t\telse:\n\t\t\toutVal = defaultType(inDict[inKey])\n\texcept:\n\t\toutVal = defaultVal\n\n\treturn outVal", "def from_config_string(self, v: str) -> Any:\n try:\n v = eval(v)\n except Exception:\n pass\n return self.instance(v)" ]
[ "0.6642269", "0.64682454", "0.58132166", "0.5712655", "0.57071173", "0.5636776", "0.5635766", "0.56199664", "0.5616287", "0.5596194", "0.5555847", "0.55442125", "0.5527948", "0.5423641", "0.5335233", "0.5314222", "0.529805", "0.5274796", "0.5247439", "0.52113086", "0.52068865", "0.51566523", "0.51538324", "0.5139774", "0.5103049", "0.5098194", "0.509512", "0.5053839", "0.5048862", "0.5042035" ]
0.66664517
0
Set an option to override a value in the config file. True, False, int, and float options are supported, as well as comma-separated lists. Only one option may be given for each option flag; however, multiple option flags may be specified.
def opt_option(self, option): if "=" in option: path, value = option.split('=') self._setOverride( DEFAULT_CONFIG, path.split('/'), value, self.overrides ) else: self.opt_option('%s=True' % (option,))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def opt_option(self, option):\n if \"=\" in option:\n path, value = option.split(\"=\")\n self.setOverride(\n DEFAULT_CONFIG,\n path.split(\"/\"),\n value,\n self.overrides\n )\n else:\n self.opt_option(\"{}=True\".format(option))", "def set_option(self, dest, value, force=True):\r\n if hasattr(self._option_values, dest) and not force:\r\n return\r\n setattr(self._option_values, dest, value)", "def setOption(self, name, value):\n name = name.lower()\n try:\n self.defaultOptions[name]\n except KeyError:\n Error(\"Option \\'%-30s\\' is not a valid %s option.\" % (\n name, self.name))\n\n # Make sure we are not trying to change an immutable option if\n # we are not allowed to.\n if self.solverCreated and name in self.imOptions:\n raise Error(\"Option '%-35s' cannot be modified after the solver \"\n \"is created.\" % name)\n\n # Now we know the option exists, lets check if the type is ok:\n if isinstance(value, self.defaultOptions[name][0]):\n # Just set:\n self.options[name] = [type(value), value]\n else:\n raise Error(\"Datatype for Option %-35s was not valid \\n \"\n \"Expected data type is %-47s \\n \"\n \"Received data type is %-47s\" % (\n name, self.defaultOptions[name][0], type(value)))", "def set_option(self, **kwargs):\n option = self.dismod_file.option\n unknowns = list()\n for name in kwargs.keys():\n if not (option.option_name == name).any():\n unknowns.append(name)\n if unknowns:\n raise KeyError(f\"Unknown options {unknowns}\")\n\n for name, value in kwargs.items():\n if isinstance(value, str):\n str_value = value\n elif isinstance(value, Iterable):\n str_value = \" \".join(str(x) for x in value)\n elif isinstance(value, bool):\n str_value = str(value).lower()\n elif value is None or isnan(value):\n str_value = None\n else:\n str_value = str(value)\n if str_value is not None:\n option.loc[option.option_name == name, \"option_value\"] = str_value\n else:\n option.loc[option.option_name == name, \"option_value\"] = nan\n option = option.reset_index(drop=True)\n option = option.assign(option_id=option.index)\n self.dismod_file.option = option", "async def _opt_set(self, ctx, option, value):\n try:\n guild_options = self.database.get_guild_options(ctx.guild.id)\n cur_val = getattr(guild_options, option)\n if isinstance(cur_val, (int, bool)):\n if value.upper() == \"ALLOW\" or value.upper() == \"TRUE\":\n value = True\n elif value.upper() == \"FORBID\" or value.upper() == \"FALSE\":\n value = False\n else:\n await ctx.send(\"Sorry, that option only accepts true or false values.\")\n return\n if isinstance(cur_val, str):\n value = utils.replace_escapes(value)\n setattr(guild_options, option, value)\n self.database.save_item(guild_options)\n await ctx.send(f\"Option {option} set to `{value}`\")\n except AttributeError:\n await ctx.send(\"I don't recognize that option.\")", "def setOption(self, name, value):\n petsc.optionsSetValue(name, value)\n return", "def add_override_flags(parser):\n override_group = parser.add_mutually_exclusive_group(required=False)\n override_group.add_argument('--override', action='store_true', dest='override',\n help='Allow overriding values in input file with values from CLI arguments. '\n 'Overriding values is disallowed by default. 
'\n 'Adding the --no-override flag explicitly disallows overriding values.')\n override_group.add_argument('--no-override', action='store_false', dest='override', help=argparse.SUPPRESS)", "def set_option(self, option, value):\n for option_dict in (\"_general_options\", \"_specific_options\"):\n option_dict = getattr(self, option_dict)\n if option in option_dict:\n if option_dict[option][\"divider\"] != 1:\n value /= float(option_dict[option][\"divider\"])\n setattr(self, \"_\" + option, value)\n break", "def set(self, option, value):\n\t\tself.config_parser.set(self.section_name, option, value)", "def set_option(self, optname, value, action=None, optdict=None):\n BaseChecker.set_option(self, optname, value, action, optdict)\n if optname == \"min-similarity-lines\":\n self.min_lines = self.config.min_similarity_lines\n elif optname == \"ignore-comments\":\n self.ignore_comments = self.config.ignore_comments\n elif optname == \"ignore-docstrings\":\n self.ignore_docstrings = self.config.ignore_docstrings\n elif optname == \"ignore-imports\":\n self.ignore_imports = self.config.ignore_imports", "def set_option(name, option):\n ffi.lib.LLVMPY_SetCommandLine(_encode_string(name),\n _encode_string(option))", "def set_option(self, option, value):\n if not self._options.has_key(option):\n raise KeyError, \"Invalid option: \" + option\n else:\n self._options[option] = value", "def set_option(self, name: str, value: str, section: str = None):\n # TODO: implement\n pass", "def setOption(self, pluginOptionDict):\n return True", "def options(self, value):\n self._options = value\n if self._options.get(\"legacy\"):\n self._options[\"extended\"] = False", "def override_from_parsed_args(self, parsed_args):\n arg_values = {\n o.name: getattr(parsed_args, o.name)\n for o in _OPTIONS\n if getattr(parsed_args, o.name, None) is not None\n }\n if arg_values:\n LOG.info('[config] updating from command line options')\n self.override(**arg_values)", "def set_option(k, v=\"True\"):\n if k == \"log\":\n OPTIONS_TABLE[k] = getattr(logging, v.upper(), None)\n elif k == \"backend\":\n OPTIONS_TABLE[k] = sdm_backends.Backends.get_backend_name(v)\n elif k == \"config\":\n OPTIONS_TABLE[k] = sdm_util.get_abs_path(v)", "def set_option(self, name, value):\n self._body[name] = value", "def add_override_argument(parser, *names, **kwargs):\r\n if not names:\r\n names = DEFAULT_OVERRIDE_OPTION_NAMES\r\n dest = kwargs.pop('dest', None)\r\n required = kwargs.pop('required', False)\r\n help = kwargs.pop('help', 'extra overrides to apply to the config')\r\n if kwargs:\r\n raise TypeError('add_override_argument() got an invalid keyword argument: %s' %\r\n list(kwargs)[0])\r\n\r\n ov_container = ConfigContainer()\r\n ov_container.get_metadata().is_override_set = True\r\n parser.add_argument(\r\n *names,\r\n dest=dest,\r\n default=ov_container,\r\n required=required,\r\n action=_add_to_override_set,\r\n type=_dict_from_string,\r\n help=help\r\n )", "def set_option(self, key, value):\n self.options[key] = value", "def set_override(self, name, override, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info['override'] = self._get_enforced_type_value(\n opt_info['opt'], override)\n opt_info['location'] = LocationInfo(\n Locations.set_override,\n _get_caller_detail(3), # this function has a decorator to skip\n )", "def set(self, option, value):\n section, option_ = self._get_section_option(option)\n try:\n val = self._cfg[section][option_].cls(value)\n if self._cfg[section][option_].validate:\n val = 
self._cfg[section][option_].validate(val)\n except (ValueError, ConfigValueError) as err:\n raise ConfigError(str(err), CONF_ERROR)\n # Do not write default values also skip identical values\n if not self._cfg[section][option_].default is None:\n old_val = self.dget(option)\n else:\n old_val = self.pget(option)\n if val == old_val:\n return\n if not RawConfigParser.has_section(self, section):\n self.add_section(section)\n RawConfigParser.set(self, section, option_, val)\n self._save_changes()", "def set_option(key: str, value: Any, where_defined: str = _USER_DEFINED) -> None:\n with _config_lock:\n # Ensure that our config files have been parsed.\n get_config_options()\n _set_option(key, value, where_defined)", "def set_option(opt_name, value):\n assert hasattr(Server, opt_name), (\n \"Attribute {} doesn't exists at \"\n \"Server class\"\n ).format(opt_name)\n\n setattr(Server, opt_name, value)", "def set_option(self, name, value):\n self._params[name] = value", "def set_option(self, name, value):\n self._params[name] = value", "def set(self, option_id, value, priority=1, source='unknown', entity=None):\n self._type_checker.assert_type(option_id, value, entity=entity)\n key = self._option_key(option_id, entity)\n self._config[key].add(value, priority, source)", "def set_config(self, **config_opt) -> None:\n for name, default in self.CONFIG_DEFAULTS.items():\n if name in config_opt:\n self.__setattr__(name, config_opt[name])\n elif name not in self.__dict__:\n self.__setattr__(name, default)", "def _override_opt(self, new_opt):\n model_args = {\n 'arch',\n 'encoder-embed-dim',\n 'encoder-layers',\n 'decoder-embed-dim',\n 'decoder-layers',\n 'decoder-out-embed-dim',\n 'decoder-attention',\n }\n\n for k, v in new_opt.items():\n if k not in model_args:\n # skip non-model args\n continue\n if k not in self.opt:\n print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))\n elif self.opt[k] != v:\n print('Overriding option [ {k}: {old} => {v}]'.format(\n k=k, old=self.opt[k], v=v))\n self.opt[k] = v\n return self.opt", "def set_option(config, key, interactive, logger, message=''):\n message = message or \"\\nSet %s to (%s) \\nor enter new option: \"\n value = config.get(key)\n name = key.upper()\n if interactive:\n value = raw_input(message % (name, value)) or value\n \n config[key] = value\n return config" ]
[ "0.78451264", "0.70681787", "0.6733727", "0.6711261", "0.66349", "0.6577663", "0.6528208", "0.64882463", "0.6443928", "0.64062303", "0.63511324", "0.63351065", "0.6329237", "0.6310009", "0.63006294", "0.6278292", "0.6224972", "0.62127936", "0.62003744", "0.6130664", "0.612441", "0.6118531", "0.6104329", "0.60995746", "0.6089505", "0.6089505", "0.60363096", "0.60211515", "0.6006536", "0.5991705" ]
0.7708194
1
Get all table rows from the first tbody element found in the soup parameter
def get_table_rows(soup): tbody = soup.find('tbody') return [tr.find_all('td') for tr in tbody.find_all('tr')]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table_by_id(soup, id):\n # dont include .tbody after the find() for some reason\n html_table = soup.find(id=id)\n if html_table is None:\n return None\n rows = html_table.find_all('tr')[1:]\n return [row.contents for row in rows]", "def process_table(soup):\n rests = []\n rows = soup.find('table',attrs={\"id\" : \"rest_list\"}).find_all('tr')\n for r in rows[1:len(rows)]:\n rdict = rest_row(r)\n if rdict:\n rests.append(rdict)\n return rests", "def extract_main_table_from_html(html):\n soup = bs(html, 'html.parser')\n table = soup.find('table')\n return(table)", "def parse_table_in_rows(self, table):\n parsed_table = []\n for tr in table.find_elements_by_tag_name('tr'):\n parsed_table.append(tr)\n return parsed_table", "def scrape_table_data(url):\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'html.parser')\n\n return soup.select('table.wikitable.sortable td')", "def body(self) -> ComponentTableBody:\n res = []\n raw_rows = self.wait_for_elements_by_tag_name('tr')[1:]\n\n for row in raw_rows:\n res.append(ComponentTableRow(row))\n\n return ComponentTableBody(res)", "def get_table_row_values(self):\n tag_items = self.soup.find_all(\"tr\")\n table_rows = []\n for tag_item in tag_items:\n tag_child_item_values = tag_item.find_all(\"td\")\n tag_item_child_values = []\n for tag_child_item_value in tag_child_item_values:\n tag_item_child_values.append(tag_child_item_value.text.strip())\n table_rows.append(tag_item_child_values)\n return table_rows", "def parse_soup(self, table):\n rows = table.find_all('tr')\n list_of_lists = list()\n time = pd.Timestamp('now')\n for row in rows:\n row_list = list()\n row_list.append(time)\n for td in row.find_all('td')[1:]:\n row_list.append(td.text)\n if td('a'):\n for a in td('a'):\n if a.get('href'):\n m = re.search('teamId\\=(\\d+)', a.get('href'))\n if m:\n row_list.append(m.group(1))\n list_of_lists.append(row_list)\n return [[y for y in x if y] for x in list_of_lists[3:]]", "def get_html_content():\n url = \"https://www.worldometers.info/coronavirus/\"\n req_data = requests.get(url).text\n soup = BeautifulSoup(req_data, 'html.parser')\n html_data = soup.select(\"#main_table_countries_today > tbody:nth-child(2) > tr[style='']\")\n return html_data", "def _get_snippets_html(soup):\n return soup.find_all(name='tr', attrs=['even', 'odd'])", "def _get_all_table_elements(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n cell_text = cell.text\n if \"VIEW\" in cell_text:\n yield (cell.get_attribute(\"href\"), cell_text)\n else:\n yield cell_text", "def get_all_data_from_main_table(soup_list):\n year_growth_list_all_pages = []\n\n for i in soup_list:\n year_growth_list_all_pages.append(get_data_from_main_table(i))\n return year_growth_list_all_pages", "def parseTable(chart):\n rowelems = chart.find_all('tr')\n rows = [rowelem.find_all('td') for rowelem in rowelems]\n data = [[elem.get_text() for elem in row] for row in rows]\n return(data)", "def extract(soup):\r\n table = soup.find('div', id='dnn_ctr11396_TimeTableView_PlaceHolder').find('table')\r\n rows = table.findChildren('tr', recursive=False)\r\n return [[col.findAll('div', {'class': 'TTLesson'}) for col in row.findChildren('td', recursive=False)[1:]]\r\n for row in rows[1:]]", "def get_lift_rows(self):\n lift_rows = []\n\n for element in self.big_table[3].find_all('tr'):\n td_s = element.find_all('td')\n row = [i.text for i in td_s]\n lift_rows.append(row)\n\n return lift_rows", "def 
get_tables():\n page_html = requests.get(conf.PAGE_URL).text\n soup = BeautifulSoup(page_html, 'html.parser')\n tables = soup.find_all(\"table\", {\"class\": conf.TABLE_CLASS_NAME})\n if not tables:\n raise ValueError(\"Table class not found\")\n return tables", "def get_table_data(table):\n pattern_body = re.compile(r'(?ims)\\<tbody\\>(.*?)\\</tbody\\>')\n pattern_rows = re.compile(r'(?ims)\\<tr\\>(.*?)\\</tr\\>')\n pattern_cols = re.compile(r'(?ims)\\<td.*?\\>([^<]+?)\\<.*?/td\\>')\n\n body = pattern_body.findall(table)[0]\n return [\n list(map(lambda x: html.unescape(x), pattern_cols.findall(row)[:3]))\n for row in pattern_rows.findall(body)]", "def get_flights_rows(tree, flight_table):\n return tree.xpath('.//*[@class=\"{} block\"]//tr[attribute::role]'.format(flight_table))", "def remove_empty_tables(soup):\n return remove_empty_tags(soup, 'table')", "def parse_table(table):\n rows = table.find_all('tr')\n if not rows:\n raise ValueError(\"No rows for table\")\n pages = []\n table_tag = \"<table>\"\n tbl_headers = get_tbl_headers(rows)\n table_tag += \"<tr>\"\n for header in tbl_headers.keys():\n table_tag += conf.ADD_TH_TAG(header)\n table_tag += \"</tr>\"\n for row in rows:\n cols = row.find_all('td')\n if not cols:\n continue\n for page_name in cols[0].find_all('a'):\n if not page_name:\n continue\n pages.append(page_name.text)\n table_tag += '<tr>'\n for header, col in tbl_headers.items():\n try:\n table_tag += f\"<td>{preprocess_data(f'{header} : {cols[col].text}')} \\t</td>\"\n except IndexError:\n pass\n table_tag += '</tr>'\n table_tag += '</table>'\n if conf.DOWNLOAD_IMAGES:\n download_images(pages)\n return table_tag", "def get_contents(ulist, rurl):\n soup = BeautifulSoup(rurl, 'lxml')\n trs = soup.find_all('tr')\n for tr in trs:\n ui = []\n for td in tr:\n ui.append(td.string)\n ulist.append(ui)\n del ulist[0:2]\n del ulist[-1]", "def _table_data_text(table):\n\n def row_get_data_text(tr, coltag=\"td\"): # td (data) or th (header)\n return [td.get_text(strip=True) for td in tr.find_all(coltag)]\n\n rows = []\n trs = table.find_all(\"tr\")\n header_row = row_get_data_text(trs[0], \"th\")\n if header_row: # if there is a header row include first\n rows.append(header_row)\n trs = trs[1:]\n for tr in trs: # for every other table rows\n rows.append(row_get_data_text(tr, \"td\")) # data row\n\n return rows", "def parse_view_page(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n yield cell.text", "def get_table_entries(self, soup, table_id):\r\n table_soup = soup.find(id=table_id).find_parent('tr')\r\n tr_list = table_soup.find_next_siblings('tr')\r\n\r\n bundle_contents = []\r\n for row in tr_list:\r\n # normally, this is a name template\r\n name_template_sibling = row.find(id='nametemplate')\r\n\r\n # if its a quality item do this\r\n bundle_quality_sibling = row.find(id='qualitycontainersm')\r\n\r\n if name_template_sibling is not None:\r\n bundle_link = name_template_sibling.find('a')\r\n bundle_contents.append(bundle_link.attrs['title'])\r\n elif bundle_quality_sibling is not None:\r\n bundle_quality_item = bundle_quality_sibling.find_parent('td')\r\n bundle_td = bundle_quality_item.find_next_sibling('td')\r\n # strip out link from td text\r\n bundle_contents.append(bundle_td.text.strip())\r\n\r\n return bundle_contents", "def find_table(self):\n tables = self.document.tables\n header = []\n for table in tables:\n for row in table.rows:\n header[:] = []\n for cell in row.cells:\n 
for para in cell.paragraphs:\n header.append(para.text.strip(' '))\n # new versions of final CAPA's keep project information in a table\n if 'Project Information' in header:\n self.read_new_format(table)\n # check if elements in findings is also in header\n cond = len(header) == 5 and header[4] == 'Rating'\n if cond or [x for x in self.findings for y in header if x in y] == self.findings:\n self.table = table\n return", "def _parse_result_page(self, page):\n items = []\n table = list(page.findall(\".//table[@id='browse']\"))[0]\n for row in (x for x in list(table.findall('tr'))[1:]\n if len(x.getchildren()) != 1):\n item = self._parse_item_row(row)\n items.append(item)\n return items", "def scrapeTable():\n\tfrom bs4 import BeautifulSoup\n\tfrom urllib2 import urlopen\n\n\turl = \"https://en.wikipedia.org/wiki/List_of_the_largest_libraries_in_the_United_States\"\n\n\t# read the html content\n\thtml = urlopen(url).read()\n\n\t# create BeautifulSoup from html\n\ttable = BeautifulSoup(html)\n\n\t# find all table row elements\n\trows = table.findAll('tr')\n\n\tarr = []\n\tfor tr in rows:\n\n\t\t# find all columns\n\t\tcols = tr.findAll('td')\n\n\t\t# column text\n\t\tx = [c.text for c in cols]\n\n\t\t# filter the content\n\t\tif len(x)!=0:\n\t\t\ttry:\n\t\t\t\tint(x[0])\n\t\t\texcept Exception, e:\n\t\t\t\tbreak\n\n\t\t\tarr.append(x)\n\n\treturn arr", "def _html_to_list(self):\n\n # Parse the HTML as a string\n soup = BeautifulSoup(self.html_string, 'html.parser')\n\n # Loop through all tables in soup\n for table in soup.find_all('table'):\n # Init table data as an empty list\n table_data = []\n\n # Loop trough all rows\n for row in table.find_all('tr'):\n # Loop through all columns, get the text and convert to lower case only.\n row_data = [col.get_text().lower() for col in row.find_all('td')]\n\n # Append to table_data:\n table_data.append(row_data)\n\n # Append to tables:\n self.tables.append(table_data)", "def _get_rows(self) -> List[htmler.Tr]:\n r = []\n\n for i in range(len(self.value)):\n row_widgets = {w.uid: w for w in self._get_widgets()} # type: Dict[str, Abstract]\n for w_name, w_value in self.value[i].items():\n row_widgets[w_name].value = w_value\n\n r.append(self._get_row(list(row_widgets.values()), i))\n\n return r", "def get_all_table_ids(self, soup):\r\n header_id_lists = []\r\n\r\n tables_to_traverse = soup.find_all('table')\r\n\r\n for table in tables_to_traverse:\r\n first_row = table.find('tr')\r\n first_th = first_row.find('th')\r\n if first_th is not None:\r\n if first_th.has_attr('id'):\r\n header_id_lists.append(first_th.attrs['id'].replace(\"_\", \" \"))\r\n return header_id_lists" ]
[ "0.71404964", "0.7040352", "0.6871531", "0.6789087", "0.64531195", "0.61592925", "0.6042625", "0.60175085", "0.59792954", "0.5929166", "0.5919311", "0.5864989", "0.5840617", "0.58360916", "0.5779402", "0.5740975", "0.5739607", "0.5673302", "0.5656826", "0.56020576", "0.55975246", "0.5592554", "0.5576099", "0.55693185", "0.5535445", "0.5464792", "0.54253674", "0.5414366", "0.53984207", "0.53974617" ]
0.7689844
0
Get the content view area from the kicktipp page.
def get_kicktipp_content(browser: RoboBrowser):\n    content = browser.find_all(id='kicktipp-content')\n    # guard against an empty result so a missing element returns None instead of raising IndexError\n    if content:\n        return content[0]\n    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_content(self):\n return self.current_window.content", "def GetContentWindow(self):\n return self.FindWindow(\"content\")", "def __call__(self):\n return ILayoutAware(self.context).content", "def GetPane(self):\r\n \r\n return self.pane", "def _getContentArea(self):\n drawingArea = self._getDrawingArea()\n scale = self._getScale(drawingArea)\n contentSize = self._getContentSize() * scale\n contentArea = Rect(drawingArea.x, drawingArea.y, contentSize.width, contentSize.height)\n contentArea = contentArea.CenterIn(drawingArea)\n return contentArea", "def visible_content(self):\n return self.content[self.position:self.position + self.visible_height]", "def content(self):\n return self._kml['content']", "def get_content_object(self):\r\n return self.content_object", "def get_content(self):\r\n view = self.window.active_view()\r\n selection = \"\"\r\n for region in view.sel():\r\n # If no selection, use the entire file as the selection\r\n if region.empty():\r\n selection = sublime.Region(0, view.size())\r\n else:\r\n selection = region\r\n return view.substr(selection)", "def get_help_content(burl):\n\n box_content = ''+\\\n '<div class=\"box-top\">' +\\\n ' <div class=\"row\">'+\\\n ' <div class=\"col-lg-12 col-md-12 col-sm-12 col-xs-12\">'+\\\n ' <div class=\"box-part rounded sa-center-content\" style=\"'+\\\n theme_return_this('', 'border-style:solid; border-width:thin; border-color:#343a40;') +'\">'+\\\n get_help_tabs(burl) +\\\n ' </div>'+\\\n ' </div>'+\\\n ' </div>'+\\\n '</div>'\n return box_content", "def get_view(self):\n return self.view", "def get_page(self):\n return self._get_page(self.mode)", "def GetPane(self):\r\n\r\n return self._pPane", "def content(self, **args):\n return self.pageConfig['content'] % self.pageConfig", "def get_content(self):\n return self.content", "def get(self, id):\n if id == 'body':\n return window.document.body\n else:\n return self.instances[id]", "def get_post_view(self, instance):\n \n return instance.postview.view", "def _currentPage(self):\n view = self._window.currentBrowser()\n if view is None:\n return None\n \n return view.page()", "def content(self):\r\n return self.content_stack(self.infobar, self._content)", "def GetPanePart(self, wnd):\r\n\r\n for part in self._uiparts:\r\n if part.type == AuiDockUIPart.typePaneBorder and \\\r\n part.pane and part.pane.window == wnd:\r\n return part\r\n\r\n for part in self._uiparts:\r\n if part.type == AuiDockUIPart.typePane and \\\r\n part.pane and part.pane.window == wnd:\r\n return part\r\n \r\n return None", "def _currentPageSettings(self):\n view = self._window.currentBrowser()\n if view is None:\n return None\n \n return view.page().settings()", "def _get_pad_content(self):\n self.ensure_one()\n return self.pad_get_content(self.description_pad)", "def _get_paste_page_content(self, url):\n paste_raw_url = self._get_post_url(url)\n paste_content = self._make_request(paste_raw_url, to_json=False)\n return paste_content.text", "def app_layout(self):\n return self.pt_app.layout", "def get_page(self):\n\n\t\treturn self.__page", "def get_page(self):\n return self.page", "def current_container(self):\n return self.layout.container", "def current_html(self):\n return unicode(self.page().mainFrame().toHtml())", "def get_view(self):\n for w in self.child_widgets():\n return w", "def contentlevel(self):\n return self.get(\"contentLevel\")" ]
[ "0.59836257", "0.59416074", "0.573304", "0.5718334", "0.5627919", "0.55299896", "0.55246526", "0.5503003", "0.550144", "0.5439835", "0.5401629", "0.5347948", "0.5335341", "0.53223467", "0.532178", "0.5301802", "0.5276638", "0.52707845", "0.5268684", "0.52670157", "0.5233209", "0.5231102", "0.52171147", "0.5211221", "0.5206686", "0.52053547", "0.51692766", "0.5168786", "0.5165523", "0.5162646" ]
0.671086
0
Place bets on all given communities.
def place_bets(browser: RoboBrowser, communities: list, predictor, override=False, deadline=None, dryrun=False, matchday=None): for com in communities: print("Community: {0}".format(com)) matches = parse_match_rows(browser, com, matchday) submitform = browser.get_form() for field_hometeam, field_roadteam, match in matches: if not field_hometeam or not field_roadteam: print("{0} - no bets possible".format(match)) continue input_hometeam_value = submitform[field_hometeam.attrs['name']].value input_roadteam_value = submitform[field_roadteam.attrs['name']].value if not override and (input_hometeam_value or input_roadteam_value): print("{0} - skipped, already placed {1}:{2}".format(match, input_hometeam_value, input_roadteam_value)) continue if deadline is not None: if not is_before_dealine(deadline, match.match_date): time_to_match = match.match_date - datetime.datetime.now() print("{0} - not betting yet, due in {1}".format(match, timedelta_tostring(time_to_match))) continue homebet, roadbet = predictor.predict(match) print("{0} - betting {1}:{2}".format(match, homebet, roadbet)) submitform[field_hometeam.attrs['name']] = str(homebet) submitform[field_roadteam.attrs['name']] = str(roadbet) if not dryrun: browser.submit_form(submitform, submit='submitbutton') else: print("INFO: Dry run, no bets were placed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def place_bets(self, market=None, market_bets=None):\n venue = market['event']['venue']\n name = market['marketName']\n if market_bets:\n for strategy_ref, strategy_bets in market_bets.items():\n live_strategy = betbot_db.strategy_repo.is_live(strategy_ref)\n retry_count = 0\n while len(strategy_bets) > 0: # Some orders may not execute first time around.\n # Set limit order prices as this may be an order re-submission.\n for strategy_bet in strategy_bets:\n runner_book = self.get_runner_book(market['marketId'], strategy_bet['selectionId'])\n size = strategy_bet['limitOrder']['size']\n side = strategy_bet['side']\n strategy_bet['limitOrder']['price'] = self.determine_price(side, size, runner_book)\n # Place bets via the Betfair API (or simulate it).\n if self.live_mode and live_strategy:\n resp = self.api.place_bets(market['marketId'], strategy_bets, strategy_ref)\n else:\n resp = self.simulate_place_bets(market, strategy_bets, strategy_ref)\n # Evaluate the API response.\n if type(resp) is dict and 'status' in resp:\n if resp['status'] == 'SUCCESS':\n # Check for execution and persist.\n success_refs = []\n for instruction in resp['instructionReports']:\n # If the order didn't execute, mark the instruction as settled immediately.\n if 'orderStatus' in instruction and instruction['orderStatus'] == 'EXECUTION_COMPLETE':\n instruction['settled'] = False\n success_refs.append(instruction['instruction']['customerOrderRef'])\n else: # Fill-or-Kill Limit Order EXPIRED so nothing to settle.\n instruction['settled'] = True\n # Add the strategy reference for display purposes.\n instruction['customerStrategyRef'] = strategy_ref\n betbot_db.instruction_repo.insert(market, instruction)\n # Remove any instructions that have executed, leaving any that EXPIRED.\n strategy_bets = [x for x in strategy_bets if x['customerOrderRef'] not in success_refs]\n self.logger.info('Successfully placed %s bet(s) on %s %s.' % (strategy_ref, venue, name))\n else:\n self.logger.error(\n 'Failed to place %s bet(s) on %s %s. (Error: %s)' %\n (strategy_ref, venue, name, resp['errorCode']))\n # Set the market as skipped, it's too late to try again.\n betbot_db.market_repo.set_skipped(market, resp['errorCode'])\n else:\n msg = 'Failed to place %s bet(s) on %s %s - resp = %s' % (strategy_ref, venue, name, resp)\n raise Exception(msg)\n retry_count += 1\n if retry_count == 5:\n self.logger.warn(\"Failed to place one or more %s bets 5 times, giving up.\" % strategy_ref)\n break\n # Throttle order re-submissions.\n sleep(1)", "def strategize_community():\n\t\tStrategy.calculate_strategies(Person.recent_memory, int(Person.get_no_of_instances()/2))\n\t\tfor person in Simulation.community:\n\t\t\tSimulation.community[person].give_strategies()", "def __bet(self, numbers: str, user_seed: str) -> None:\n self.BetSource(self.tx.origin, self.tx.timestamp)\n if not self._game_on.get():\n Logger.debug(f'Game not active yet.', TAG)\n revert(f'Game not active yet.')\n amount = self.msg.value\n Logger.debug(f'Betting {amount} loop on {numbers}.', TAG)\n self.BetPlaced(amount, numbers)\n self._take_wager(self.address, amount)\n\n nums = set(numbers.split(','))\n n = len(nums)\n if n == 0:\n Logger.debug(f'Bet placed without numbers.', TAG)\n revert(f' Invalid bet. No numbers submitted. Zero win chance. Returning funds.')\n elif n > 20:\n Logger.debug(f'Bet placed with too many numbers. Max numbers = 20.', TAG)\n revert(f' Invalid bet. Too many numbers submitted. 
Returning funds.')\n\n numset = set(WHEEL_ORDER)\n numset.remove('0')\n for num in nums:\n if num not in numset:\n Logger.debug(f'Invalid number submitted.', TAG)\n revert(f' Please check your bet. Numbers must be between 0 and 20, submitted as a comma separated '\n f'string. Returning funds.')\n\n bet_type = self._bet_type.get()\n self._bet_type.set(BET_TYPES[0])\n if bet_type == BET_TYPES[2] or bet_type == BET_TYPES[3]:\n bet_limit = self._bet_limits[0]\n else:\n bet_limit = self._bet_limits[n]\n if amount < BET_MIN or amount > bet_limit:\n Logger.debug(f'Betting amount {amount} out of range.', TAG)\n revert(f'Betting amount {amount} out of range ({BET_MIN} -> {bet_limit} loop).')\n\n if n == 1:\n bet_type = BET_TYPES[4]\n if bet_type == BET_TYPES[1]:\n payout = int(MULTIPLIERS[BET_TYPES[5]] * 1000) * amount // (1000 * n)\n else:\n payout = MULTIPLIERS[bet_type] * amount\n if self.icx.get_balance(self.address) < payout:\n Logger.debug(f'Not enough in treasury to make the play.', TAG)\n revert('Not enough in treasury to make the play.')\n\n spin = self.get_random(user_seed)\n winningNumber = WHEEL_ORDER[int(spin * 21)]\n Logger.debug(f'winningNumber was {winningNumber}.', TAG)\n win = winningNumber in nums\n payout = payout * win\n self.BetResult(str(spin), winningNumber, payout)\n\n if win == 1:\n self._wager_payout(self.address, payout)\n else:\n Logger.debug(f'Player lost. ICX retained in treasury.', TAG)", "def all_in():\r\n\r\n raise_bet(player.get_cash())", "def add_communites(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n clusters = IGraph.community_walktrap(ig, weights=\"weight\").as_clustering()\n\n nodes = [{\"name\": node[\"name\"]} for node in ig.vs]\n for node in nodes:\n idx = ig.vs.find(name=node[\"name\"]).index\n node[\"community\"] = clusters.membership[idx]\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.community = toInt(n.community)\n '''\n\n self.graph.run(write_clusters_query, nodes=nodes)", "def populate_community():\n\t\tfor i in range(1,11):\n\t\t\tSimulation.community[\"person\"+str(i)] = Person(\"person\"+str(i))", "def communities(self, config):\n\n raise NotImplementedError", "def tournament_communication(comm, \n comm_fn=lambda x,y: None,\n comm_kw={}):\n ### Step 1 is to build the tournament braket\n size = comm.Get_size()\n rank = comm.Get_rank()\n rank_list = np.arange(0,size)\n \n tournament = []\n temp_tournament = []\n for idx,value in enumerate(rank_list[::2]):\n value2 = value+1\n temp_tournament.append((value,value2))\n tournament.append(temp_tournament)\n \n if size <= 1:\n tournament = [(0,)]\n \n prev_tournament = tournament[0]\n while len(prev_tournament) != 1:\n temp_tournament = []\n for idx,entry in enumerate(prev_tournament[::2]):\n next_idx = idx*2+1\n keep_rank1 = min(entry)\n if (next_idx+1) > len(prev_tournament):\n temp_tournament.append((keep_rank1,))\n else:\n keep_rank2 = min(prev_tournament[next_idx])\n temp_tournament.append((keep_rank1, keep_rank2))\n tournament.append(temp_tournament)\n prev_tournament = tournament[-1]\n if len(tournament) > 1:\n tournament.append([(0,)])\n \n if tournament == [(0,)]:\n return \n \n idx = 0\n for braket in tournament:\n if rank == 0:\n print(\"Round {} of {}\".format(idx, len(tournament)), flush=True)\n idx += 1\n\n # ### Rank loop is here to emulate parallel execution\n # for rank in rank_list:\n found = False\n for entry 
in braket:\n if rank in entry:\n found = True\n break\n\n if found:\n comm_fn(comm, entry, **comm_kw)\n # if found:\n # print(\"{}: {}\".format(rank, entry))\n\n return tournament", "def betting_round(self, method, params):\n self.bet_history += [[]]\n current_bets = [self.starting_player] * len(self.agents)\n \n max_bet = 0\n if method == self.deal_cards:\n max_bet = big_blind\n current_bets[self.starting_player] = small_blind\n current_bets[(self.starting_player + 1) % len(self.agents)] = big_blind\n\n (self.all_in[self.starting_player], bet) = self.normalize_bet(self.chips[self.starting_player], method(self.agents[self.starting_player], params[self.starting_player]), max_bet)\n self.in_game[self.starting_player] = (not self.all_in[self.starting_player])\n current_bets[self.starting_player] = bet\n self.chips[self.starting_player] -= bet\n check = True if bet == 0 else False\n max_bet = max(max_bet, bet)\n self.pot += bet\n self.bet_history[-1] += [bet]\n\n raised_player = self.starting_player\n i = (raised_player + 1) % len(self.agents)\n\n if method == self.deal_cards:\n # raised_player = (self.starting_player + 1) % len(agents)\n check = False\n if bet > max_bet:\n raised_player = i\n max_bet = bet\n\n if bet == 0:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n while (i != raised_player) and (not self.all_in[i]) and (current_bets[i] <= max_bet):\n if self.in_game[i]:\n (self.all_in[i], bet) = self.normalize_bet(self.chips[i], method(self.agents[i], params[i]), max_bet)\n self.in_game[i] = (not self.all_in[i])\n delta_bet = max(0, bet - current_bets[i])\n current_bets[i] = bet\n self.chips[i] -= delta_bet\n self.pot += delta_bet\n self.bet_history[-1] += [bet]\n\n if bet > max_bet:\n check = False\n raised_player = i\n max_bet = bet\n\n if bet == 0 and not check:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n i = (i + 1) % len(self.agents)", "def mine_all(self):\n\n # Query databse\n query_string = \"SELECT * from planets_in_range;\"\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Check planets in range\n for ship in results:\n self.mine(str(ship[0]), str(ship[1]))", "def test_places_all_ships(self):\n player = TestPlayer()\n self.ai.place_ships(player)\n sorted_ships = sorted(player.placed_ships)\n self.assertEqual([2, 3, 3, 4, 5], sorted_ships)", "def collect_money(self) -> None:\r\n for player in self.players:\r\n self.money_stack.add_money(player.pay_round_money())", "def setUpSocialStructure(self):\n \n # create clans\n clans = range(self.parameters[\"nClans\"]) * int(math.ceil(self.nAgents/float(self.parameters[\"nClans\"])))\n clans = clans[:self.nAgents]\n self.clans = sorted(clans)\n \n # fill compounds\n self.compounds = [0]\n currentComp = 0\n\t\t\n\t\t\n\t\t#TODO: does this mean that some compounds have people from multiple clans?\n for i in range(len(self.clans))[1:]:\n \tself.compounds.append(currentComp)\n \tif self.compounds.count(currentComp)>= self.parameters[\"MaxSizeCompounds\"]:\n \t\tcurrentComp += 1\n\n# self.clans = np.array(self.clans)\n# self.compounds = np.array(self.compounds) \t\n\t\t# get social structures\n self.popStructure = self.getSocialStructure()", "def placeBets(self):\n if self.game_state.board_state == PokerGameState.BOARD_STATE_PRE_FLOP:\n self.game_state.current_turn_index = self.dealer\n self.game_state.current_final_decision_index = self.game_state.current_turn_index\n else:\n self.game_state.current_turn_index = (self.dealer+1)%self.num_players\n self.game_state.current_final_decision_index 
= self.game_state.current_turn_index\n if DEBUG:\n print \"current_final_decision_index: \" + str(self.game_state.current_final_decision_index) + \" \" + str(self.game_state.player_list[self.game_state.current_final_decision_index].name)\n print \"current_turn_index: \" + str(self.game_state.current_turn_index) + \" \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n\n \"\"\" Run an initial decision so that current_turn_index doesn't equal current_final_decision_index \"\"\"\n if DEBUG:\n print \"Getting initial poker decision from \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n poker_decision = self.game_state.player_list[self.game_state.current_turn_index].getPokerDecision(self.game_state, self.decision_list)\n self.handleDecision(poker_decision)\n self.game_state.current_turn_index = (self.game_state.current_turn_index + 1) % self.num_players\n if DEBUG:\n print \"current_turn_index: \" + str(self.game_state.current_turn_index) + \" \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n print \"current_final_decision_index: \" + str(self.game_state.current_final_decision_index) + \" \" + str(self.game_state.player_list[self.game_state.current_final_decision_index].name)\n\n while int(self.game_state.current_turn_index) != int(self.game_state.current_final_decision_index):\n if self.game_state.numActive() == 1:\n return\n if DEBUG:\n print \"Getting poker decision from \" + str(self.game_state.player_list[self.game_state.current_turn_index].name) + \"...\"\n poker_decision = self.game_state.player_list[self.game_state.current_turn_index].getPokerDecision(self.game_state, self.decision_list)\n self.handleDecision(poker_decision)\n self.game_state.current_turn_index = (self.game_state.current_turn_index + 1) % self.num_players\n if poker_decision.action_type == PokerDecision.ACTION_TYPE_RAISE:\n self.game_state.current_final_decision_index = self.game_state.current_turn_index\n if DEBUG:\n print \"Next current_turn_index: \" + str(self.game_state.current_turn_index) + \" \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n print \"Next current_final_decision_index: \" + str(self.game_state.current_final_decision_index) + \" \" + str(self.game_state.player_list[self.game_state.current_final_decision_index].name)\n print \"while() cond: \" + str(int(self.game_state.current_turn_index) != int(self.game_state.current_final_decision_index))", "def make_bet(self, amount):\n self.update_fear(amount)\n self.bot.bet(amount)", "def run_bp(self, niter):\n for v in self.vs.values():\n v.init_received()\n for f in self.fs:\n f.init_received()\n marg = {v: self.get_marginal(v) for v in self.vs}\n for it in range(niter):\n for v in self.vs.values():\n v.send()\n for f in self.fs:\n f.send()\n for v in self.vs:\n marg[v] = np.vstack((marg[v], self.get_marginal(v)))\n domains = {v.name: v.orig_domain for v in self.vs.values()}\n return (marg, domains, self.vobs)", "async def create_communities(self):\n async with self.web_session.get(self._api_communities_url, headers=self._headers) as resp:\n if self.check_status(resp.status, self._api_communities_url):\n data = await resp.json()\n user_communities = data.get(\"communities\")\n self.all_communities = create_community_objects(user_communities)", "def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in 
enumerate(communities)\n if node in c][0]\n graph.graph['modularity'] = nx.algorithms.community.quality\\\n .modularity(nx.Graph(graph),\n communities)", "def bet(user: User, users_list: List[User]) -> None:\n for match in search_matches():\n print()\n to_print= ' '.join(match.text.split('\\n')[:2])\n print()\n print(f\"Looking into {to_print}\")\n while True:\n try:\n match.click() #goes to match page\n WebDriverWait(driver, 10).until(EC.url_changes('https://www.bet365.com/#/IP/B1'))\n break\n except TimeoutException:\n pass\n \n fav, other, balan, time = favorite()\n if not time: #if \"resultado final\" is not displayed or time > 87 min, so there's no bet to be made here\n WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"ip-ControlBar_BBarItem\"))).click()\n continue\n\n USERS = users_list\n for strategy, params in STRATEGIES.items():\n owned = user.get_money() #inside the loop cause one bet may change this value\n \n #if \"resultado final\" is not avaiable fav is set based on params. If other has enough appm/cg to bet, it'll be fav\n if strategy != 'h1' and (other['appm'] >= params['appm']) and (other['cg'] >= params['cg']):\n fav = other\n \n if params['state']: #checks if strategy is active\n print(f'Testing strategy {strategy}.', end=\" \")\n print(f'appm={fav[\"appm\"]}. cg={fav[\"cg\"]}. rend={fav[\"rend\"]}. balan={balan}. cga-cgb={fav[\"cg\"] - other[\"cg\"]}')\n if time > 10*60 and strategy == 'e1':\n strategy == 'e11' \n\n if (strategy == 'e1' or strategy == 'e11') and match_condition(fav, other, balan, time, strategy) and not important_event(time):\n print(f'{strategy} approved.')\n if time < 9*60+30:\n for user_bet in USERS:\n while time < 9*60+30:\n try:\n if user_bet != USERS[0]:\n print('inside if')\n login(user_bet)\n owned = user_bet.get_money()\n place_bet_e1(fav, strategy, user, time, owned)\n time = get_time()\n if user_bet.risk_management():\n RISK_USERS.append(user_bet)\n logout()\n break\n except AttributeError:\n pass\n if get_time() > 9*60+30:\n break\n login(user)\n break\n \n else:\n for user_bet in USERS:\n while time < 54*60+30:\n try:\n if user_bet != USERS[0]:\n login(user_bet)\n owned = user.get_money()\n place_bet_e1(fav, strategy, user, time, owned)\n time = get_time()\n if user_bet.risk_management():\n RISK_USERS.append(user_bet)\n logout()\n break\n except AttributeError:\n pass\n if get_time() > 54*60+30:\n break\n login(user)\n break\n\n if strategy == 'e2' and (fav['gol'] <= other['gol']) and match_condition(fav, other, balan, time, strategy) and not important_event(time): #checks if gol or card in last minute\n print(f'{strategy} approved.')\n for user_bet in USERS:\n while time < 34*60+30:\n try:\n if user_bet != USERS[0]:\n login(user_bet)\n owned = user.get_money()\n place_bet_e24(fav, strategy, '1', user, time, owned)\n time = get_time()\n if user_bet.risk_management():\n RISK_USERS.append(user_bet)\n logout()\n break\n except AttributeError:\n pass\n if get_time() > 34*60+30:\n break\n login(user)\n\n if strategy == 'e3' and (fav['gol'] <= other['gol']) and match_condition(fav, other, balan, time, strategy) and not important_event(time):\n print(f'{strategy} approved.')\n for user_bet in USERS:\n while time < 39*60:\n try:\n if user_bet != USERS[0]:\n login(user_bet)\n owned = user.get_money()\n place_bet_e35(fav, strategy, '1', ['asiático'], user, time, owned)\n time = get_time()\n while time < 39*60:\n place_bet_e35(fav, strategy, '1', ['não asiático', '1.0'], user, time, owned)\n time = get_time()\n while time < 
39*60:\n place_bet_e35(fav, strategy, '1', ['não asiático', '1.5'], user, time, owned)\n time = get_time()\n if user_bet.risk_management():\n RISK_USERS.append(user_bet)\n logout()\n break\n except AttributeError:\n pass \n if get_time() > 39*60:\n break\n login(user)\n\n if strategy == 'e4' and (fav['gol'] <= other['gol']) and match_condition(fav, other, balan, time, strategy) and not important_event(time):\n print(f'{strategy} approved.')\n for user_bet in USERS:\n while time < 76*60:\n if user_bet != USERS[0]:\n login(user_bet)\n owned = user.get_money()\n place_bet_e24(fav, strategy, '2', user, time, owned)\n time = get_time()\n if user_bet.risk_management():\n RISK_USERS.append(user_bet)\n logout()\n break\n if get_time() > 76*60:\n break\n login(user)\n \n if strategy == 'e5' and (fav['gol'] <= other['gol']) and match_condition(fav, other, balan, time, strategy) and not important_event(time):\n print(f'{strategy} approved.')\n for user_bet in USERS:\n while time < 87*60:\n try:\n if user_bet != USERS[0]:\n login(user_bet)\n owned = user.get_money()\n place_bet_e35(fav, strategy, '2', ['asiático'], user, time, owned)\n time = get_time()\n while time < 87*60:\n place_bet_e35(fav, strategy, '2', ['não asiático', '1.0'], user, time, owned)\n time = get_time()\n while time < 87*60:\n place_bet_e35(fav, strategy, '2', ['não asiático', '1.5'], user, time, owned)\n time = get_time()\n break\n break\n if user_bet.risk_management():\n RISK_USERS.append(user_bet)\n logout()\n break\n except AttributeError:\n pass \n if get_time() > 39*60:\n break\n login(user)\n \n if strategy == 'h1' and match_condition(fav, other, balan, time, strategy):\n print(f'{strategy} approved.')\n for user_bet in USERS:\n if user_bet != USERS[0]:\n login(user_bet)\n time_limit = datetime.datetime.now() + datetime.timedelta(seconds=15)\n while datetime.datetime.now() < time_limit:\n try:\n place_bet_h1(fav, user, owned)\n break\n except AttributeError:\n pass\n \n USERS = list(filter(lambda user: user not in RISK_USERS, USERS)) #filters users\n WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"ip-ControlBar_BBarItem\"))).click() #returns to all live matches page", "def updateFromBets(self, bets: dict, playersView: Player):\n betZero = False\n zeroBetters = []\n aceProb = 0\n for k in bets.keys():\n v = bets[k]\n if v == 0:\n zeroBetters.append(k)\n aceProb += self[playersView, k, Card(Suit.spade, 14)]\n betZero = True\n\n if betZero:\n for p in Player:\n if p != playersView and p not in zeroBetters:\n self[playersView, p, Card(Suit.spade, 14)] += (aceProb / (4 - (len(zeroBetters) + 1)))\n\n if betZero:\n for p in zeroBetters:\n self[playersView, p, Card(Suit.spade, 14)] = 0\n\n #print(zeroBetters)", "def update(self):\n for pl, result in zip(self._players, self.golf_round.doc.results):\n for score in result.scores:\n n = score.num-1\n # update net \n pl.dct_net['holes'][n] = score.gross - pl._bumps[n]\n pl.update_totals(pl.dct_net)", "def sellOutStrategies(self, strategies):\n\n # GET ALL SECONDARY_AGG POSITIONS AND SELL THEM\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"$or\": strategies, \"Asset_Type\": self.asset_type, \"Account_ID\" : self.account_id})\n\n for position in open_positions:\n\n trade_data = {\n \"Symbol\": position[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": position[\"Aggregation\"],\n \"Strategy\": position[\"Strategy\"],\n \"Asset_Type\": position[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n queued = 
self.queue.find_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": position[\"Symbol\"], \"Strategy\": position[\"Strategy\"], \"Asset_Type\": position[\"Asset_Type\"], \"Account_ID\" : self.account_id})\n\n if not queued:\n\n self.placeOrder(trade_data, position, orderType=\"MARKET\")", "def place_cities(self, n=20):\n self.city_score = self.flow ** 0.5\n self.city_score[self.elevation[:-1] <= 0] = -9999999\n self.cities = []\n while len(self.cities) < n:\n # location of potential new city is place with maximum score\n newcity = np.argmax(self.city_score)\n\n # Only place cities between 0.1 and 0.9 axes.\n city_max_ax = 0.85\n city_min_ax = 0.15\n # Chance that this location has no city, scales with number of cities placed so far\n if (\n np.random.random() < (len(self.cities) + 1) ** -0.2\n and city_min_ax < self.vxs[newcity, 0] < city_max_ax\n and city_min_ax < self.vxs[newcity, 1] < city_max_ax\n ):\n self.cities.append(newcity)\n\n # penalize city score for the newcity location.\n self.city_score -= 0.01 * 1 / (distance(self.vxs, self.vxs[newcity, :]) + 1e-9)", "def set_liability(self, people, currency):\n for name, person in people.items():\n if self.name != name:\n \"\"\"if owed is greater than what I have paid then add to liabilities\"\"\"\n diff = self.share - person.share\n if diff < 0:\n liab_dict = {'from':self.name,'to': name,'amount':-diff, 'currency':currency }\n self.liabilities.append(liab_dict)", "def lancement_partie(joueur1: object, joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list,\n number_of_ships: int):\n # Victoire devient True quand un joueur détruit tout les bateaux adverse\n victoire = False\n # PHASE 1 PLACEMENT BATEAU\n\n positionner_bateau(joueur1, number_of_ships)\n positionner_bateau(joueur2, number_of_ships)\n\n # PHASE 2 VERIFICATION DE L ETAT DES BATEAUX\n while not victoire:\n\n if number_of_ships == 3:\n petite_partie(joueur1, joueur2, tableau_invisible_joueur1, tableau_invisible_joueur2)\n\n elif number_of_ships == 1:\n test_partie(joueur1, joueur2, tableau_invisible_joueur1, tableau_invisible_joueur2)\n\n elif number_of_ships == 5:\n grande_partie(joueur1,\n joueur2,\n tableau_invisible_joueur1, tableau_invisible_joueur2,\n )\n\n if verif_win(joueur2, number_of_ships):\n victoire = True\n print(\"le joueur 1 a gagné\")\n\n if verif_win(joueur1, number_of_ships):\n victoire = True\n print(\"le joueur 2 a gagné\")\n\n envoi_score(joueur1, joueur2)\n afficher_score(joueur1, joueur2)", "def eval_randoms(count):\n\t\tfor person in Simulation.community:\n\t\t\tSimulation.community[person].eval_random_strategy(count)", "def resetPlayerBetAmount(self, players):\n\t\tfor x in players:\n\t\t\tx.betAmount = []", "def place_bet_e24(fav: TeamDict, strategy: str, user: User, time: int, owned: float) -> None:\n part = {'e2': '1', 'e4': '2'}\n tab = None\n group_tabs = driver.find_elements_by_class_name(\"sip-MarketGroup \") #all markets\n try:\n name_tabs = driver.find_elements_by_class_name(\"sip-MarketGroupButton_Text \") #all text in markets\n for i, el_tab in enumerate(name_tabs):\n name = el_tab.text.lower()\n if part[strategy] == '2' and ('escanteio' in name) and ('opções' in name):\n tab = group_tabs[i] #gets desired market\n break \n elif ('escanteio' in name) and (part[strategy] in name) and ('º' in name or 'ª' in name) and ('asiático' not in name):\n tab = group_tabs[i] #gets desired market\n break\n\n assert len(tab.text.split('\\n')) > 1 #this happens when tab is closed \n\n except AssertionError:\n tab.click() #if 
it's closed, it opens\n if tab == None:\n print(f'Não há apostas de handicap para 1º tempo')\n return\n\n finally:\n rows = tab.find_elements_by_class_name(\"gl-Market \")[0] #first collumn is bet numbers\n j = None\n for i, row in enumerate(rows.text.split('\\n')): #iterates over head text\n row = row.lower()\n if str(fav['esc_tot']+1) in row: #gets what row is bet\n j = i \n break\n if j == None:\n print(f'Bet on {fav[\"esc_tot\"]+1} not avaible')\n return\n bet_collumn = tab.find_elements_by_class_name(\"gl-Market \")[1] #\"mais de\" in collumn 1\n bet_button = bet_collumn.find_elements_by_class_name(\"gl-ParticipantOddsOnly \")[j] #click on bet option\n bet_button.click()\n\n place_bet(fav, strategy, 'mais', user, owned)", "def manyBalls(self):\n self.action.transaction(self.cardUid, 5)\n self.start()", "def make_orders(self):\n\n # orders to cancel from all of the strategies\n ocancel = self.get_cancel_orders()\n\n # orders to update from all of the strategies\n oupdate = self.get_update_orders()\n\n # new orders from all of the strategies\n onew = self.get_new_orders()\n \n # do we need to cancel, update, or make new orders?\n tocancel = bool(ocancel[const.BDAQID] or ocancel[const.BFID])\n toupdate = bool(oupdate[const.BDAQID] or oupdate[const.BFID])\n tonew = bool(onew[const.BDAQID] or onew[const.BFID])\n\n if tocancel:\n betlog.betlog.debug('cancelling orders: {0}'.format(ocancel))\n\n if toupdate:\n betlog.betlog.debug('updating orders: {0}'.format(oupdate))\n\n if tonew:\n betlog.betlog.debug('making new orders: {0}'.format(onew))\n\n if (tocancel or toupdate or tonew):\n \n # we could instead do 'monkey patching' here so we don't\n # need to check this every tick...\n if self.gconf.PracticeMode:\n # we don't make any real money bets in practice mode\n print 'bets not made since in practice mode'\n return\n\n # call multithreaded make orders so that we make all order\n # requests (cancelling, updating, making new) for BDAQ and\n # BF simultaneously.\n corders, uorders, neworders = multi.\\\n make_orders(ocancel, oupdate, onew)\n\n # save the full order information to the order store (this will\n # handle writing to the DB, etc.)\n self.ostore.add_orders(corders, uorders, neworders)\n\n else:\n \n # we need to set latest cancel, update, new orders to be\n # empty.\n self.ostore.latest = [{const.BDAQID: {}, const.BFID: {}}, \n {const.BDAQID: {}, const.BFID: {}}, \n {const.BDAQID: {}, const.BFID: {}}]" ]
[ "0.61001956", "0.57336086", "0.5551799", "0.55417174", "0.53878725", "0.53347605", "0.52753794", "0.52518904", "0.5246189", "0.5228697", "0.522792", "0.51936316", "0.5184653", "0.51783067", "0.5173215", "0.5148297", "0.5125859", "0.5123962", "0.5123266", "0.51071364", "0.51055706", "0.50797224", "0.5064338", "0.50581014", "0.50561315", "0.5012809", "0.49934366", "0.49754867", "0.4960498", "0.49251318" ]
0.7158739
0
Plot occultation orders for MTP overview page.
def makeOverviewPage(orbit_list, mtpConstants, paths, occultationObservationDict, nadirObservationDict): mtpNumber = mtpConstants["mtpNumber"] obsTypeNames = {"ingress":"irIngressLow", "egress":"irEgressLow"} #loop through once to find list of all orders measured ordersAll = [] for orbit in orbit_list: occultationObsTypes = [occultationType for occultationType in orbit["allowedObservationTypes"][:] if occultationType in ["ingress", "egress"]] for occultationObsType in occultationObsTypes: if occultationObsType in orbit.keys(): obsTypeName = obsTypeNames[occultationObsType] orders = orbit["finalOrbitPlan"][obsTypeName+"Orders"] if 0 in orders: #remove darks orders.remove(0) if "COP#" in "%s" %orders[0]: #remove manual COP selection orders = [] ordersAll.extend(orders) uniqueOccultationOrders = sorted(list(set(ordersAll))) #loop through again to plot each order on a single graph for chosenOrder in uniqueOccultationOrders: title = "Solar occultations for diffraction order %s" %(chosenOrder) fig = plt.figure(figsize=(FIG_X, FIG_Y)) ax = fig.add_subplot(111, projection="mollweide") ax.grid(True) plt.title(title) lonsAll = [] #pre-make list of all observing points of this order, otherwise colourbar scale will be incorrect latsAll = [] altsAll = [] for orbit in orbit_list: occultationObsTypes = [occultationType for occultationType in orbit["allowedObservationTypes"][:] if occultationType in ["ingress", "egress"]] for occultationObsType in occultationObsTypes: if occultationObsType in orbit.keys(): obsTypeName = obsTypeNames[occultationObsType] orders = orbit["finalOrbitPlan"][obsTypeName+"Orders"] if chosenOrder in orders: occultation = orbit[occultationObsType] #if lats/lons/alts not yet in orbitList, find and write to list if "alts" not in occultation.keys(): #just plot the half of the occultation closest to the surface, not the high altitude bits #ignore merged or grazing occs at this point if occultationObsType == "ingress": ets = np.arange(occultation["etMidpoint"], occultation["etEnd"], OCCULTATION_SEARCH_STEP_SIZE) elif occultationObsType == "egress": ets = np.arange(occultation["etStart"], occultation["etMidpoint"], OCCULTATION_SEARCH_STEP_SIZE) lonsLatsLsts = np.asfarray([getLonLatLst(et) for et in ets]) occultation["lons"] = lonsLatsLsts[:, 0] occultation["lats"] = lonsLatsLsts[:, 1] occultation["alts"] = np.asfarray([getTangentAltitude(et) for et in ets]) #else take lats/lons/alts from orbitList if already exists lonsAll.extend(occultation["lons"]) latsAll.extend(occultation["lats"]) altsAll.extend(occultation["alts"]) plot1 = ax.scatter(np.asfarray(lonsAll)/sp.dpr(), np.asfarray(latsAll)/sp.dpr(), \ c=np.asfarray(altsAll), cmap=plt.cm.jet, marker='o', linewidth=0) cbar = fig.colorbar(plot1, fraction=0.046, pad=0.04) cbar.set_label("Tangent Point Altitude (km)", rotation=270, labelpad=20) fig.tight_layout() plt.savefig(os.path.join(paths["IMG_MTP_PATH"], "occultations_mtp%03d_order%i_altitude.png" %(mtpNumber, chosenOrder))) plt.close() """plot nadir orders""" #find all orders measured ordersAll = [] for orbit in orbit_list: if "dayside" in orbit["irMeasuredObsTypes"]: orders = orbit["finalOrbitPlan"]["irDaysideOrders"] if 0 in orders: #remove darks orders.remove(0) if "COP#" in "%s" %orders[0]: #remove manual COP selection orders = [] ordersAll.extend(orders) uniqueNadirOrders = sorted(list(set(ordersAll))) #plot each order for chosenOrder in uniqueNadirOrders: title = "Dayside nadirs for diffraction order %s" %(chosenOrder) fig = plt.figure(figsize=(FIG_X, FIG_Y)) ax = 
fig.add_subplot(111, projection="mollweide") ax.grid(True) plt.title(title) lonsAll = [] #pre-make list of all observing points of this order, otherwise colourbar scale will be incorrect latsAll = [] anglesAll = [] for orbit in orbit_list: if "dayside" in orbit["irMeasuredObsTypes"]: orders = orbit["finalOrbitPlan"]["irDaysideOrders"] if chosenOrder in orders: nadir = orbit["dayside"] #if lats/lons/incidence angles not yet in orbitList, find and write to list if "incidences" not in nadir.keys(): # print(orbit["orbitNumber"]) #nadir start/end times have been modified to fit thermal room realStartTime = nadir["obsStart"] + PRECOOLING_TIME + INITIALISATION_TIME realEndTime = nadir["obsEnd"] ets = np.arange(realStartTime, realEndTime, NADIR_SEARCH_STEP_SIZE) lonsLatsIncidencesLsts = np.asfarray([getLonLatIncidenceLst(et) for et in ets]) nadir["lons"] = lonsLatsIncidencesLsts[:, 0] nadir["lats"] = lonsLatsIncidencesLsts[:, 1] nadir["incidences"] = lonsLatsIncidencesLsts[:, 2] #else take lats/lons/incidence angles from orbitList if already exists lonsAll.extend(nadir["lons"]) latsAll.extend(nadir["lats"]) anglesAll.extend(nadir["incidences"]) plot1 = ax.scatter(np.asfarray(lonsAll)/sp.dpr(), np.asfarray(latsAll)/sp.dpr(), \ c=np.asfarray(anglesAll), cmap=plt.cm.jet, marker='o', linewidth=0) cbar = fig.colorbar(plot1, fraction=0.046, pad=0.04) cbar.set_label("Incidence Angle (degrees)", rotation=270, labelpad=20) fig.tight_layout() plt.savefig(os.path.join(paths["IMG_MTP_PATH"], "dayside_nadirs_mtp%03d_order%i_incidence_angle.png" %(mtpNumber, chosenOrder))) plt.close() """write mtp overview page""" h = r"" h += r"<h1>MTP%03d Overview</h1>" %(mtpNumber) h += r"<h2>Geometry</h2>"+"\n" imagename = "mtp%03d_occultation_duration.png" %(mtpNumber) h += r"<img src='%s'>" %imagename imagename = "mtp%03d_occultation_lat.png" %(mtpNumber) h += r"<img src='%s'>" %imagename imagename = "mtp%03d_nadir_minimum_incidence_angle.png" %(mtpNumber) h += r"<img src='%s'>" %imagename h += r"<p>UVIS typically operates on all dayside nadirs and all occultations</p>"+"\n" h += r"<h2>Solar Occultations</h2>"+"\n" h += r"Solar occultation diffraction orders measured this MTP: "+"\n" for chosenOrder in sorted(uniqueOccultationOrders): h += "%i, " %chosenOrder h += r"<br>"+"\n" for chosenOrder in sorted(uniqueOccultationOrders): h += "<h3>Solar occultations for diffraction order %i</h3>" %chosenOrder imagename = "img/occultations_mtp%03d_order%i_altitude.png" %(mtpNumber, chosenOrder) h += r"<img src='%s'>" %imagename h += r"<br>"+"\n" h += r"<br>"+"\n" h += r"<br>"+"\n" h += r"<br>"+"\n" h += r"<h2>Dayside Nadirs</h2>"+"\n" h += r"Dayside nadir diffraction orders measured this MTP: "+"\n" for chosenOrder in sorted(uniqueNadirOrders): h += "%i, " %chosenOrder h += r"<br>"+"\n" for chosenOrder in sorted(uniqueNadirOrders): h += "<h3>Dayside nadirs for diffraction order %i</h3>" %chosenOrder imagename = "img/dayside_nadirs_mtp%03d_order%i_incidence_angle.png" %(mtpNumber, chosenOrder) h += r"<img src='%s'>" %imagename h += r"<br>"+"\n" h += r"<br>"+"\n" # h += r"<h2>SO/LNO Observation Plan</h2>"+"\n" h += r"<br>"+"\n" h += r"<br>"+"\n" h += r"<h2>SO/LNO Observation Dictionaries</h2>"+"\n" h += r"<h3>Solar Occultation</h3>"+"\n" headers = ["Name", "Diffraction Order 1", "Diffraction Order 2", "Diffraction Order 3", "Diffraction Order 4", "Diffraction Order 5", "Diffraction Order 6", "Integration Time", "Rhythm", "Detector Height"] h += r"<table border=1>"+"\n" h += r"<tr>"+"\n" for header in headers: h += r"<th>%s</th>" 
%header h += r"</tr>"+"\n" for key in sorted(occultationObservationDict.keys()): orders, integrationTime, rhythm, detectorRows, channelCode = getObsParameters(key, occultationObservationDict) h += r"<tr>"+"\n" h += r"<td>%s</td>" %(key) if "COP" in orders: h += r"<td>%s (manual mode)</td>" %(orders) for order in range(5): h += r"<td>-</td>"+"\n" else: for order in orders: h += r"<td>%s</td>" %(order) for order in range(6-len(orders)): h += r"<td>-</td>"+"\n" h += r"<td>%i</td>" %(integrationTime) h += r"<td>%i</td>" %(rhythm) h += r"<td>%i</td>" %(detectorRows) h += r"</tr>"+"\n" h += r"</table>"+"\n" h += r"<h3>Nadir/Limb</h3>"+"\n" headers = ["Name", "Diffraction Order 1", "Diffraction Order 2", "Diffraction Order 3", "Diffraction Order 4", "Diffraction Order 5", "Diffraction Order 6", "Integration Time", "Rhythm", "Detector Height"] h += r"<table border=1>"+"\n" h += r"<tr>"+"\n" for header in headers: h += r"<th>%s</th>" %header h += r"</tr>" for key in sorted(nadirObservationDict.keys()): orders, integrationTime, rhythm, detectorRows, channelCode = getObsParameters(key, nadirObservationDict) h += r"<tr>"+"\n" h += r"<td>%s</td>" %(key) if "COP" in orders: h += r"<td>%s (manual mode)</td>" %(orders) for order in range(5): h += r"<td>-</td>"+"\n" else: for order in orders: h += r"<td>%s</td>" %(order) for order in range(6-len(orders)): h += r"<td>-</td>"+"\n" h += r"<td>%i</td>" %(integrationTime) h += r"<td>%i</td>" %(rhythm) h += r"<td>%i</td>" %(detectorRows) h += r"</tr>"+"\n" h += r"</table>"+"\n" h += r"<br>"+"\n" h += r"<br>"+"\n" h += r"<p>Page last modified: %s</p>" %(datetime.now().strftime('%a, %d %b %Y %H:%M:%S')) +"\n" with open(os.path.join(paths["HTML_MTP_PATH"], "nomad_mtp%03d_overview.html" %(mtpNumber)), 'w') as f: f.write(h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def order_report():", "def plot_sorted_accuracies(results):\n ###TODO\n #print(results)\n \n #step 1 -> sort accuracies and get x and y\n # x = setting\n # y = sorted list of accuracies\n #results.sort(key=lambda x:(x['accuracy'])) \n # don't use it ->it will change results from main as well\n \n #print(results)\n\n acc = []\n \n x = list(range(len(results)))\n \n for d in results:\n #print('dict=',d)\n acc.append(d['accuracy'])\n \n acc.sort(key=lambda x:(x))\n #print('acc = ',acc)\n \n #step 2 -> plot figure\n fig1 = plt.figure(1) \n plt.plot(x,acc)\n plt.ylabel('accuracy')\n plt.xlabel('settings')\n \n plt.show()\n \n fig1.savefig('accuracies.png')", "def plot_sorted_accuracies(results):\n acc = []\n for comb in results:\n acc.append(comb[\"accuracy\"])\n sorted_list = sorted(acc)\n plt.plot(range(42),sorted_list,'bo-')\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"Setting\")\n plt.savefig(\"accuracies.png\")", "def draw_order(self, refresh=False):\n\n x, y = (self.current_order.dispersion, self.current_order.flux)\n \n self.ax_order.lines[0].set_data([x, y])\n \n # Show a few percent either side.\n percent = 2\n trimming = (x[-1] - x[0]) * percent/100.\n self.ax_order.set_xlim(x[0] - trimming, x[-1] + trimming)\n\n self.ax_order.set_ylim(np.nanmin(y), np.nanmax(y))\n\n self.norm_plot.reset_zoom_limits()\n\n #self.ax_order.set_title(\"Order {0} of {1}\".format(\n # 1 + self.current_order_index, \n # len(self.parent.session.input_spectra)))\n\n if refresh:\n self.norm_plot.draw()\n return None", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 
'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def drawCountPlot(df, colors, order):\n plt.style.use('dark_background')\n fig, ax = plt.subplots(figsize=(15,8))\n sns.set_palette(sns.color_palette(colors))\n sns.countplot(x=df.spectral_type, data=df, order=order)\n plt.show()", "def prop_types(houses:pd.DataFrame) -> None:\n sns.set_style('whitegrid')\n indexNames= houses[houses['PRICE'] >= 3000000].index\n houses= houses.drop(indexNames)\n \n ax= sns.catplot(x= 'PROPERTY_TYPE', y= 'PRICE', kind= 'box', data= houses)\n ax.set_xticklabels(rotation=30)\n plt.tight_layout()\n plt.show()\n \n ax= sns.countplot(x= 'PROPERTY_TYPE', data= houses)\n ax.set_xticklabels(ax.get_xticklabels(), rotation= 30, ha=\"right\", fontsize=9)\n plt.show()", "def printOrders(self, event):\n \n pass", "def plot(model, results, filename):\n\n # c = model.compartments.get_one(id='c')\n #\n # rna_1 = model.species_types.get_one(id='rna_1').species.get_one(compartment=c)\n # rna_2 = model.species_types.get_one(id='rna_2').species.get_one(compartment=c)\n # rna_3 = model.species_types.get_one(id='rna_3').species.get_one(compartment=c)\n #\n pops = results.get('populations')\n time = pops.index\n pop_rna_1 = pops['rna_1[c]']\n pop_rna_2 = pops['rna_2[c]']\n pop_rna_3 = pops['rna_3[c]']\n\n pop_atp = pops['atp[c]']\n pop_gtp = pops['gtp[c]']\n pop_utp = pops['ctp[c]']\n pop_ctp = pops['utp[c]']\n\n pop_amp = pops['amp[c]']\n pop_gmp = pops['gmp[c]']\n pop_ump = pops['cmp[c]']\n pop_cmp = pops['ump[c]']\n\n print(pop_rna_1, pop_atp, pop_gtp, pop_utp, pop_ctp)\n\n fig1, axes1 = pyplot.subplots(nrows=3, ncols=1)\n\n axes1[0].plot(time / 3600, pop_rna_1)\n axes1[0].plot(time / 3600, pop_rna_2)\n axes1[0].plot(time / 3600, pop_rna_3)\n axes1[0].set_xlim((time[0] / 3600, time[-1] / 3600))\n axes1[0].set_ylim((0., 10.0))\n axes1[0].legend(loc='upper right')\n\n axes1[1].plot(time / 3600, pop_atp)\n axes1[1].plot(time / 3600, pop_gtp)\n axes1[1].plot(time / 3600, pop_utp)\n axes1[1].plot(time / 3600, pop_ctp)\n axes1[1].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[1].set_ylim((0., 10.0))\n axes1[1].legend(loc='upper right')\n\n axes1[2].plot(time / 3600, pop_amp)\n axes1[2].plot(time / 3600, pop_gmp)\n axes1[2].plot(time / 3600, pop_ump)\n axes1[2].plot(time / 3600, pop_cmp)\n axes1[2].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[2].set_ylim((0., 10.0))\n axes1[2].legend(loc='upper right')\n\n fig1.savefig(filename.format('species'))\n pyplot.close(fig1)", "def create_plot(df):\n sns.set_style(\"whitegrid\")\n df_reverse = df.iloc[::-1]\n ax = sns.displot(df_reverse, x=\"Report Date\", height=6, aspect=12/6, color=\"mediumseagreen\", edgecolor=\"darkgreen\")\n 
ax.set(xlabel=\"Report Date\", ylabel=\"Number of cases\")\n plt.title(\"Number of confirmed cases vs reported month\", y=0.85, fontsize=16)\n return plt.savefig(\"../images/fig3\"), plt.close()", "def plotprice(self):\n plt.figure()\n plt.hist( self.pricetree[-1,:] )\n plt.title(\"price Distribution\") \n plt.show()", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def show_orders(self):\n\n data = cur.execute(\"\"\"SELECT * FROM orders\"\"\").fetchall()\n print(tabulate(data, headers=[\"Order ID\", \"Status\", \"Customer\", \"Address\", \"Delivery Method\"]))", "def plot_dispatch_comm(pv, demand, E, week=30,flag=False):\n\n sliced_index = (pv.index.week==week)\n pv_sliced = pv[sliced_index]\n demand_sliced = demand[sliced_index]\n self_consumption = E['inv2load'][sliced_index]\n \n direct_self_consumption = np.minimum(pv_sliced,demand_sliced)# E['inv2load'][sliced_index]\n indirect_self_consumption = self_consumption-direct_self_consumption\n res_pv_sliced = E['res_pv'][sliced_index]\n grid2load_sliced = E['grid2load'][sliced_index]\n store2inv_sliced = E['store2inv'][sliced_index]\n LevelOfCharge = E['LevelOfCharge'][sliced_index]\n inv2grid = E['inv2grid'][sliced_index]\n grid2load = E['grid2load'][sliced_index]\n aux=np.maximum(0,self_consumption)\n\n fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(17, 4*3), frameon=False,\n gridspec_kw={'height_ratios': [3, 1, 1], 'hspace': 0.04})\n\n #fig, ax = plt.subplots(figsize=(17, 4))\n axes[0].plot(demand_sliced.index, demand_sliced, color='black', lw=2,label='demand')\n \n if flag:\n axes[0].plot(pv_sliced.index, pv_sliced, color='green', lw=2,label='pv')\n axes[0].plot(direct_self_consumption.index, direct_self_consumption, color='yellow', lw=2,label='DSC')\n axes[0].plot(indirect_self_consumption.index, indirect_self_consumption, color='orange', lw=2,label='ISC')\n axes[0].plot(grid2load_sliced.index, grid2load_sliced, color='red', lw=2,label='grid')\n\n else:\n axes[0].fill_between(direct_self_consumption.index, 0, direct_self_consumption, color='orange', alpha=.8, label='DSC')\n axes[0].fill_between(pv_sliced.index, self_consumption, pv_sliced ,where=pv_sliced<demand_sliced, color='blue', hatch='//',\n alpha=.3,label='ISC')\n axes[0].fill_between(pv_sliced.index, direct_self_consumption, pv_sliced , color='gold', alpha=.3,label='Excess PV')\n\n axes[0].fill_between(grid2load_sliced.index,self_consumption,demand_sliced,color='red',alpha=.2, label='grid2load')\n axes[0].set_ylim([0, axes[0].get_ylim()[1] ])\n axes[0].set_ylabel('Power (kW)')\n\n axes[1].fill_between(LevelOfCharge.index, 0, LevelOfCharge, color='grey', alpha=.2, label='SOC')\n axes[1].set_ylabel('State of Charge (kWh)')\n\n axes[2].fill_between(inv2grid.index, 0, inv2grid, color='green', alpha=.2,label='injected2grid')\n axes[2].fill_between(inv2grid.index, 0, -grid2load, color='red', alpha=.2,label='grid drawn')\n axes[2].set_ylabel('In/out from grid (kW)')\n axes[0].legend()\n axes[1].legend()\n axes[2].legend()\n return", "def transaction_plot(ds):\n import seaborn as sns\n import pandas as pd\n df = pd.DataFrame()", "def get_multiobjective_plot(self):\n fig, ax = plt.subplots()\n\n values = self.stats['multiobj_stats']['episode_totals']\n for i in range(values.shape[1]):\n 
ax.plot(np.arange(len(values[:, i])), values[:, i],\n color=_COLORS[i % len(_COLORS)], lw=2, alpha=.9,\n label='Objective {}'.format(i))\n ax.legend()\n ax.set_ylabel('Objective value')\n ax.set_xlabel('Episode')\n return fig", "def plot(self):\n pass", "def interactions_plot():\n data = load_data('ints_CC'),load_data('ints_CD')\n fig,ax = plt.subplots()\n plot_mean_std(data_CC,ax,'C-C interactions')\n plot_mean_std(data_CD,ax,'C-D interactions')\n plt.xlabel('cluster size, n')\n plt.legend(loc='best')\n plt.savefig('interactions.pdf')", "def overview(self, minState=5):\n n = 600\n \n ### first plot: the RTOFFSETs and STATES\n plt.figure(10)\n plt.clf()\n plt.subplots_adjust(hspace=0.05, top=0.95, left=0.05,\n right=0.99, wspace=0.00, bottom=0.1)\n ax1 = plt.subplot(n+11)\n try:\n print self.insmode+' | pri:'+\\\n self.getKeyword('OCS PS ID')+' | sec:'+\\\n self.getKeyword('OCS SS ID')\n \n plt.title(self.filename+' | '+self.insmode+' | pri:'+\n self.getKeyword('OCS PS ID')+' | sec:'+\n self.getKeyword('OCS SS ID'))\n except:\n pass\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('FUOFFSET')*1e3,\n color=(1.0, 0.5, 0.0), label=self.DLtrack+' (FUOFFSET)',\n linewidth=3, alpha=0.5)\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+12, sharex=ax1) # == DDL movements\n \n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field(self.DDLtrack),\n color=(0.0, 0.5, 1.0), linewidth=3, alpha=0.5,\n label=self.DDLtrack)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field('PSP'),\n color=(0.0, 0.5, 1.0), linewidth=1, alpha=0.9,\n label='PSP', linestyle='dashed')\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+13, sharex=ax1) # == states\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'),\n color=(1.0, 0.5, 0.0), label='OPDC')\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'),\n color=(0.0, 0.5, 1.0), label='DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('STATES')\n yl=plt.ylim()\n plt.ylim(yl[0]-1, yl[1]+1)\n plt.xlim(0)\n ### fluxes\n plt.subplot(n+14, sharex=ax1)\n try:\n fsua_dark = self.fsu_calib[('FSUA', 'DARK')][0,0]\n fsub_dark = self.fsu_calib[('FSUB', 'DARK')][0,0]\n fsua_alldark = self.fsu_calib[('FSUA', 'DARK')].sum(axis=1)[0]\n fsub_alldark = self.fsu_calib[('FSUB', 'DARK')].sum(axis=1)[0]\n except:\n print 'WARNING: there are no FSUs calibrations in the header'\n fsua_dark = 0.0\n fsub_dark = 0.0\n fsua_alldark = 0.0\n fsub_alldark = 0.0\n\n M0 = 17.5\n fluxa = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n fsua_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU1 DIT'))\n print 'FLUX FSUA (avg, rms):', round(fluxa.mean(), 0), 'ADU/s',\\\n round(100*fluxa.std()/fluxa.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxa.mean()),2)\n fluxb = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n fsub_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU2 DIT'))\n print 'FLUX FSUB (avg, rms):', round(fluxb.mean(), 0), 'ADU/s',\\\n 
round(100*fluxb.std()/fluxb.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxb.mean()),2)\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\\\n fluxa/1000, color='b', alpha=0.5, label='FSUA')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\\\n fluxb/1000, color='r', alpha=0.5, label='FSUB')\n\n plt.ylim(1)\n plt.legend(prop={'size':9})\n plt.ylabel('flux - DARK (kADU)')\n plt.xlim(0)\n plt.subplot(n+15, sharex=ax1)\n try:\n # -- old data version\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field('OPDSNR'),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field('OPDSNR'),\n color='r', alpha=0.5, label='FSUB SNR')\n except:\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field(self.OPDSNR),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field(self.OPDSNR),\n color='r', alpha=0.5, label='FSUB SNR')\n plt.legend(prop={'size':9})\n \n A = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,3])\n snrABCD_a = ((A-C)**2+(B-D)**2)\n snrABCD_a /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n # snrABCD_a, color='b', alpha=0.5, linestyle='dashed')\n \n A = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,3])\n \n snrABCD_b = ((A-C)**2+(B-D)**2)\n snrABCD_b /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n # snrABCD_b, color='r', alpha=0.5, linestyle='dashed') \n \n # -- SNR levels:\n #plt.hlines([self.getKeyword('INS OPDC OPEN'),\n # self.getKeyword('INS OPDC CLOSE'),\n # self.getKeyword('INS OPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(1.0, 0.5, 0.0))\n #plt.hlines([self.getKeyword('INS DOPDC OPEN'),\n # self.getKeyword('INS DOPDC CLOSE'),\n # 
self.getKeyword('INS DOPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(0.0, 0.5, 1.0))\n # -- plot thresholds\n plt.ylabel('SNR')\n plt.xlim(0)\n \n if self.getKeyword('OCS DET IMGNAME')=='PACMAN_OBJ_ASTRO_':\n # == dual FTK\n plt.subplot(n+16, sharex=ax1)\n plt.ylabel('PRIMET ($\\mu$m)')\n #met = interp1d(np.float_(self.raw['METROLOGY_DATA'].\\\n # data.field('TIME')),\\\n # self.raw['METROLOGY_DATA'].data.field('DELTAL'),\\\n # kind = 'linear', bounds_error=False, fill_value=0.0)\n met = lambda x: np.interp(x,\n np.float_(self.raw['METROLOGY_DATA'].data.field('TIME')),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n metro = met(self.raw['DOPDC'].data.field('TIME'))*1e6\n n_ = min(len(self.raw['DOPDC'].data.field('TIME')),\n len(self.raw['OPDC'].data.field('TIME')))\n\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n metro, color=(0.5,0.5,0.), label='A-B')\n\n w1 = np.where((self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'OPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'OPDC FTK stat: 0%'\n\n w1 = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DOPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'DOPDC FTK stat: 0%'\n\n w = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DUAL FTK stat:', round(100*len(w[0])/float(n_),1), '%'\n except:\n print 'DUAL FTK stat: 0%'\n\n plt.xlim(0)\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], '.g', linewidth=2,\n alpha=0.5, label='dual FTK')\n #plt.legend()\n if len(w[0])>10 and False:\n coef = np.polyfit(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], 2)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n np.polyval(coef, self.raw['DOPDC'].\n data.field('TIME')),\n color='g')\n plt.ylabel('metrology')\n\n print 'PRIMET drift (polyfit) :', 1e6*coef[1], 'um/s'\n slope, rms, synth = NoisySlope(self.raw['DOPDC'].\n data.field('TIME')[w],\n metro[w], 3e6)\n plt.figure(10)\n yl = plt.ylim()\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n synth, color='r')\n plt.ylim(yl)\n print 'PRIMET drift (NoisySlope):',\\\n slope*1e6,'+/-', rms*1e6, 'um/s'\n else:\n # == scanning\n plt.subplot(n+16, sharex=ax1)\n fringesOPDC = \\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA3')[:,0]\n \n fringesDOPDC =\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA3')[:,0]\n \n plt.plot(self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesOPDC/fringesOPDC.std()),\n color=(1.0, 0.5, 0.0), alpha=0.6,\n label=self.primary_fsu+'/OPDC')\n plt.plot(self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesDOPDC/fringesDOPDC.std()),\n color=(0.0, 0.5, 1.0), alpha=0.6,\n label=self.secondary_fsu+'/DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('A-C')\n plt.xlabel('time stamp ($\\mu$s)')\n return", "def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))", "def 
plot(self):\n # get data without totals\n data = self.woe_report[self.woe_report.index != 'total']\n # setup panel\n fig, axs = plt.subplots(1, 3, figsize=(12, 3))\n plt.subplots_adjust(wspace=0.3)\n # first chart\n data['P(Hi|A)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n data['P(Hi|Ā)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n axs[0].set_title('Probability distribution')\n axs[0].set_xlabel(data.index.name)\n axs[0].set_ylabel('probability')\n axs[0].legend(['P(Hi|A)', 'P(Hi|Ā)'])\n # second chart\n data['weight-of-evidence'].plot(ax=axs[1], linewidth=3, alpha=0.7)\n axs[1].set_title('WoE')\n axs[1].set_xlabel(data.index.name)\n axs[1].set_ylabel('WoE')\n # third chart\n data['information-value'].plot(ax=axs[2], linewidth=3, alpha=0.7)\n axs[2].set_title('Information value')\n axs[2].set_ylabel('IV')", "def _plot_ctc(self, save_path=None, topk=10):\n if self.ctc_weight == 0:\n return\n if len(self.ctc.prob_dict.keys()) == 0:\n return\n from matplotlib import pyplot as plt\n if save_path is not None and os.path.isdir(save_path):\n shutil.rmtree(save_path)\n os.mkdir(save_path)\n elen = self.ctc.data_dict['elens'][-1]\n probs = self.ctc.prob_dict['probs'][-1, :elen]\n topk_ids = np.argsort(probs, axis=1)\n plt.clf()\n n_frames = probs.shape[0]\n times_probs = np.arange(n_frames)\n plt.figure(figsize=(20, 8))\n for idx in set(topk_ids.reshape(-1).tolist()):\n if idx == 0:\n plt.plot(times_probs, probs[:, 0], ':', label='<blank>', color='grey')\n else:\n plt.plot(times_probs, probs[:, idx])\n plt.xlabel(u'Time [frame]', fontsize=12)\n plt.ylabel('Posteriors', fontsize=12)\n plt.xticks(list(range(0, int(n_frames) + 1, 10)))\n plt.yticks(list(range(0, 2, 1)))\n plt.tight_layout()\n if save_path is not None:\n plt.savefig(os.path.join(save_path, 'prob.png'))\n plt.close()", "def plot_eta(self, **kwargs):\r\n\r\n # Get the kwargs.\r\n items = kwargs['items']\r\n if type(items) is not list:\r\n items = [items]\r\n if 'desc' in kwargs.keys():\r\n desc = kwargs['desc']\r\n else:\r\n desc = ''\r\n\r\n fig = plt.figure()\r\n ax = plt.subplot(111)\r\n for item in items:\r\n c = item[0]\r\n mode = item[1]\r\n if mode > self.phi.num_modes:\r\n raise Exception(\"!!! 
Only %s modes in analysis !!!\" % self.phi.num_modes.__str__())\r\n\r\n # Plot the requested modal displacement.\r\n label = 'Mode {0} case: {1}'.format(mode, c)\r\n ax.plot(self.time[c], self.eta[c][mode - 1, :], label=label)\r\n ax.legend()\r\n plt.title('Modal Response of FF: %s' % self.pfile.name)\r\n plt.xlabel('Time (s)')\r\n fig.canvas.set_window_title('{0} {1}'.format(self.name, desc))\r\n plt.show()", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def plot_sammen_vekst(urn, ordlister, window=5000, pr = 100):\n rammer = []\n for ordbag in ordlister:\n vekst = vekstdiagram(urn, params = {'words': ordbag, 'window':window, 'pr': pr} )\n vekst.columns = [ordbag[0]]\n rammer.append(vekst)\n return pd.concat(rammer)", "def plot_representer(representer, order=2, size=32, *args, **kwargs):\n\tif order == 2:\n\t\tx,y = representer.H2\n\telse:\n\t\tx,y = representer.H1\n\txx, yy, ux, uy = representer.current.space.grid_evaluation(x, y, size=size)\n\tplot_stream(xx, yy, ux, uy, representer.current.curve, *args, **kwargs)", "def plot_correlation_graph(temps, offense_counts,label, filename):\n\t#this is to demonstrate line graphs but the data is categorical so you should actually be using bar graphs\n\tfig, ax = plt.subplots()\n\n\n\tplt.scatter(temps, offense_counts, color=\"blue\", marker= 'o', label=label)\n\t\n\tplt.xlabel('Temperature (Celsius)')\n\tplt.ylabel('Number of offenses')\n\tplt.legend()\n\tplt.savefig(filename,format=\"png\")\n\tplt.show()", "def display_results(results, sizes):\r\n plot.xlabel('Array size')\r\n plot.ylabel('Time')\r\n plot.title('Sorting algorithms comparison')\r\n for name, result in results.items():\r\n plot.plot(sizes, result, label=name)\r\n plot.grid(True)\r\n plot.legend()\r\n plot.show()", "def plot_data(self):" ]
[ "0.6188708", "0.59705603", "0.5956688", "0.59423614", "0.581761", "0.5760428", "0.56943524", "0.5671908", "0.5650233", "0.5555503", "0.5514867", "0.540279", "0.5400018", "0.53952277", "0.53590506", "0.5358435", "0.53566706", "0.5345731", "0.53333133", "0.5314275", "0.53101605", "0.53054214", "0.52896667", "0.5289477", "0.5271529", "0.52655566", "0.5258288", "0.52537984", "0.5242754", "0.5238781" ]
0.6303185
0
vscrollbar = Scrollbar(master)
vscrollbar.grid(row=0, column=1, sticky=N+S)
hscrollbar = Scrollbar(master, orient=HORIZONTAL)
hscrollbar.grid(row=1, column=0, sticky=E+W)
canvas = Canvas(master, yscrollcommand=vscrollbar.set, xscrollcommand=hscrollbar.set, bg='999999')
canvas.grid(row=0, column=0, sticky=N+S+E+W)
vscrollbar.config(command=canvas.yview)
hscrollbar.config(command=canvas.xview)
# make the canvas expandable
master.grid_rowconfigure(0, weight=1)
master.grid_columnconfigure(0, weight=1)
frame = Frame(canvas)
frame.rowconfigure(1, weight=1)
frame.columnconfigure(1, weight=1)
canvas.create_window(0, 0, anchor=NW, window=frame)
frame.update_idletasks()
canvas.config(scrollregion=canvas.bbox("all"))
def AppWinFrame(master): # auto scrolls
    vscrollbar = Scrollbar(master)
    vscrollbar.pack(side=RIGHT, fill=Y)
    canvas = Canvas(master, yscrollcommand=vscrollbar.set, bg='#999999')
    frame = Frame(canvas, borderwidth=2, relief=RIDGE, bg="#BFB8FE")
    vscrollbar.config(command=canvas.yview)
    frame.pack(side=LEFT, fill=BOTH)
    #frame.update_idletasks()
    #canvas.config(scrollregion=canvas.bbox("all"))
    #frame = Frame(master)
    return canvas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,master,**kw):\n Frame.__init__(self,master,**kw)\n \n self.canvas=Canvas(self,scrollregion=(0,0,500,500))#,width=300,height=300,scrollregion=(0,0,500,500))\n self.internal_frame=Frame(self.canvas)\n self.hbar=Scrollbar(self,orient=HORIZONTAL)\n self.vbar=Scrollbar(self,orient=VERTICAL)\n\n interior_id=self.canvas.create_window((0,0),window=self.internal_frame,anchor=\"nw\")\n\n \n self.hbar.pack(side=BOTTOM,fill=X)\n self.hbar.config(command=self.canvas.xview)\n \n \n self.vbar.pack(side=RIGHT,fill=Y)\n self.vbar.config(command=self.canvas.yview)\n \n## self.canvas.config(width=300,height=300)\n self.canvas.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.canvas.bind_all(\"<MouseWheel>\",lambda x:self.on_mouse_wheel(x,self.canvas))\n self.canvas.pack(side=LEFT,expand=True,fill=BOTH)\n\n def _configure_interior(event):\n \"\"\"\n Figures out how big the interior frame needs to be\n \"\"\"\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.internal_frame.bind('<Configure>', _configure_interior)\n\n def _configure_canvas(event):\n \"\"\"\n Figures out how bid the interior canvas needs to be\n \"\"\"\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.canvas.bind('<Configure>', _configure_canvas)", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def setup_scrollbar(self):\r\n self.container_widgets[\"order_frame\"].grid_propagate(False)\r\n self.container_widgets[\"orders_scrollbar\"].grid(row=0, column=1, sticky='ns')\r\n self.container_widgets[\"order_canvas\"].bind_all(\"<Button-4>\", self.on_mousewheel) # TODO not working\r\n self.container_widgets[\"order_canvas\"].bind_all(\"<Button-5>\", self.on_mousewheel) # TODO not working\r\n self.container_widgets[\"order_canvas\"].config(\r\n yscrollcommand=self.container_widgets[\"orders_scrollbar\"].set)\r\n self.container_widgets[\"order_canvas\"].config(\r\n 
scrollregion=self.container_widgets[\"order_canvas\"].bbox(\"all\"))\r\n self.container_widgets[\"order_canvas\"].create_window(\r\n (0, 0),\r\n window=self.container_widgets[\"orders_container\"],\r\n anchor='nw')\r\n # TODO change width\r\n self.container_widgets[\"order_canvas\"].config(\r\n width=600 + self.container_widgets[\"orders_scrollbar\"].winfo_width())", "def init_canvas_frame(self, max_width=4000, max_height=4000):\n self.frames[\"canvas\"] = Frame(\n master=self.window, width=400, height=400)\n self.canvas = Canvas(\n master=self.frames[\"canvas\"],\n scrollregion=(0, 0, max_width, max_height),\n bg=\"white\")\n h_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=HORIZONTAL)\n h_scrl_bar.pack(side=BOTTOM, fill=X)\n h_scrl_bar.config(command=self.canvas.xview)\n v_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=VERTICAL)\n v_scrl_bar.pack(side=RIGHT, fill=Y)\n v_scrl_bar.config(command=self.canvas.yview)\n self.canvas.config(\n xscrollcommand=h_scrl_bar.set,\n yscrollcommand=v_scrl_bar.set)\n self.canvas.pack(side=LEFT, expand=True, fill=BOTH)\n self.frames[\"canvas\"].pack(\n anchor=\"nw\", side=LEFT, expand=True, fill=BOTH)\n\n self.canvas.bind(\"<ButtonPress-1>\", self.move_start)\n self.canvas.bind(\"<B1-Motion>\", self.move_move)\n self.canvas.bind(\"<Button-4>\", self.linux_zoomer_plus)\n self.canvas.bind(\"<Button-5>\", self.linux_zoomer_minus)\n # windows scroll\n self.canvas.bind(\"<MouseWheel>\", self.windows_zoomer)", "def Configure_YScroll( self ):\r\n Label(self.frame_scroll).pack( side = TOP )\r\n self.yscroll = Scrollbar( self.frame_scroll )\r\n self.yscroll.config( command = self.Vertical_Scroll )\r\n self.canvas_one.config( yscrollcommand = self.Double_Expand )\r\n self.canvas_two.config( yscrollcommand = self.Double_Expand )", "def scroll_window(self):\r\n window = tkinter.Frame(self.root)\r\n scroller = tkinter.Scrollbar(self.root, orient=\"vertical\",\r\n command=self.canvas.yview)\r\n self.canvas.configure(yscrollcommand=scroller.set)\r\n\r\n scroller.pack(side=\"right\", fill=\"y\")\r\n self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\r\n self.canvas.create_window((4, 4), window=window, anchor=\"nw\",\r\n tags=\"self.window\")\r\n return window", "def _configure_interior(event):\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def update_scrollbar(self):\n self.testCanvas.bind('<Configure>', self.on_configure)\n self.testFrame.bind('<Configure>', self.on_configure)", "def __init__(self, master, async = 1, kw = {}, **opts):\n if not opts.has_key('bg'): opts['bg'] =\"white\"\n if not opts.has_key('highlightthickness'): opts['highlightthickness'] = 0\n ScrolledCanvas.__init__(self, master, kw, **opts)\n self.plusicon = self.minusicon = None\n self.nodeheight = 20\n self.sizetree = 0\n self.node = None\n self.first = None\n self.y = 0\n self.selection = []\n self.displayed = []\n self.async = async\n self.cancel_draw = None\n self[\"width\"] = 280\n self[\"height\"] = 200\n \n # There must be a better way 
to register Resize Event...\n self.bind(\"<Configure>\", self._resized)", "def create_board_canvas(master: Widget) -> None:\r\n\r\n self.canvas = Canvas(master, bg='black')\r\n self.canvas.bind('<Configure>', self.on_canvas_resize)\r\n self.canvas.bind(\"<B1-Motion>\", self.on_canvas_click)\r\n self.canvas.bind(\"<Button-1>\", self.on_canvas_click)\r\n self.canvas.bind(\"<ButtonRelease-1>\", self.on_canvas_mouse_release)\r\n self.canvas.pack(fill=BOTH, expand = TRUE)", "def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def on_configure(self, event):\n self.testCanvas.configure(scrollregion=self.testCanvas.bbox('all'))\n self.testCanvas.yview_moveto(1)", "def _configure_canvas(event):\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def __init__(self, master, mode, height, width=62, command=None):\r\n scrollbar = Scrollbar(master)\r\n scrollbar.pack(side=RIGHT, fill=Y)\r\n Listbox.__init__(self, master, selectmode=mode, height=height, width=width, yscrollcommand=scrollbar.set, command=command)\r\n self.pack(fill=X)\r\n scrollbar.config(command=self.yview)", "def _make_message_frame( self, parent, default_scroll = True ):\n# color = \"black\" # this may need a bit of rework -- looks like not used\n #iframe = Tk.Frame( parent, width=300, height=800,\n # bg =\"blue\", relief = Tk.RAISED, borderwidth=1, )\n iframe = self\n\n # bframe is for the buttons on the left\n bframe = Tk.Frame( iframe, bg = \"white\", width=30 )\n # width=300, height=800, bg =\"blue\", relief=RAISED, borderwidth=1, )\n bframe.grid( row=0, column=0, sticky = Tk.N + Tk.S )\n\n text0 = Tk.Text( iframe , width=50, height=20 )\n\n s_text0 = Tk.Scrollbar( iframe )\n s_text0.grid( row=0, column=2, sticky = Tk.N + Tk.S )\n\n s_text0.config( command=text0.yview )\n text0.config( yscrollcommand=s_text0.set )\n\n text0.grid( row=0, column=1, sticky = Tk.N + Tk.S + Tk.E + Tk.W )\n\n self.msg_text = text0\n\n iframe.grid_columnconfigure( 1, weight=1 )\n iframe.grid_rowconfigure( 0, weight=1 )\n\n # now into the button frame bframe\n\n # 
spacer\n s_frame = Tk.Frame( bframe, bg =\"green\", height=20 ) # width=30 )\n s_frame.grid( row=0, column=0 )\n row_ix = 0\n\n # ---- Clear button\n b_clear = Tk.Button( bframe , width=10, height=2, text = \"Clear\" )\n b_clear.bind( \"<Button-1>\", self.do_clear_button )\n if self.gui_style:\n self.gui_style.style_button( b_clear )\n b_clear.grid( row=row_ix, column=0 )\n\n self.button_widgets.append( b_clear )\n row_ix += 1\n\n # ---- Copy selection\n a_widget = Tk.Button( bframe , width=10, height=2, text = \"Cop Selection\",\n command = self.copy_selection)\n # b_temp.bind( \"<Button-1>\", self.doButtonText )\n if self.gui_style:\n self.gui_style.style_button( a_widget )\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n row_ix += 1\n\n #-----\n a_widget = Tk.Button( bframe , width=10, height=2, text = \"Copy All\" )\n a_widget.bind( \"<Button-1>\", self.do_copy_button )\n if self.gui_style:\n self.gui_style.style_button( a_widget )\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n row_ix += 1\n\n # -------------\n self.cb_scroll_var = Tk.IntVar() # for check box in reciev frame\n a_widget = Tk.Checkbutton( bframe,\n width = 7,\n height = 2,\n text = \"A Scroll\",\n variable = self.cb_scroll_var,\n command = self.do_auto_scroll )\n\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n\n row_ix += 1\n self.cb_scroll_var.set( default_scroll ) # was AppGlobal.parameters.default_scroll )\n\n return iframe", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def __init__(self, *args, **kwargs):\r\n\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n\r\n self.title(TITLE)\r\n self.geometry(f\"{WIDTH}x{HEIGHT}\")\r\n self.config(background=\"pale turquoise\")\r\n\r\n self.scroll_frame = VerticalScrolledFrame(self)\r\n self.scroll_frame.grid(column=1, row=3)\r\n\r\n self.place_widgets()", "def open_window(self,size):\n # Window\n self.root = Tk()\n self.root.geometry(size)\n self.root.resizable(0, 0)\n\n\n # Tree\n self.tree = ttk.Treeview(self.root, heigh=20)\n self.tree.grid(row=4, column=0, padx=20)\n self.tree.grid(columnspan=5)\n\n hsb = ttk.Scrollbar(self.root, orient=\"horizontal\")\n hsb.configure(command=self.tree.xview)\n self.tree.configure(xscrollcommand=hsb.set)\n hsb.grid(row=5, column=0, padx=20, pady=20, columnspan=5, sticky=(W + E))", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def onFrameConfigure(self, event):\n self.panel_002.config(scrollregion=self.panel_002.bbox(\"all\"))", "def Data_Frame( self ):\r\n #Create pane\r\n p = self.pane_widget.add( \"Data\", min = 0.1, max = 0.9)\r\n frame_sequence = Frame( p )\r\n #xscroll at the top\r\n self.xscroll = Scrollbar( frame_sequence, orient = HORIZONTAL )\r\n self.xscroll.pack(side = TOP, fill = X )\r\n #create the canvas where the data will be displayed\r\n self.canvas_two = Canvas( frame_sequence )\r\n #Make sure these values are consistent with self.canvas_one in Tree_Frame\r\n self.canvas_two.pack( side = TOP, fill = BOTH, expand = 1 )\r\n self.xscroll.config( command = self.canvas_two.xview )\r\n self.canvas_two.config( xscrollcommand = self.xscroll.set 
)\r\n frame_sequence.pack(side=LEFT, fill = BOTH)", "def _on_scrollbar(self, *args) -> None:\r\n for textbox in self.textboxes:\r\n textbox.yview(*args)", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def grow_editor(self):\n self.mixerScrollArea.hide()\n return\n sp_height = self.mixerScrollArea.h_pad.height() + 3\n height = self.mixer.SYNTH_HEIGHT + sp_height \n self.mixerScrollArea.setFixedHeight(height + 10)", "def __init__(self, gui, x, y, width, start_val=None, callback=None, label=None,\r\n label_pos='left', shortcut=None):\r\n imgs = [\"scroll.gif\", \"scroll.gif\", \"scroll_lh.gif\", \"scroll_rh.gif\",\r\n \"scroll_thumb.gif\"]\r\n shapes = []\r\n end_w = 0\r\n for i, im in enumerate(imgs):\r\n tex = pi3d.Texture(gui.icon_path + im, blend=True, mipmap=False)\r\n w = tex.ix if i > 0 else width\r\n if i == 2:\r\n end_w = tex.ix #offsets for end buttons\r\n if i == 4:\r\n thumb_w = tex.ix / 2.0 #offset for thumb\r\n shape = pi3d.Sprite(camera=gui.camera, w=w, h=tex.iy, z=2.0)\r\n shape.set_draw_details(gui.shader, [tex])\r\n shapes.append(shape)\r\n super(Scrollbar, self).__init__(gui, shapes, x, y, callback=callback,\r\n label=label, label_pos=label_pos, shortcut=shortcut)\r\n self.toggle = False\r\n self.t_stop = [self.bounds[0] + thumb_w, self.bounds[2] - thumb_w]\r\n if not start_val:\r\n start_val = width / 2.0\r\n self.thumbpos = start_val / width * (self.t_stop[1] - self.t_stop[0])\r\n self.shapes[4].positionX(self.t_stop[0] + self.thumbpos)\r\n self.shapes[4].translateZ(-0.1)\r\n self.shapes[2].translateX((-width - end_w) / 2.0)\r\n self.shapes[3].translateX((width + end_w) / 2.0)\r\n self.bounds[0] -= end_w\r\n self.bounds[2] += end_w\r\n if self.labelobj:\r\n if label_pos == 'left':\r\n self.labelobj.translateX(-end_w)\r\n elif label_pos == 'right':\r\n self.labelobj.translateX(end_w)", "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def setup(self):\n\n # push the frame for the toplevel window\n self.lumpy.pushfr(self.tl)\n self.lumpy.col([0,1])\n\n # the frame at the top contains buttons\n self.lumpy.row([0,0,1], bg='white')\n self.lumpy.bu(text='Close', command=self.close)\n self.lumpy.bu(text='Print to file:', command=self.printfile)\n self.en = self.lumpy.en(width=10, text='lumpy.ps')\n self.en.bind('<Return>', self.printfile)\n self.la = self.lumpy.la(width=40)\n self.lumpy.endrow()\n\n # the grid contains the canvas and scrollbars\n self.lumpy.gr(2)\n \n self.ca_width = 1000\n self.ca_height = 500\n self.canvas = self.ca(self.ca_width, self.ca_height, bg='white')\n\n yb = self.lumpy.sb(command=self.canvas.yview, sticky=N+S)\n xb = self.lumpy.sb(command=self.canvas.xview, orient=HORIZONTAL,\n sticky=E+W)\n self.canvas.configure(xscrollcommand=xb.set, yscrollcommand=yb.set,\n scrollregion=(0, 0, 800, 800))\n \n self.lumpy.endgr()\n self.lumpy.endcol()\n self.lumpy.popfr()\n\n # measure some sample letters to get the text height\n # and set the scale factor for the canvas accordingly\n self.canvas.clear_transforms()\n bbox = self.canvas.measure(['bdfhklgjpqy'])\n self.unit = 1.0 * bbox.height()\n transform = ScaleTransform([self.unit, self.unit])\n self.canvas.add_transform(transform)", "def __init_UI(self):\r\n\r\n ## Setting up the vertical bar\r\n # self.bar = self.verticalScrollBar()\r\n\r\n # Create the inner widget of the scroll area\r\n self.inner_widget = 
QWidget(self)\r\n self.setWidget(self.inner_widget)\r\n\r\n # Create a vertical layout inside the previous widget\r\n self.__layout = QVBoxLayout(self)\r\n self.inner_widget.setLayout(self.__layout)\r\n\r\n # More settings\r\n self.setWidgetResizable(True)", "def __init__(self, master):\n\t\tFrame.__init__(self,master)\n\t\t\"\"\"Set the Window Title\"\"\"\n\t\tself.master.title(\"RXF Data Fit\")\n\t\tself.configure(height=200,width=200)\n\t\t\"\"\"Display the main window with a little bit of padding\"\"\"\n\t\tself.grid(padx=15, pady=15,sticky=N+S+E+W) \n\t\t#Create the Menu base\n\t\tself.menu = Menu(self)\n\t\t#Add the Menu\n\t\tself.master.config(menu=self.menu)\n\t\tself.menu.add_command(label=\"Open\", command=self.fileOpen)\n\t\tself.menu.add_command(label=\"Help\", command=self.Simple)\n\t\tself.menu.add_command(label=\"Quit\", command=self.exitProgram)\n\t\tself.pack()\n\t\tf = Figure(figsize=(5,4), dpi=100)\n\t\tcanvas=FigureCanvasTkAgg(f,master=root)\n\t\tcanvas.show()\n\t\tcanvas.get_tk_widget().pack(side=\"top\", fill=\"both\", expand=1)\n\t\ttoolbar = NavigationToolbar2TkAgg( canvas, root )\n\t\ttoolbar.update()\n\t\tcanvas._tkcanvas.pack(side=\"top\", fill=\"both\", expand=1)\t\t\n\n\n\t\txRangeLabel=Label(root,text=\"X Range\")\n\t\txRangeLabel.pack()\t\t\n\t\n\t\treplotButton=Button(root, text=\"Replot\", command=self.replot)\n\t\treplotButton.pack()\n\t\n\t\tclearButton=Button(root,text=\"Clear Plot\", command=self.clearPlot)\n\t\tclearButton.pack(padx=20,pady=5)" ]
[ "0.8507373", "0.7523171", "0.7226593", "0.71893096", "0.7073045", "0.6998887", "0.68270934", "0.6655979", "0.6654952", "0.65454537", "0.6471068", "0.63823843", "0.635936", "0.62776184", "0.6237337", "0.6183595", "0.6153732", "0.61298877", "0.6124628", "0.6080095", "0.60280615", "0.59241533", "0.5874599", "0.586917", "0.5855225", "0.58530134", "0.58206344", "0.5818027", "0.58117145", "0.5808651" ]
0.82818425
1
Try to extract data from a threshold expression.
def parse_threshold_exp(exp):
    # Regex used
    number = r"(\d+(?:\.\d*)?)"
    color = r"([WwBb])"
    half = number + color
    full = half + half

    re_number = re.compile(r"^"+number+r"$")
    re_half = re.compile(r"^"+half+r"$")
    re_full = re.compile(r"^"+full+r"$")

    # Parsing
    if exp == "": # Empty threshold
        return (float("-inf"), float("inf"))

    m = re_number.match(exp)
    if m != None: # exp is just a number : 5
        return (-float(m[1]), float(m[1]))

    m = re_half.match(exp)
    if m != None: # exp is a half expression : 5B
        if m[2] in "Ww": #5W
            return (float("-inf"), float(m[1]))
        else: #5B
            return (-float(m[1]), float("inf"))

    m = re_full.match(exp)
    if m != None: # full exp : 5B4W
        if m[2] == m[4]: # same color
            return (None, None)
        else:
            if m[2] in "Ww": #5W4B
                return (-float(m[3]), float(m[1]))
            else: #5B4W
                return (-float(m[1]), float(m[3]))

    return (None, None) # No match => ill formed expression
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self, threshold=0.5):\n pass", "def _get_pck(self, kp_id, threshold):\n if len(self.data[kp_id]) == 0:\n return None\n\n data = np.array(self.data[kp_id])\n pck = np.mean((data <= threshold).astype('float'))\n return pck", "def __init__(self, exp):\n self.min, self.max = parse_threshold_exp(exp)\n if self.min == None or self.max == None: # Error occured when parsing\n raise Exception(\"Threshold expression: '{:s}' is invalid.\".format(exp))", "def get_threshold(self):\n rgs = self.dynamics.regimes\n for r in rgs:\n if(r.initial==True): main_regime = r\n elif(r.initial==False): refractory_regime = r\n roc = main_regime.event_handlers\n threshcond = \"\"\n for oc in roc:\n if(type(oc) is lems.OnCondition):\n threshcond = self.replace_operators(oc.test)\n else: threshcond=None\n return threshcond", "def test_unknown_thresholding(self):\n self.cube.coord(var_name=\"threshold\").attributes[\n \"spp__relative_to_threshold\"\n ] = \"between\"\n msg = \"Probabilities to percentiles only implemented for\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)", "def get_threshold_data(self):\n return [list(x) for x in list(zip(self.thresholds,self.autothreshs))]", "def _check_threshold(threshold, value):\r\n return threshold[0](value, threshold[1])", "def getThreshold(self): # real signature unknown; restored from __doc__\n pass", "def infer_threshold(self, x: np.ndarray, fpr: float) -> None:\n self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)", "def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = -np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result", "def get_threshold_data(self):\n return [roi.get_threshold_data() for roi in self.rois]", "def calculate(memory):\r\n dThthreshold = \r\n Ththreshold = f(memory['latitude'],memory['longitude'])\r\n \r\n dTh = sum(i > dThthreshold for i in memory['dTh'])\r\n \r\n return", "def extract_hp_threshold(es):\n if skill_has_condition(es):\n c = es.condition\n if hasattr(c, 'hp_threshold'):\n return c.hp_threshold\n return None", "def Thresholds(self) :\n \n # keep pass through thresholds\n d = { }\n\n from Hlt2Lines.Commissioning.Lines import CommissioningLines \n d.update({CommissioningLines :\n {'Prescale' : {'Hlt2PassThrough' : 0.0001,\n 'Hlt2Forward' : 0.00001,\n 'Hlt2DebugEvent' : 0.000001},\n 'Postscale' : {'Hlt2ErrorEvent' : 'RATE(0.01)'},\n # do not want debug events on lumi-exclusive Hlt1 events...\n 
'DebugEvent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\"},\n 'ErrorEvent' : {'Priority' : 254,\n 'VoidFilter' : '',\n 'HLT2' : \"HLT_COUNT_ERRORBITS_RE('^Hlt2.*',0xffff) > 0\"},\n 'PassThrough' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'NoBiasPassThrough' : {'HLT1' : \"HLT_PASS('Hlt1NoBiasPrescaledDecision')\",\n 'VoidFilter' : ''},\n 'Transparent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(ODIN.*|L0.*|MB.*|BeamGas.*|Velo.*|NZS.*|Incident|Tell1Error|ErrorEvent)Decision$')\",\n 'VoidFilter' : ''},\n 'Lumi' : {'HLT1' : \"HLT_PASS_SUBSTR('Hlt1Lumi')\",\n 'VoidFilter' : ''},\n 'KS0_DD' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'KS0_LL' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'Turbo' : ['KS0_DD', 'KS0_LL']\n }}\n )\n return d", "def apply_hounsfield_thresholding(data_, threshold: tuple = (200, 600)):\n mask = np.ma.masked_inside(data_, threshold[0], threshold[1], ).mask\n thresholded = np.zeros_like(data_)\n thresholded[mask] = data_[mask]\n return thresholded", "def apply_threshold(da, threshold=1.):\n with np.errstate(all='ignore'):\n result = xr.where(da < threshold, np.nan, da)\n result.attrs = da.attrs\n return result", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def calculate_threshold_values(self, prob, y):\n df = pd.DataFrame({'prob': prob, 'y': y})\n df.sort_values('prob', inplace=True)\n\n actual_p = df.y.sum()\n actual_n = df.shape[0] - df.y.sum()\n\n df['tn'] = (df.y == 0).cumsum()\n df['fn'] = df.y.cumsum()\n df['fp'] = actual_n - df.tn\n df['tp'] = actual_p - df.fn\n\n df['fpr'] = df.fp / (df.fp + df.tn)\n df['tpr'] = df.tp / (df.tp + df.fn)\n df['precision'] = df.tp / (df.tp + df.fp)\n df = df.reset_index(drop=True)\n return df", "def test_check_single_threshold(self):\n data = np.array(\n [\n [[13.2, 8.0, 13.2], [-46.0, 8.0, -78.4], [-78.4, -86.5, -89.2]],\n [[34, 31.1111, 34.0], [27.5, 31.1111, 8.0], [8.0, -32.5, -46.0]],\n [[54.8, 54.2222, 54.8], [53.5, 54.2222, 49.6], [49.6, 34, -2.8]],\n ],\n dtype=np.float32,\n )\n\n threshold_coord = find_threshold_coordinate(self.cube)\n cube = next(self.cube.slices_over(threshold_coord))\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=4)", "def _get_sep_extract_threshold_(self):\n #print \"_get_sep_extract_threshold_ called\"\n \n if not hasattr(self,\"_sepbackground\"):\n _ = self.get_sep_background(update_background=False)\n return self._sepbackground.globalrms*1.5", "def extractImpact(data):\n return {key : array([hellingerDistance(i.px, data['{}'].px) for i in val]) for key, val in data.items() if key != '{}'}", "def thresh_setup():\n pass", "def extract_critic_conditioning(self, data):\n return data[0]", "def global_threshold(img, threshold_method):\n pass", "def analyze(self, word_count_thresh):", "def _thresholding(self, img_gray):\n blob_pixels = np.where(img_gray > self.thresh)\n blob_pixels = np.asarray(blob_pixels)\n\n return blob_pixels", "def _thresholding(self, img_gray):\n blob_pixels = np.where(img_gray > self.thresh)\n blob_pixels = np.asarray(blob_pixels)\n\n return blob_pixels", "def determine_threshold(yval,pval):\n\n F1 = 0\n epsilon = 0\n for _epsilon in np.linspace(min(pval),max(pval),1000):\n ## Compute stats\n _F1,stats = evaluate_epsilon(yval,pval,_epsilon)\n\n if _F1 > F1:\n F1 = _F1\n epsilon = _epsilon\n print(\"Better threshold 
found! {} ==> F1 {}\".format(epsilon,F1))\n \n return epsilon, F1", "def tissue_specific_ppi_cut(data, expr_level, threshold=0.22):\n row, col = data.edge_index\n data.new_id = data.id[expr_level > threshold]\n mask = torch.tensor(\n np.logical_and(np.isin(row, data.new_id), np.isin(col, data.new_id))\n )\n data.edge_index = data.edge_index[:, mask]\n data.edge_attr = data.edge_attr[mask]\n data.expr_mask = mask\n return data" ]
[ "0.6275016", "0.5969183", "0.5935895", "0.59351677", "0.58786726", "0.58748126", "0.586746", "0.5787836", "0.57735467", "0.5592178", "0.55909556", "0.55786645", "0.555594", "0.55175304", "0.54921883", "0.5464597", "0.5463139", "0.545508", "0.54243004", "0.5410539", "0.5408899", "0.5400531", "0.5370911", "0.53571796", "0.5347373", "0.5328634", "0.53170246", "0.53170246", "0.5312594", "0.5297436" ]
0.6682093
0
Create a Threshold object from an expression.
def __init__(self, exp):
    self.min, self.max = parse_threshold_exp(exp)
    if self.min == None or self.max == None: # Error occurred when parsing
        raise Exception("Threshold expression: '{:s}' is invalid.".format(exp))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_threshold_exp(exp):\n # Regex used\n number = r\"(\\d+(?:\\.\\d*)?)\"\n color = r\"([WwBb])\"\n half = number + color\n full = half + half\n\n re_number = re.compile(r\"^\"+number+r\"$\")\n re_half = re.compile(r\"^\"+half+r\"$\")\n re_full = re.compile(r\"^\"+full+r\"$\")\n\n # Parsing\n if exp == \"\": # Empty threshold\n return (float(\"-inf\"), float(\"inf\"))\n\n m = re_number.match(exp)\n if m != None: # exp is just a number : 5\n return (-float(m[1]), float(m[1]))\n\n m = re_half.match(exp)\n if m != None: # exp is a half expression : 5B\n if m[2] in \"Ww\": #5W\n return (float(\"-inf\"), float(m[1]))\n else: #5B\n return (-float(m[1]), float(\"inf\"))\n\n m = re_full.match(exp)\n if m != None: # full exp : 5B4W\n if m[2] == m[4]: # same color\n return (None, None)\n else:\n if m[2] in \"Ww\": #5W4B\n return (-float(m[3]), float(m[1]))\n else: #5B4W\n return (-float(m[1]), float(m[3]))\n\n return (None, None) # No match => ill formed expression", "def histogram_threshold_calculator(*args, **kwargs):\n import itk\n instance = itk.HistogramThresholdCalculator.New(*args, **kwargs)\n return instance.__internal_call__()", "def New(*args, **kargs):\n obj = itkHistogramThresholdCalculatorHFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def evaluate(self, threshold=0.5):\n pass", "def New(*args, **kargs):\n obj = itkHistogramThresholdCalculatorHDF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def overlay_thresholding_function(threshold, positive=True):\n # from the interface class definition above, there will be 3 values\n # for the thresh type: inactive, less than, greater than\n t = threshold[0]\n if threshold[-1] == 'inactive':\n if positive:\n return lambda x: np.ones(x.shape, 'B')\n return lambda x: np.zeros(x.shape, 'B')\n elif threshold[-1] == 'less than':\n if positive:\n return lambda x: np.less(x,t)\n return lambda x: np.greater_equal(x,t)\n elif threshold[-1] == 'greater than':\n if positive:\n return lambda x: np.greater(x,t)\n return lambda x: np.less_equal(x,t)\n else:\n print 'unrecognized thresholding parameters:', threshold", "def from_string_expr(cls, expr):\n if \"*\" in expr:\n ch = \"*\"\n op = \"cross\"\n elif \"+\" in expr:\n ch = \"+\"\n op = \"blend\"\n elif \"/\" in expr:\n ch = \"/\"\n op = \"nest\"\n factors = [cls(s.strip()) for s in expr.split(ch)]\n return cls(op=op, factors=factors)", "def Threshold(self, threshold=0.0):\n return _hypre.HypreParMatrix_Threshold(self, threshold)", "def __init__(self, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.threshold = threshold\n self.initial_val = initial_val", "def iso_data_threshold_calculator(*args, **kwargs):\n import itk\n instance = itk.IsoDataThresholdCalculator.New(*args, **kwargs)\n return instance.__internal_call__()", "def evaluate(hdf5_array, expression, expression_type, expression_level=0, hyperslice=None):\n # cherrypy.log.error(\"%sEvaluating %s expression: %s\" % (\n # \" \" * expression_level, expression_type, slycat.hyperchunks.tostring(expression)))\n\n if isinstance(expression, int):\n return expression\n elif isinstance(expression, float):\n return expression\n elif isinstance(expression, str):\n return expression\n elif isinstance(expression, slycat.hyperchunks.grammar.AttributeIndex):\n if hyperslice is None:\n return 
hdf5_array.get_data(expression.index)[...]\n else:\n return hdf5_array.get_data(expression.index)[hyperslice]\n elif isinstance(expression, slycat.hyperchunks.grammar.BinaryOperator):\n left = evaluate(hdf5_array, expression.operands[0], expression_type, expression_level + 1)\n for operand in expression.operands[1:]:\n right = evaluate(hdf5_array, operand, expression_type, expression_level + 1)\n # cherrypy.log.error(\"left::%s \\n right::%s\" % (left, right))\n if expression.operator == \"<\":\n left = left < right\n elif expression.operator == \">\":\n left = left > right\n elif expression.operator == \"<=\":\n left = left <= right\n elif expression.operator == \">=\":\n left = left >= right\n elif expression.operator == \"==\":\n if numpy.isnan(right):\n left = numpy.isnan(left)\n else:\n left = left == right\n elif expression.operator == \"!=\":\n left = left != right\n elif expression.operator == \"and\":\n left = numpy.logical_and(left, right)\n elif expression.operator == \"or\":\n left = numpy.logical_or(left, right)\n elif expression.operator == \"in\":\n left = numpy.in1d(left, right)\n elif expression.operator == \"not in\":\n left = numpy.in1d(left, right, invert=True)\n else:\n cherrypy.log.error(\"slycat.web.server.__init__.py evaluate\",\n \"Unknown operator: %s\" % expression.operator)\n raise ValueError(\"Unknown operator: %s\" % expression.operator)\n return left\n elif isinstance(expression, slycat.hyperchunks.grammar.FunctionCall):\n if expression.name == \"index\":\n if hyperslice is None:\n return numpy.indices(hdf5_array.shape)[expression.args[0]]\n else:\n return numpy.indices(hdf5_array.shape)[expression.args[0]][hyperslice]\n elif expression.name == \"rank\":\n values = evaluate(hdf5_array, expression.args[0], expression_type, expression_level + 1)\n order = numpy.argsort(values)\n if expression.args[1] == \"desc\":\n order = order[::-1]\n return order\n else:\n cherrypy.log.error(\"slycat.web.server.__init__.py evaluate\", \"Unknown function: %s\" % expression.name)\n raise ValueError(\"Unknown function: %s\" % expression.name)\n elif isinstance(expression, slycat.hyperchunks.grammar.List):\n return expression.values\n else:\n cherrypy.log.error(\"slycat.web.server.__init__.py evaluate\", \"Unknown expression: %s\" % expression)\n raise ValueError(\"Unknown expression: %s\" % expression)", "def New(*args, **kargs):\n obj = itkHistogramThresholdCalculatorHDUS.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def para_lower_than(threshold):\n\n return lambda step, curr_obj, curr_optimized_obj, extra_para: extra_para<threshold", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, expr, a, name='scale'):\n super(ScaleExpression, self).__init__(e=expr, domain=expr.domain,\n verbosity=expr.verbosity,\n name=name)\n ## Factor to scale the expression by.\n self.a = a", "def evaluate_threshold_filters(isovar_result, filter_thresholds):\n filter_values_dict = OrderedDict()\n for name, threshold in filter_thresholds.items():\n parts = name.split(\"_\")\n min_or_max = parts[0]\n field_name = \"_\".join(parts[1:])\n if min_or_max == \"min\":\n comparison_fn = operator.ge\n elif min_or_max == \"max\":\n comparison_fn = operator.le\n else:\n raise ValueError(\n \"Invalid filter '%s', must start with 'min' or 'max'\" % name)\n if hasattr(isovar_result, field_name):\n field_value = getattr(isovar_result, 
field_name)\n else:\n raise ValueError(\n \"Invalid filter '%s' IsovarResult does not have property '%s'\" % (\n name,\n field_name))\n filter_values_dict[name] = comparison_fn(field_value, threshold)\n return filter_values_dict", "def New(*args, **kargs):\n obj = itkHistogramThresholdCalculatorHDSS.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, label, expression):\n super(AggregateBooleanCount, self).__init__(label)\n self.expression = expression", "def create_function(self, dimensions, thresholds):\r\n def f(v):\r\n s = ''\r\n for i in range(len(dimensions)):\r\n if(float(v[dimensions[i]])>=thresholds[i]):\r\n s +='1'\r\n else:\r\n s +='0'\r\n return s\r\n raise NotImplementedError\r\n return f", "def parse_expression(expression: str) -> nodes.ExpNode:\r\n\r\n tokens = tokenize(expression)\r\n node = build_expression_tree(tokens)\r\n\r\n return node", "def get_threshold(self):\n\n if self.threshold.startswith('+'):\n if self.threshold[1:].isdigit():\n self._threshold = int(self.threshold[1:])\n self._upper = True\n elif self.threshold.startswith('-'):\n if self.threshold[1:].isdigit():\n self._threshold = int(self.threshold[1:])\n self._upper = False\n else:\n if self.threshold.isdigit():\n self._threshold = int(self.threshold)\n self._upper = True\n if not hasattr(self, '_threshold'):\n raise ValueError('Invalid threshold')", "def Eval(expression):\n # pylint: disable=eval-used\n return eval(expression)", "def get_threshold_mask(hparams, x):\n\n axis = list(range(1, x.shape.ndims))\n min_val = tf.reduce_min(x, axis=axis, keepdims=True)\n max_val = tf.reduce_max(x, axis=axis, keepdims=True)\n thresh = min_val + hparams.threshold_factor * (max_val - min_val)\n cond = tf.less(x, thresh)\n return tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))", "def __init__(self, active=False, threshold=0.2):\n self._active = active\n self._threshold = threshold", "def __lt__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Less(self, other)", "def New(*args, **kargs):\n obj = itkHistogramThresholdCalculatorHDUC.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def copy(self, threshold):\n self.indicator = threshold['indicator']\n self.stage = threshold['stage']\n self.begin = threshold['begin']\n self.end = threshold['end']\n self.quality = threshold['quality']\n self.weight = threshold['weight']\n return self", "def __init__(self, factor: float = 0.5, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.factor = factor\n self.threshold = threshold\n self.initial_val = initial_val" ]
[ "0.59799373", "0.5456655", "0.50720286", "0.5037704", "0.5025464", "0.49485746", "0.49417642", "0.4913881", "0.49124554", "0.48948416", "0.4884498", "0.48352537", "0.48344046", "0.47972414", "0.47884724", "0.47574377", "0.47426808", "0.47359854", "0.4734204", "0.4707607", "0.46993425", "0.46392497", "0.4629937", "0.46166775", "0.4611023", "0.46060342", "0.46009332", "0.45988834", "0.4589814", "0.45885384" ]
0.6444987
0
Sets the version of this Listing. The version of the listing.
def version(self, version):
    self._version = version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version(self, version):\n \n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version: str):\n\n self._version = version", "def version(self, version: str):\n\n self._version = version", "def version(self, version: int):\n\n self._version = version", "def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")", "def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n except AttributeError :\n if len(version) == 2 : # 2-tuple\n self.version = version\n else :\n try :\n self.version = [int(p) for p in str(float(version)).split(\".\")]\n except :\n self.version = [int(p) for p in IPP_VERSION.split(\".\")]", "def version(self, newVersion=None):\n pass", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version" ]
[ "0.7229378", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.7204764", "0.71070164", "0.71070164", "0.70981675", "0.7090488", "0.67855275", "0.65729374", "0.6516881", "0.6516881", "0.6516881" ]
0.7257909
1
Gets the tagline of this Listing. The tagline of the listing.
def tagline(self): return self._tagline
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLine(self):\n return _libsbml.XMLToken_getLine(self)", "def get_tag(self):\n return self.tag", "def getLine(self):\n return _libsbml.SBase_getLine(self)", "def getLine(self):\n return _libsbml.SBasePlugin_getLine(self)", "def tag(self):\n return self.tag_", "def get_startline(self):\n return self.get_attribute(\"startline\")", "def get(self):\n\t\t\n\t\treturn self.line", "def tag(self):\n return self._tag", "def line (self):\n return self._line", "def tag(self):\n\n return self._tag", "def get_tag(self) -> int:\n return self.tag", "def line(self):\n\n\t\treturn self.__line", "def line(self):\n return self._line", "def tag(self):\n return self._tag", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def _get_tag(self):\n return self.__tag", "def tag(self) -> str:\n return self._tag", "def get(self):\n return self.tag.get()", "def get_tag(self, key):\n return self._entries[key]", "def get_line(cls, node):\n return cls.lines[node.lineno - 1].strip()", "def get_vlan_tag(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetVlanTag', self.handle)", "def get_vlan_tag(self):\n\t\treturn call_sdk_function('PrlSrvCfgNet_GetVlanTag', self.handle)", "def get_line_start(self):\n return self._line_start", "def tag(self) -> str:\n return pulumi.get(self, \"tag\")", "def get_roi_line(self):\n return self.line_list", "def get_endline(self):\n return self.get_attribute(\"endline\")", "def getline(self, lnum=None):\n return self._vim.current.buffer[lnum] if lnum else self._vim.current.line" ]
[ "0.67622364", "0.63997036", "0.63478005", "0.6262222", "0.6098514", "0.6075437", "0.605605", "0.6049765", "0.60336107", "0.60215366", "0.60076064", "0.59944695", "0.59792364", "0.5970892", "0.5966851", "0.5966851", "0.5966851", "0.5966851", "0.5802726", "0.57422805", "0.5718058", "0.5717465", "0.56727165", "0.56497914", "0.56429446", "0.56403065", "0.5608833", "0.5551809", "0.55442095", "0.5528011" ]
0.8239116
0
Sets the tagline of this Listing. The tagline of the listing.
def tagline(self, tagline): self._tagline = tagline
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_tag(self, tag):\n self.update(tag=tag)", "def tagline(self):\n return self._tagline", "def set_tag(self, t) -> None:\n self.tag = t", "def set_startline(self, line_no):\n self.set_attribute(\"startline\", line_no)", "def line_style(self, line_style):\n\n self.container['line_style'] = line_style", "def tag(self, tag):\n self.tag = tag", "def set_vlan_tag(self, nVlanTag):\n\t\tcall_sdk_function('PrlVirtNet_SetVlanTag', self.handle, nVlanTag)", "def tag(self, tag):\n\n self._tag = tag", "def tag(self, tag):\n\n self._tag = tag", "def tag(self, tag):\n\n self._tag = tag", "def tag(self, tag):\n\n self._tag = tag", "def multi_line(self, multi_line):\n\n self._multi_line = multi_line", "def set_start(self, start_line):\n self.__start_line = start_line", "def set_tag(self, val):\n self.__tag__ = val", "def line_nbr(self, line_nbr):\n\n self._line_nbr = line_nbr", "def set_tag(self, key, value):\n return self", "def setTag(self, tag):\n\t\tself.config.TAG = tag", "def liver(self, liver):\n\n self.logger.debug(\"In 'liver' setter.\")\n\n self._liver = liver", "def setAddTags(self,value):\n self.PDFreactorConfiguration.in1[\"addTags\"] = value", "def line_item_id(self, line_item_id):\n\n self._line_item_id = line_item_id", "def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)", "def line_status(self, line_status):\n\n self._line_status = line_status", "def set_linked_name(self, lin):\n return", "def helpline(self, helpline):\n\n self._helpline = helpline", "def set_line(self, line, membership):\n self._lines[line] = membership", "def line_number(self, line_number):\n\n self._line_number = line_number", "def setup_list(self) -> None:\n style = self.current_line.next_line.line_parts[0].style.copy()\n\n if self.list_style is None:\n self.list_style = {}\n elif isinstance(self.list_style, str):\n self.list_style = process_style(self.list_style, self.pdf)\n\n if not isinstance(self.list_style, dict):\n raise TypeError(\n 'list_style must be a str or a dict. Value: {}'\n .format(self.list_style)\n )\n\n style.update(self.list_style)\n line_part = PDFTextLinePart(style, self.fonts)\n\n self.current_line_used_fonts.add(\n (line_part.state.font_family, line_part.state.font_mode)\n )\n\n if self.list_indent is None:\n self.list_indent = line_part.get_word_width(str(self.list_text))\n elif not isinstance(self.list_indent, (float, int)):\n raise TypeError(\n 'list_indent must be int or float. Value: {}'\n .format(self.list_indent)\n )\n\n self.list_state = line_part.state\n self.current_line.max_width -= self.list_indent", "def addkeyword(self, line):\n self.__keywords.append(line)", "def update_line(self):\n self._draw_line_text()\n self._draw_status()\n self._line_listbox.set_focus(self.model.l_index)", "def set_tag(self, scope, key, value):\r\n self._tags[scope][key] = value\r\n print 'SET', scope, key, value, self._tags" ]
[ "0.6100483", "0.6047296", "0.5798566", "0.5764338", "0.5673966", "0.5649316", "0.5629988", "0.56268173", "0.56268173", "0.56268173", "0.56268173", "0.56020707", "0.5582531", "0.55677855", "0.55462193", "0.5512546", "0.5488909", "0.54786795", "0.54657596", "0.54364645", "0.5430346", "0.53881353", "0.5377269", "0.53528064", "0.53479236", "0.5309674", "0.5299581", "0.5277456", "0.5263848", "0.52559894" ]
0.816503
0
Sets the keywords of this Listing. Keywords associated with the listing.
def keywords(self, keywords): self._keywords = keywords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keywords(self, keywords):\n\n self._keywords = keywords", "def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)", "def setKeywords(self,value):\n self.PDFreactorConfiguration.in1[\"keywords\"] = value", "def SetKeyWords(self, kw_lst):\n # Parse Keyword Settings List simply ignoring bad values and badly\n # formed lists\n self._code['keywords'] = list()\n kwlist = \"\"\n for keyw in kw_lst:\n if len(keyw) != 2:\n continue\n else:\n if not isinstance(keyw[0], int) or \\\n not isinstance(keyw[1], basestring):\n continue\n else:\n kwlist += keyw[1]\n super(EditraBaseStc, self).SetKeyWords(keyw[0], keyw[1])\n\n # Can't have ? in scintilla autocomp list unless specifying an image\n # TODO: this should be handled by the autocomp service\n if '?' in kwlist:\n kwlist.replace('?', '')\n\n kwlist = kwlist.split() # Split into a list of words\n kwlist = list(set(kwlist)) # Remove duplicates from the list\n kwlist.sort() # Sort into alphabetical order\n\n self._code['keywords'] = kwlist", "def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []", "def addkeywords(self, keywords):\n if isinstance(keywords, str):\n keywords = [keywords]\n self._kw.extend(keywords)", "def set_keywords(self, mode, keywords, filename):\n return self.set_keywords_batch(mode, keywords, [filename])", "def set_keywords(self, **kwargs):\n keywords = dict()\n\n for key, value in self.allowed_keys.items():\n keywords[key] = value[1]\n\n for key, value in kwargs.items():\n if key not in self.allowed_keys:\n error = 'Keyword %s for %s object not found' % \\\n (key, self.__class__.__name__)\n MASTError(self.__class__.__name__, error)\n\n# raise RuntimeError('Keyword %s for %s object not found' % \\\n# (key, self.__class__.__name__))\n\n if isinstance(value, self.allowed_keys[key][0]):\n keywords[key] = value\n else:\n error = 'Keyword %s value %s invalid; expected type %s, got type %s' % (key, str(value), self.allowed_keys[key][0], type(value))\n MASTError(self.__class__.__name__, error)\n# raise RuntimeError('Keyword %s value invalid' % key)\n\n return keywords", "def filter_keywords(self, keywords):\n\t\tself.keywords += self._coerce_list(keywords)", "def keywords(self):\n from hubspot3.keywords import KeywordsClient\n\n return KeywordsClient(**self.auth, **self.options)", "def Adjust_Keyword_List( self ):\r\n listing = list( self.system.Get_Term_List( ) ) #get the term list of the current profile\r\n\r\n d=ExpressionAdjust.ExpressionAdjuster( self.root, listing, 'Keywords' )\r\n if(d.return_state==0):\r\n return #Cancel hit\r\n self.system.Set_Term_List( d.profile_list )\r\n self.system.Apply_Profile_Term_List_2()", "def keywords(self, **kwargs):\n\n path = self._get_movie_id_path('keywords')\n resp = self._get_method(path, kwargs)\n return resp", "def keywords(self) -> Set[str]:\n return self._keywords", "def clearkeywords(self):\n self._kw = []", "def recommend_by_keywords(self, key_words_list=None):\n pass", "def list_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def add_keyword(self,\r\n index,\r\n keywords):\r\n\r\n if isinstance(keywords, str):\r\n keywords = {keywords}\r\n\r\n self.edit(index,\r\n self.get_keys_from_note(index).union(keywords),\r\n self.get_text_from_note(index))", "def extended_keywords(self, 
extended_keywords):\n\n self._extended_keywords = extended_keywords", "def keywords(self):\n return self.__keywords", "def keywords(self):\n return self._keywords", "def keywords(self):\n return self._keywords", "def apply_keyword_to_fields_list(self, metadata_list):\n self._basket.apply_keyword_to_fields_list(metadata_list)", "def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L", "def getKeywords(self):\n return", "def keywords(self):\n return list(self._kw)", "def _update_key_set(self):\n self._key_set = set([item.keyword for item in self._metadata])", "def removekeywords(self, keywords):\n if isinstance(keywords, str):\n keywords = [keywords]\n for kw in keywords:\n self._kw.remove(kw)", "def test_set_keywords_1(self):\n data_dict = {\"type\":\"ADD\",\n \"cluster\":\"RETRIEVE\",\n \"subcluster\": \"NONE\",\n \"host_genus\": \"PARSE\",\n \"retrieve_record\": \"RETAIN\"}\n keywords = set([\"retrieve\", \"retain\"])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"ADD\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(data_dict[\"subcluster\"], \"none\")\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"parse\")\n with self.subTest():\n self.assertEqual(data_dict[\"retrieve_record\"], \"retain\")", "def defaultKeywords(self, kwSet):\n return QsciLexerJava.keywords(self, kwSet)", "def edit_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)" ]
[ "0.7726518", "0.7518018", "0.7350841", "0.7108992", "0.7096946", "0.6822157", "0.6642924", "0.6570114", "0.6494174", "0.63918066", "0.62678164", "0.62652683", "0.6251283", "0.6152986", "0.6038084", "0.6029439", "0.59871435", "0.59594876", "0.59410316", "0.5937035", "0.5937035", "0.5935739", "0.5915433", "0.5897923", "0.5896247", "0.5855008", "0.58382785", "0.5819565", "0.5818612", "0.57635" ]
0.7723733
1
Gets the short_description of this Listing. A short description of the listing.
def short_description(self): return self._short_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def short_description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"short_description\")", "def short_description(self):\n return self.name", "def shortDescription(self):\n return self._line.description", "def short_description(self):\n description = self.description\n if description is not None:\n lines = description.splitlines()\n title = []\n for line in lines:\n line = line.strip()\n if line == \"\":\n if len(title) > 0:\n break\n else:\n title.append(line)\n description = \"\\n\".join(textwrap.wrap(\"\\n\".join(title), 80))\n\n return description", "def long_description(self):\n return self._long_description", "def shortDescription(self):\n return None", "def long_description(self) -> str:\n return self._long_description", "def short_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"short_description\")", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self._description", "def short_desc(self):\n return str(self.id)", "def get_description(self):\n\n return self._description", "def GetDescription(self):\n return str(self.description)", "def description(self):\n\n return self._get_field(\"description\")", "def get_description(self):\n return DisplayText(self._description)", "def get_description(self):\n return self.__description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description" ]
[ "0.73577285", "0.72625756", "0.72026336", "0.7117337", "0.709271", "0.7026649", "0.6991476", "0.6942552", "0.6826563", "0.6826563", "0.6826563", "0.6826563", "0.6801069", "0.67609227", "0.6740555", "0.6733474", "0.6720813", "0.671257", "0.66941947", "0.66788566", "0.66788566", "0.66788566", "0.66788566", "0.66788566", "0.66788566", "0.66788566", "0.66788566", "0.66788566", "0.66788566", "0.66788566" ]
0.79243517
1
Sets the short_description of this Listing. A short description of the listing.
def short_description(self, short_description): self._short_description = short_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def short_description(self, short_description):\n\n self._short_description = short_description", "def short_description(self, short_description):\n if short_description is None:\n raise ValueError(\"Invalid value for `short_description`, must not be `None`\")\n\n self._short_description = short_description", "def set_longdescription(self, longdesc):\n self.longdescription(longdesc)", "def short_description(self):\n return self._short_description", "def short_description(self):\n return self._short_description", "def long_description(self, long_description):\n self._long_description = long_description", "def long_description(self, long_description: str):\n\n self._long_description = long_description", "def SetDescription(self, description):\n self.description = str(description)", "def SetShortHelp(self, s):\r\n\r\n self.short_help = s", "def set_description(self, description):\n self.description = description", "def short_description(self):\n description = self.description\n if description is not None:\n lines = description.splitlines()\n title = []\n for line in lines:\n line = line.strip()\n if line == \"\":\n if len(title) > 0:\n break\n else:\n title.append(line)\n description = \"\\n\".join(textwrap.wrap(\"\\n\".join(title), 80))\n\n return description", "def set_description(self, description):\n self._description = description", "def set_description(self, description):\r\n self.__description = description", "def set_description(self, description):\n self.__description = description", "def short_url(self, short_url):\n\n self._short_url = short_url", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description" ]
[ "0.8449821", "0.80678433", "0.68069285", "0.6783612", "0.6783612", "0.6728136", "0.66674626", "0.66212046", "0.6584278", "0.6528017", "0.6505532", "0.64720845", "0.64129335", "0.64046067", "0.6326431", "0.6312329", "0.6312329", "0.6312329", "0.6312329", "0.6290732", "0.6290732", "0.6290732", "0.6290732", "0.6290732", "0.6290732", "0.6290732", "0.6290732", "0.6290732", "0.6290732", "0.6290732" ]
0.8429913
1
Gets the usage_information of this Listing. Usage information for the listing.
def usage_information(self): return self._usage_information
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })", "def get_usage_stats(self) -> UsageStats:\n return self._usage", "def usage(self):\r\n return usage.Usage(self)", "def get_usage(self):\n return self.box_usage", "def get_usage_info(self):\n\n usage_info = resource.getrusage(resource.RUSAGE_SELF)\n user_cpu = usage_info[0]\n system_cpu = usage_info[1]\n rss_size = usage_info[2]\n\n return user_cpu, system_cpu, rss_size", "def get_usage(self, start=None, end=None):\n return self.manager.get_usage(self, start=start, end=end)", "def get_usage(self):\r\n return self.box_usage", "def info(self):\n return self.client.call('GET', self.name + 'info')", "def InfoList(self):\n return self._InfoList", "def getInfo(self):\n return self._info", "def getInfo(self):\n return self.info", "def info(self):\n return self._info", "def usage():\n return _usage", "def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))", "def get_usage_data(self):\n with self._lock:\n data_copy = self._data.copy()\n return data_copy", "def usage_information(self, usage_information):\n self._usage_information = usage_information", "def info(self):\n return self._info", "def get_info(self):\n pass", "def get_info(self):\n pass", "def usage_location(self):\n if \"usageLocation\" in self._prop_dict:\n return self._prop_dict[\"usageLocation\"]\n else:\n return None", "def usage_location(self):\n if \"usageLocation\" in self._prop_dict:\n return self._prop_dict[\"usageLocation\"]\n else:\n return None", "def get_info(self) -> str:\n return self.info", "def info(self) -> Info:\n raw = self._call('GET', 'info')\n return Info.parse_raw(raw)", "def info(self):\r\n return self._get('info', {})", "def info(self) -> str:\n return self._info", "def info(self) -> str:\n return self._info", "def get_info(self):\n return \"TODO !\"", "def file_usage(self):\n sample_bytes = c_longlong()\n stream_bytes = c_longlong()\n other_bytes = c_longlong()\n self._call_fmod(\n \"FMOD_System_GetFileUsage\",\n byref(sample_bytes),\n byref(stream_bytes),\n byref(other_bytes),\n )\n return so(\n sample_bytes_read=sample_bytes.value,\n stream_bytes_read=stream_bytes.value,\n other_bytes_read=other_bytes.value,\n )", "def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response", "def info(self):\n return self._fetch_json('/api/info')" ]
[ "0.76958585", "0.68729985", "0.6765924", "0.6226989", "0.6201762", "0.60590756", "0.6058347", "0.5998649", "0.59470576", "0.5942935", "0.5919672", "0.5896635", "0.58876365", "0.5840067", "0.5839861", "0.5811322", "0.5733173", "0.57162476", "0.57162476", "0.570095", "0.570095", "0.56455207", "0.558659", "0.5580543", "0.5569808", "0.5569808", "0.5565358", "0.55505574", "0.55380905", "0.552352" ]
0.8030164
0
Sets the usage_information of this Listing. Usage information for the listing.
def usage_information(self, usage_information): self._usage_information = usage_information
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_usage(self, usage):\r\n self._usage = usage", "def usage(self, usage):\n\n self._usage = usage", "def usage(self, usage):\n\n self._usage = usage", "def usage(self, usage):\n\n self._usage = usage", "def usage_information(self):\n return self._usage_information", "def info(self, info):\n\n self._info = info", "def info(self, info: str):\n\n self._info = info", "def capacity_usage_details(self, capacity_usage_details):\n\n self._capacity_usage_details = capacity_usage_details", "def set_usage_attribute(name, value):\n ctx = _context.get()\n ctx.attributes[name] = value", "def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })", "def contact_information(self, contact_information: ContactInformation):\n\n self._contact_information = contact_information", "def update_usage_stats(self):\n self._usage.increment_usage_stats()", "def usage(self):\r\n return usage.Usage(self)", "def info(self, value: str):\n self._info = value", "def task_infos(self, task_infos):\n\n self._task_infos = task_infos", "def location_info(self, location_info: LocationInfoIm):\n\n self._location_info = location_info", "def paging_info(self, paging_info):\n\n self._paging_info = paging_info", "def more_info(self, more_info):\n\n self._more_info = more_info", "def set_metadata(self, metadata):\n if self.num_features != metadata.num_features:\n raise ValueError(\"Invalid metadata for feature list\")\n self.metadata = metadata", "def update(self, amz_listing):\n amz_listing.sku = self.asin\n amz_listing.title = self.title\n amz_listing.brand = self.brand\n amz_listing.model = self.model\n amz_listing.upc = self.upc\n amz_listing.quantity = self.quantity\n amz_listing.url = self.url\n amz_listing.salesrank = self.salesrank\n amz_listing.offers = self.offers\n amz_listing.hasprime = self.prime\n\n # Only update price if price information is provided\n if self._tag.xpath('.//Offers'):\n amz_listing.price = self.price", "def item_info(self, item_info):\n\n self._item_info = item_info", "def informationtype(self, informationtype):\n\n self._informationtype = informationtype", "def billing_info(self, billing_info):\n\n self._billing_info = billing_info", "def ion_health_info(self, ion_health_info):\n\n self._ion_health_info = ion_health_info", "def set_info_value(self, name: str, value: Any) -> None:\n self._info_data[name] = value", "def info_text(self, info_text):\n\n self._info_text = info_text", "def info_text(self, info_text):\n\n self._info_text = info_text", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_library_variable_set_usage_list_action(self):\n pass", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def link_information_source(self, link_information_source):\n\n self._link_information_source = link_information_source" ]
[ "0.6704771", "0.6600919", "0.6600919", "0.6600919", "0.6046543", "0.55667037", "0.54406595", "0.5418335", "0.5297958", "0.525111", "0.52360046", "0.51799953", "0.51696646", "0.5168463", "0.5115039", "0.50764394", "0.5067247", "0.5061697", "0.506115", "0.50149447", "0.50112426", "0.5000248", "0.4985119", "0.49017566", "0.48973185", "0.48948228", "0.48948228", "0.48823228", "0.48734945", "0.48396403" ]
0.8244212
0
Gets the long_description of this Listing. A long description of the listing.
def long_description(self): return self._long_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def long_description(self) -> str:\n return self._long_description", "def long_description(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"long_description\")", "def long_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"long_description\")", "def get_description(self):\n try:\n long_desc = self.__data[\"descriptions\"][\"MM - \" + self.__name][\"text\"].replace(\"<p>\", \"\").split('</p>')[0]\n return long_desc\n except:\n return None", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self._description", "def description(self):\n\n return self._get_field(\"description\")", "def get_description(self):\n\n return self._description", "def long_description(self, long_description: str):\n\n self._long_description = long_description", "def get_description(self):\n return DisplayText(self._description)", "def long_description(self, long_description):\n self._long_description = long_description", "def description(self) -> str:\n return self.data['description']", "def get_description(self):\n return self.__description", "def get_description(self) -> str:\n pass", "def get_description(self):\r\n return self.__description", "def short_description(self):\n return self._short_description", "def short_description(self):\n return self._short_description", "def description(self):\n return self._data.get('description')", "def _get_description(self):\n return self.__description", "def _get_description(self):\n return self.__description", "def getDescription(self):\n return self.description", "def getDescription(self):\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description" ]
[ "0.80596477", "0.758474", "0.72378236", "0.71228004", "0.71099156", "0.71099156", "0.71099156", "0.71099156", "0.7103312", "0.7053098", "0.7048452", "0.7044788", "0.7030812", "0.6991155", "0.69771314", "0.6967502", "0.6952499", "0.6934183", "0.69313747", "0.69313747", "0.692985", "0.69269866", "0.69269866", "0.6895325", "0.6888122", "0.68877095", "0.68877095", "0.68877095", "0.68877095", "0.68877095" ]
0.8087247
0
Sets the long_description of this Listing. A long description of the listing.
def long_description(self, long_description): self._long_description = long_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def long_description(self, long_description: str):\n\n self._long_description = long_description", "def set_longdescription(self, longdesc):\n self.longdescription(longdesc)", "def long_description(self) -> str:\n return self._long_description", "def long_description(self):\n return self._long_description", "def short_description(self, short_description):\n self._short_description = short_description", "def set_description(self, description):\n self.description = description", "def set_description(self, room_description):\n self.description = room_description", "def short_description(self, short_description):\n\n self._short_description = short_description", "def set_description(self, description):\r\n self.__description = description", "def set_description(self, description):\n self._description = description", "def set_description(self, description):\n self.__description = description", "def SetDescription(self, description):\n self.description = str(description)", "def description(self, description):\n\n self._set_field(\"description\", description)", "def long_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"long_description\")", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description" ]
[ "0.86731976", "0.8560788", "0.7149375", "0.7075606", "0.6908429", "0.68525", "0.6844741", "0.6813715", "0.6808889", "0.6801812", "0.6797638", "0.6730105", "0.6693408", "0.66631347", "0.66111076", "0.66111076", "0.66111076", "0.66111076", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146", "0.6571146" ]
0.8654559
1
Gets the license_model_description of this Listing. A description of the publisher's licensing model for the listing.
def license_model_description(self): return self._license_model_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def license_model_description(self, license_model_description):\n self._license_model_description = license_model_description", "def license(self): # noqa: A003\n logger.debug(\"Get license\")\n return self._raw_api.license.get()", "def license_details(self):\n if \"licenseDetails\" in self._prop_dict:\n return LicenseDetailsCollectionPage(self._prop_dict[\"licenseDetails\"])\n else:\n return None", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.licence.name:s}\"", "def model_description(self) -> Optional[str]:\n return self.profile_device.model_description", "def license_number(self):\n return self._license_number", "async def get_license(self) -> APIReturn:\n return await self._request(\"GET\", \"/getLicense\")", "def get_license(self):\n etree = self.get_eml()\n project_license_dict = etree.find('.//intellectualRights/para/ulink')\n project_license = project_license_dict.get('url')\n return project_license", "def get_license_string(self):\n output = ''\n if self.license_id:\n output += '{}'.format(self.license_id)\n if self.license_creation_date:\n output += ' (Created {})'.format(self.license_creation_date)\n if self.license_type:\n output += ' {}'.format(self.license_type)\n if self.license_status:\n output += ' - {}'.format(self.license_status)\n return output", "def licenses(self) -> Sequence[str]:\n return pulumi.get(self, \"licenses\")", "def License(self, default=None):\n return self.data.get('license', default)", "def get_description(self):\n\n return self._description", "def getLicenseList(self):\n\n res = self.getRequest('licenses')\n licenses = list()\n if res:\n for item in iter(res['items']):\n lic = vsdModels.License(**item)\n licenses.append(lic)\n\n return licenses", "def get_description(self):\n return self._description", "def license_plate(self) -> str:\n return self.numerify(self.generator.parse(self.random_element(self.license_formats)))", "def model(self) -> Model:\n return self.software_system.get_model()", "def get_license_info(self):\n\t\treturn Job(SDK.PrlSrv_GetLicenseInfo(self.handle)[0])", "def get_description(self):\n return COMPONENT_LIST[self.index][1]", "def get_description(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetDescription', self.handle)", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.__description", "def software_license(self) -> str:\n return self.random.choice(LICENSES)", "def getLicense(self, resource):\n\n if isinstance(resource, int):\n resource = 'licenses/{0}'.format(resource)\n\n res = self.getRequest(resource)\n if res:\n license = vsdModels.License(**res)\n\n return license\n else:\n return None", "def getDescription(self):\n return self._description", "def _get_description(self):\n return self.__description", "def _get_description(self):\n return self.__description", "def custom_licenses(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n result = self._dll.JLINK_EMU_GetLicenses(buf, self.MAX_BUF_SIZE)\n if result < 0:\n raise errors.JLinkException(result)\n return ctypes.string_at(buf).decode()" ]
[ "0.65090805", "0.63407576", "0.6327641", "0.6098437", "0.60412365", "0.5976658", "0.59197986", "0.58929926", "0.5860846", "0.5827089", "0.5769789", "0.5571917", "0.5557723", "0.55281246", "0.5523411", "0.55039614", "0.54873824", "0.5485993", "0.54852253", "0.5459995", "0.5459995", "0.5459995", "0.5459995", "0.54351074", "0.54266375", "0.5422554", "0.5413618", "0.54106945", "0.54106945", "0.5399952" ]
0.83493125
0
Sets the license_model_description of this Listing. A description of the publisher's licensing model for the listing.
def license_model_description(self, license_model_description): self._license_model_description = license_model_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def license_model_description(self):\n return self._license_model_description", "def license(self, license):\n\n self._license = license", "def set_description(self, sDescription):\n\t\tcall_sdk_function('PrlVirtNet_SetDescription', self.handle, sDescription)", "def set_description(self, room_description):\n self.description = room_description", "def set_description(self, description):\n self._description = description", "def set_description(self, description):\n self.description = description", "def set_description(self, sNewDescription):\n\t\tcall_sdk_function('PrlVmDev_SetDescription', self.handle, sNewDescription)", "def document_description(self, document_description):\n\n self._document_description = document_description", "def document_description(self, document_description):\n\n self._document_description = document_description", "def set_description(self, description):\n self.__description = description", "def set_description(self, description):\r\n self.__description = description", "def SetDescription(self, description):\n self.description = str(description)", "def license_number(self, license_number):\n\n self._license_number = license_number", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description" ]
[ "0.6921612", "0.6202522", "0.5927343", "0.5893093", "0.57872677", "0.5785446", "0.578273", "0.5752546", "0.5752546", "0.5742983", "0.5742669", "0.56861144", "0.5664043", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547", "0.5637547" ]
0.8649689
0
Gets the system_requirements of this Listing. System requirements for the listing.
def system_requirements(self): return self._system_requirements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system_requirements(self, system_requirements):\n self._system_requirements = system_requirements", "def requirements(self):\n requirements = []\n return requirements", "def python_requirements(self):\n try:\n dist = self.requirement.pip_requirement.get_dist()\n extras = self.requirement.pip_requirement.extras\n requirements = list(dist.requires(extras))\n except Exception:\n logger.warning(\"Failed to determine installation requirements of %s \"\n \"using pkg-resources, falling back to old implementation.\",\n self, exc_info=True)\n requirements = self.python_requirements_fallback\n logger.debug(\"Python requirements of %s: %r\", self, requirements)\n return requirements", "def primitive_requirements(self):\n\t\treturn self.typemanager.primitive_list", "def get_requirement_strings(self):\n opts = self.get_options()\n return (\n opts.requirements,\n opts.timeout_requirements,\n opts.cov_requirements,\n opts.unittest2_requirements,\n )", "def get_system_defined(self):\n\n\t\treturn self.__system_defined", "def requirements(self, context):\n\n requirements = []\n\n # Get all the tasks and the lists (so the .fill on lists are also\n # considered.)\n all_tasks = list(self.tasks) + list(flatten(self.tasks, context))\n for task in all_tasks:\n task_details = getattr(task, '__garcon__', None)\n if task_details:\n requirements += task_details.get('requirements', [])\n else:\n raise NoRunnerRequirementsFound()\n return set(requirements)", "def system(self):\n return self['system']", "def get_requirements():\n command = ['pip', 'list']\n result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)\n assert not result.stderr, \"stderr not empty\"\n return result.stdout", "def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires", "def get_requirements():\n requirements_list = []\n\n if not os.path.isfile(REQUIREMENTS_FILE):\n # Check if requirements file did not exist.\n return requirements_list\n\n with open(REQUIREMENTS_FILE) as reqs:\n for install in reqs:\n requirements_list.append(install.strip())\n\n return requirements_list", "def system_roles(self) -> api.SystemRoles:\n return self._get_model(model=api.SystemRoles)", "def data_requirements(self) -> List[DataRequirement]:\n return [\n self.bmi_cfg_data_requirement,\n self.forcing_data_requirement,\n self.hydrofabric_data_requirement,\n self.partition_cfg_data_requirement,\n self.realization_cfg_data_requirement,\n ]", "def system(self):\n try:\n return self._system\n except AttributeError:\n raise AttributeError('You must initialize the system with '\n 'createSystem before accessing the cached '\n 'object.')", "def unit_system(self):\n val = self._stub.List(self._message).unit_system\n return map_unit_system[val]", "def requirements(self):\n required = set()\n for u in self.updates:\n required = set.union(required, u.requirements)\n return required", "def requires(self):\n return [GetListings()]", "def getSysinfo(self, request):\r\n return self._ref.callRemote('getSysinfo')", "def from_sys_requirements(cls, system_requirements, _type='all'):\n allowed_types = ['all', 'clusterSpec', 'instanceType', 'fpgaDriver']\n if _type not in (allowed_types):\n raise DXError(\"Expected '_type' to be one of the following: {}\".format(allowed_types))\n\n if _type == 'all':\n return 
cls(system_requirements)\n\n extracted = defaultdict(dict)\n for entrypoint, req in system_requirements.items():\n if _type in req:\n extracted[entrypoint][_type] = req[_type]\n return cls(dict(extracted))", "def get_install_requires() -> List[str]:\n return [\n \n ]", "def get_system_flags(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetSystemFlags', self.handle)", "def get_benchmark_requirements(cls):\n pass", "def system_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SystemServiceArgs']]]]:\n return pulumi.get(self, \"system_services\")", "def system_data(self) -> 'outputs.DataCollectionEndpointResourceResponseSystemData':\n return pulumi.get(self, \"system_data\")", "def getRequirements():\n\n \n cudaLibsOk = checkCUDAisAvailable() \n \n conditionalRequirements = []\n if cudaLibsOk:\n conditionalRequirements += [\"tensorflow-gpu==1.15.3\", ]\n else:\n print(\"\\n CUDA it's not available in your machine.\")\n print(\" You won't be able to use the GPU support.\\n\")\n #if olderPip or olderSetuptools:\n #tfRequirement = \"tensorflow==1.15.0\"\n #else:\n tfRequirement = \"tensorflow==1.15.3\"\n \n conditionalRequirements += [tfRequirement]\n\n return conditionalRequirements", "def specs_to_prereq(self):\n return self._create_list_for(\"prerequisites\")", "def read_requirements():\r\n reqs_path = os.path.join('.', 'requirements.txt')\r\n with open(reqs_path, 'r') as f:\r\n requirements = [line.rstrip() for line in f]\r\n return requirements", "def req():\n\n if not current.auth.s3_logged_in():\n return None\n\n ADMIN = current.session.s3.system_roles.ADMIN\n settings = current.deployment_settings\n types = settings.get_req_req_type()\n\n get_vars = {}\n if len(types) == 1:\n t = types[0]\n if t == \"Stock\":\n get_vars = {\"type\": \"1\"}\n elif t == \"People\":\n get_vars = {\"type\": \"2\"}\n create_menu = M(\"Create\", m=\"create\", vars=get_vars)\n\n recurring = lambda i: settings.get_req_recurring()\n use_commit = lambda i: settings.get_req_use_commit()\n req_items = lambda i: \"Stock\" in types\n req_skills = lambda i: \"People\" in types\n\n return M(c=\"req\")(\n M(\"Current Needs\", f=\"organisation_needs\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\", restrict=[ADMIN]),\n ),\n M(\"Needs at Facilities\", f=\"site_needs\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Requests\", f=\"req\", vars=get_vars)(\n create_menu,\n M(\"List Recurring Requests\", f=\"req_template\", check=recurring),\n M(\"Map\", m=\"map\"),\n M(\"Report\", m=\"report\"),\n M(\"Search All Requested Items\", f=\"req_item\",\n check=req_items),\n M(\"Search All Requested Skills\", f=\"req_skill\",\n check=req_skills),\n ),\n M(\"Commitments\", f=\"commit\", check=use_commit)(\n ),\n M(\"Items\", c=\"supply\", f=\"item\")(\n M(\"Create\", m=\"create\"),\n M(\"Report\", m=\"report\"),\n M(\"Import\", m=\"import\", p=\"create\"),\n ),\n # Catalog Items moved to be next to the Item Categories\n #M(\"Catalog Items\", c=\"supply\", f=\"catalog_item\")(\n #M(\"Create\", m=\"create\"),\n #),\n M(\"Catalogs\", c=\"supply\", f=\"catalog\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Item Categories\", c=\"supply\", f=\"item_category\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def getRequirements(self, nj=[]):\n req = 'other.GlueCEStateStatus == \"Production\" '\n if self.executable_arch:\n req+=' && Member(\"VO-cms-' + \\\n self.executable_arch + \\\n '\", other.GlueHostApplicationSoftwareRunTimeEnvironment)'\n\n req = req + ' && 
(other.GlueHostNetworkAdapterOutboundIP)'\n\n return req", "def requires(self):\n return []" ]
[ "0.6756265", "0.630042", "0.6126706", "0.5784309", "0.5776431", "0.5704219", "0.5702918", "0.5696625", "0.56659317", "0.56201077", "0.5616559", "0.5568308", "0.5549997", "0.5487405", "0.5485939", "0.548016", "0.5447736", "0.5438381", "0.54212105", "0.54085964", "0.53814477", "0.5322622", "0.53040123", "0.5301934", "0.5226663", "0.52181816", "0.5210064", "0.5203694", "0.5196849", "0.5192214" ]
0.8690019
0
Sets the system_requirements of this Listing. System requirements for the listing.
def system_requirements(self, system_requirements): self._system_requirements = system_requirements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system_requirements(self):\n return self._system_requirements", "def _set_system_forces(self, system):\n forces = self.results[self.force_handle]\n system.forces = forces.view(system.n_replicas, system.n_molecules,\n system.max_n_atoms,3) * \\\n self.force_conversion", "def from_sys_requirements(cls, system_requirements, _type='all'):\n allowed_types = ['all', 'clusterSpec', 'instanceType', 'fpgaDriver']\n if _type not in (allowed_types):\n raise DXError(\"Expected '_type' to be one of the following: {}\".format(allowed_types))\n\n if _type == 'all':\n return cls(system_requirements)\n\n extracted = defaultdict(dict)\n for entrypoint, req in system_requirements.items():\n if _type in req:\n extracted[entrypoint][_type] = req[_type]\n return cls(dict(extracted))", "def system(self, system):\n\n self._system = system", "def system(self, system):\n\n self._system = system", "def _update_system(self, system):\n for p in self.required_properties:\n if p not in self.results:\n raise MDCalculatorError('Requested property {:s} not in '\n 'results'.format(p))\n elif p == self.force_handle:\n self._set_system_forces(system)\n else:\n dim = self.results[p].shape\n system.properties[p] = self.results[p].view(\n system.n_replicas, system.n_molecules, *dim[1:]) * \\\n self.property_conversion[p]", "def set_system_defined(self, system_defined):\n\n\t\tif system_defined is not None and not isinstance(system_defined, bool):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: system_defined EXPECTED TYPE: bool', None, None)\n\t\t\n\t\tself.__system_defined = system_defined\n\t\tself.__key_modified['system_defined'] = 1", "def system_status(self, system_status):\n\n self._system_status = system_status", "def set_system_flags(self, sNewVmSystemFlags):\n\t\tcall_sdk_function('PrlVmCfg_SetSystemFlags', self.handle, sNewVmSystemFlags)", "def requirements(self, requirements):\n if requirements is not None and len(requirements) > 1024:\n raise ValueError(\"Invalid value for `requirements`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._requirements = requirements", "def __verify_requirements(self):\n if self.major[1] not in self.data[self.root] or self.data[self.root][self.major[1]] is None:\n self.data[self.root][self.major[1]] = {\"Requirement\": []}\n elif \"Requirement\" not in self.data[self.root][self.major[1]] or self.data[self.root][self.major[1]][\"Requirement\"] is None:\n self.data[self.root][self.major[1]][\"Requirement\"] = []\n elif not isinstance(self.data[self.root][self.major[1]][\"Requirement\"], list):\n self.data[self.root][self.major[1]][\"Requirement\"] = [self.data[self.root][self.major[1]][\"Requirement\"]]", "def add_requirements(self, fgraph):\r\n # Added by default\r\n #fgraph.attach_feature(toolbox.ReplaceValidate())\r\n pass", "def requirements(self):\n requirements = []\n return requirements", "def update_system_versions(self):\n #system_versions = [SystemVersion(id=-1 ,type=u'QX100',desc=u'Unknown Hardware version'),\n # SystemVersion(id=0 ,type=u'QX100',desc=u'QX100 - HW Rev A/B'),\n system_versions = [SystemVersion(id=1 ,type=u'QX100', desc=u'QX100 - HW Rev A/B bigger detector cap differences'),\n SystemVersion(id=2 ,type=u'QX100', desc=u'QX100 - HW Rev C'),\n SystemVersion(id=3 ,type=u'QX150', desc=u'QX150 - HW Rev Z Upgrade'),\n SystemVersion(id=4 ,type=u'QX200', desc=u'QX200 - HW Rev Z'),\n SystemVersion(id=5 ,type=u'QX201', desc=u'QX200 - HW with BR built Detector'),\n\t\t\t SystemVersion(id=6 ,type=u'QX150L', desc=u'QX150 - HW Rev Z Upgrade 
with LED'),\n SystemVersion(id=7 ,type=u'QX201L', desc=u'QX201 - HW with BR built LED Detector'),\n SystemVersion(id=200,type=u'QX200', desc=u'QX200 - Pre-Beta HW')]\n for sv in system_versions:\n dbsv = Session.query(SystemVersion).filter_by(id=sv.id).first()\n if not dbsv:\n Session.add(sv)\n else:\n if (dbsv.type != sv.type):\n dbsv.type = sv.type\n if( dbsv.desc != sv.desc):\n dbsv.desc = sv.desc\n\n Session.commit()", "def update_system_changes(self, system):\n if system[\"current_info\"]:\n system[\"changes\"] = dict()\n\n # Check if management paths should be updated\n if (sorted(system[\"controller_addresses\"]) != sorted(system[\"current_info\"][\"managementPaths\"]) or\n system[\"current_info\"][\"ip1\"] not in system[\"current_info\"][\"managementPaths\"] or\n system[\"current_info\"][\"ip2\"] not in system[\"current_info\"][\"managementPaths\"]):\n system[\"changes\"].update({\"controllerAddresses\": system[\"controller_addresses\"]})\n\n # Check for expected meta tag count\n if len(system[\"meta_tags\"]) != len(system[\"current_info\"][\"metaTags\"]):\n if len(system[\"meta_tags\"]) == 0:\n system[\"changes\"].update({\"removeAllTags\": True})\n else:\n system[\"changes\"].update({\"metaTags\": system[\"meta_tags\"]})\n\n # Check for expected meta tag key-values\n else:\n for index in range(len(system[\"meta_tags\"])):\n if (system[\"current_info\"][\"metaTags\"][index][\"key\"] != system[\"meta_tags\"][index][\"key\"] or\n sorted(system[\"current_info\"][\"metaTags\"][index][\"valueList\"]) != sorted(system[\"meta_tags\"][index][\"valueList\"])):\n system[\"changes\"].update({\"metaTags\": system[\"meta_tags\"]})\n break\n\n # Check whether CA certificate should be accepted\n if system[\"accept_certificate\"] and not all([controller[\"certificateStatus\"] == \"trusted\" for controller in system[\"current_info\"][\"controllers\"]]):\n system[\"changes\"].update({\"acceptCertificate\": True})\n\n if system[\"id\"] not in self.undiscovered_systems and system[\"changes\"]:\n self.systems_to_update.append(system)", "def add_requirements(self, fgraph):\r\n pass", "def needs_update(self, system, environment_input):\n pass", "def setSysinfo(self, request, value):\r\n return self._ref.callRemote('setSysinfo', value)", "def set_system_name(self, system_name):\n\n\t\tif system_name is not None and not isinstance(system_name, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: system_name EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__system_name = system_name\n\t\tself.__key_modified['system_name'] = 1", "def update_system_setup(linsys_setup, vcov=None, ivcov=None, maps=None):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, _maps, g_nu, \\\n map_prop, _vcov, _ivcov = linsys_setup\n \n # Update relevant components\n if vcov is not None: _vcov = vcov\n if ivcov is not None: _ivcov = ivcov\n if maps is not None: _maps = maps\n \n # Construct updated linear system setup\n new_linsys_setup = ( datamaps, ninvs, beams, freqs, power_2d, precond_2d, \n _maps, g_nu, map_prop, _vcov, _ivcov )\n return new_linsys_setup", "def install_system_dependencies():\n with sudo(), silent():\n info('Install system dependencies')\n system_dependencies = blueprint.get('system_dependencies')\n\n if system_dependencies:\n dependencies = []\n repositories = []\n ppa_dependencies = []\n for dependency in system_dependencies:\n dep, _, rep = dependency.partition('@')\n if rep:\n if rep not in repositories:\n repositories.append(rep)\n\n ppa_dependencies.append(dep)\n elif dep not in 
dependencies:\n dependencies.append(dep)\n\n debian.apt_get_update()\n debian.apt_get('install', *dependencies)\n\n if repositories:\n for repository in repositories:\n debian.add_apt_repository(repository, src=True)\n\n debian.apt_get_update()\n debian.apt_get('install', *ppa_dependencies)", "def update(self, system, environment_input):\n pass", "def setAvailSystems(self):\n self.availSystems = []\n if self.toSystem != self.fromSystem:\n self.availSystems.append(self.fromSystem)\n return\n else:\n mySystem = self.myGalaxy.systems[self.fromSystem]\n if mySystem.myEmpireID == self.empireID or globals.diplomacy[self.myGalaxy.empires[mySystem.myEmpireID].diplomacy[self.empireID].diplomacyID]['alliance'] == 1:\n self.availSystems = mySystem.getAllConnections()\n else:\n for otherSystemID in mySystem.connectedSystems:\n otherSystem = self.myGalaxy.systems[otherSystemID]\n if otherSystem.myEmpireID == self.empireID or globals.diplomacy[self.myGalaxy.empires[otherSystem.myEmpireID].diplomacy[self.empireID].diplomacyID]['move'] == 1:\n self.availSystems.append(otherSystemID)\n self.oldAvailSystems = copy.copy(self.availSystems)", "def _install_system_requirements_linux(self):\n self.output.info(\"Calling v8/build/install-build-deps.sh\")\n os.environ[\"PATH\"] += os.pathsep + os.path.join(self.source_folder, \"depot_tools\")\n sh_script = self.source_folder + \"/v8/build/install-build-deps.sh\"\n self.run(\"chmod +x \" + sh_script)\n cmd = sh_script + \" --unsupported --no-arm --no-nacl --no-backwards-compatible --no-chromeos-fonts --no-prompt \"\n cmd = cmd + (\"--syms\" if str(self.settings.build_type) == \"Debug\" else \"--no-syms\")\n cmd = \"export DEBIAN_FRONTEND=noninteractive && \" + cmd\n self.run(cmd)", "def system_api_version(self, system_api_version):\n\n self._system_api_version = system_api_version", "def system_amount(self, system_amount):\n\n self._system_amount = system_amount", "def update_system(self, system):\n try:\n rc, storage_system = self.request(\"storage-systems/%s\" % system[\"ssid\"], method=\"POST\", data=system[\"changes\"])\n except Exception as error:\n self.module.warn(\"Failed to update storage system. Array [%s]. 
Error [%s]\" % (system[\"ssid\"], to_native(error)))", "def constraintsHardware(self, componentsRequirements):\n\n self.problem.logger.debug(\"constraintsHardware: componentsRequirements={}\".format(componentsRequirements))\n componentsRequirements = [[0 if i is None else i for i in line] for line in componentsRequirements]\n\n # ITE version - ProcProv\n tmp = []\n for k in range(self.nrVM):\n tmp.append(sum([If(self.a[i * self.nrVM + k], 1, 0) * componentsRequirements[i][0]\n for i in range(self.nrComp)]) <= self.ProcProv[k])\n self.solver.add(tmp)\n self.problem.logger.debug(\"tmp:{}\".format(tmp))\n #\n # # ITE version - MemProv\n tmp = []\n for k in range(self.nrVM):\n tmp.append(sum([If(self.a[i * self.nrVM + k], 1, 0) * (componentsRequirements[i][1])\n for i in range(self.nrComp)]) <= self.MemProv[k])\n self.solver.add(tmp)\n self.problem.logger.debug(\"tmp:{}\".format(tmp))\n\n # # ITE version - StorageProv\n tmp = []\n for k in range(self.nrVM):\n tmp.append(sum([If(self.a[i * self.nrVM + k], 1, 0) * (componentsRequirements[i][2])\n for i in range(self.nrComp)]) <= self.StorageProv[k])\n self.solver.add(tmp)\n self.problem.logger.debug(\"tmp:{}\".format(tmp))", "def python_requirements(self):\n try:\n dist = self.requirement.pip_requirement.get_dist()\n extras = self.requirement.pip_requirement.extras\n requirements = list(dist.requires(extras))\n except Exception:\n logger.warning(\"Failed to determine installation requirements of %s \"\n \"using pkg-resources, falling back to old implementation.\",\n self, exc_info=True)\n requirements = self.python_requirements_fallback\n logger.debug(\"Python requirements of %s: %r\", self, requirements)\n return requirements", "def remote_setSysinfo(self, request, value):\r\n raise NotImplementedError" ]
[ "0.6983736", "0.5838846", "0.5674688", "0.56331927", "0.56331927", "0.5464949", "0.5319611", "0.52284116", "0.52232224", "0.50656945", "0.50553805", "0.50460213", "0.5040736", "0.49555758", "0.49499926", "0.49416316", "0.49393702", "0.49228254", "0.49070564", "0.4841495", "0.48139292", "0.47884735", "0.47832027", "0.47831762", "0.47327083", "0.47057664", "0.4679446", "0.4663428", "0.4631114", "0.46303126" ]
0.8679492
0
Gets the time_released of this Listing. The release date of the listing.
def time_released(self): return self._time_released
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_time(self) -> str:\n return pulumi.get(self, \"release_time\")", "def release_date(self):\n for item in self.proto.releaseInfo.item:\n if item.label == 'Released on':\n return item.container.value", "def time_released(self, time_released):\n self._time_released = time_released", "def license_date(self):\n return self._license_date", "def get_release_info(self):\r\n return self.detail_info.get_release_info(self.version)", "def get_release_version(self):\n return self.get_property(ADB.VERSION_RELEASE_PROPERTY)", "def getPublishedTime(self): #$NON-NLS-1$\r", "def RELEASE(self):\n return get_release()", "def planned_purge_date(self):\n return self._planned_purge_date", "def planned_purge_date(self):\n return self._planned_purge_date", "def get_time(self):\n return self.__time", "def rt_dep_time(self):\n return self._rt_dep_time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def provisioning_time(self) -> str:\n return pulumi.get(self, \"provisioning_time\")", "def arrival_time(self):\r\n return self.__arrival_time", "def arrival_time(self):\r\n return self.__arrival_time", "def OfferReserveTime(self):\n if self.force_auto_sync:\n self.get('OfferReserveTime')\n return self._OfferReserveTime", "def LeaseTime(self):\n if self.force_auto_sync:\n self.get('LeaseTime')\n return self._LeaseTime", "def dt(self):\n return self.__dt", "def free_flight_time(self):\n return self._free_flight_time", "def get_time(self):\n return self.time", "def last_update_time(self):\n return self._last_update_time", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")", "def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")" ]
[ "0.7647471", "0.70362586", "0.5931893", "0.5763734", "0.5631487", "0.5537523", "0.5472509", "0.5465901", "0.5461748", "0.5461748", "0.5450319", "0.54452556", "0.5418696", "0.5418696", "0.5418696", "0.54063195", "0.54048216", "0.54048216", "0.53873295", "0.5381522", "0.53613603", "0.53609914", "0.53325415", "0.5316463", "0.5315263", "0.5315263", "0.53125817", "0.53125817", "0.5308112", "0.5308112" ]
0.7279181
1
Sets the time_released of this Listing. The release date of the listing.
def time_released(self, time_released): self._time_released = time_released
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_released(self):\n return self._time_released", "def release_time(self) -> str:\n return pulumi.get(self, \"release_time\")", "def set_time_available(self, new_value):\n\n self.available_at = new_value\n self.save()", "def release_date(self):\n for item in self.proto.releaseInfo.item:\n if item.label == 'Released on':\n return item.container.value", "def license_date(self, license_date):\n\n self._license_date = license_date", "def set_release(self, type=None, stack=None, version=None, build=None, notes=None, display=None,\n compatible=None):\n release_element = self.root_element.find(\"./release\")\n\n if release_element is None:\n raise Exception(\"Element 'release' is not found\")\n\n if type:\n update_simple(release_element, \"type\", type)\n\n if stack:\n update_simple(release_element, \"stack-id\", stack)\n\n if version:\n update_simple(release_element, \"version\", version)\n\n if build:\n update_simple(release_element, \"build\", build)\n\n if compatible:\n update_simple(release_element, \"compatible-with\", compatible)\n\n if notes:\n update_simple(release_element, \"release-notes\", notes)\n\n if display:\n update_simple(release_element, \"display\", display)", "def delivery_time(self, delivery_time):\n\n self._delivery_time = delivery_time", "def release_epoch(self, release_epoch):\n\n self._release_epoch = release_epoch", "def free_flight_time(self, free_flight_time):\n\n self._free_flight_time = free_flight_time", "def __set_release(self, project):\r\n release = project.session.create(self._config['release'])\r\n _logger.info(\"Current release: '%s'\" % project.release)\r\n _logger.info(\"Configuration release: '%s'\" % release)\r\n if project.release != release:\r\n _logger.info(\"Updating release on the project hierarchy.\")\r\n for subp in [project] + project.subprojects:\r\n subp.release = release", "def set_release_date(self, release_date):\n if not isinstance(release_date, int):\n timestamp = int(release_date.timestamp())\n else:\n timestamp = release_date\n\n # restrictions are a JSON object; this is the default one to use:\n restriction = {\n 'c': [\n {\n 'd': '>=',\n 't': timestamp,\n 'type': 'date'\n }\n ],\n 'op': '&',\n 'showc': [False]\n }\n\n verbose = False\n\n # payload from form is run through cleaning function to substitute\n # in the values that are wanted\n def _clean(payload):\n if verbose:\n print(\"Incoming\")\n for k in sorted(payload.keys()):\n print(k, payload[k])\n if 'availabilityconditionsjson' not in payload or \\\n not payload['availabilityconditionsjson']:\n restr = restriction\n logger.debug(\"No existing restriction\")\n else:\n restr = json.loads(payload['availabilityconditionsjson'])\n print(\"Loaded\", restr)\n logger.debug(\"Loaded existing restriction: %s\",\n payload['availabilityconditionsjson'])\n\n date_restrs = [r for r in restr['c'] if r['type'] == 'date']\n if len(date_restrs) > 1:\n logger.error(\"Can't handle multiple date restrictions\")\n return {}\n\n # Look for an existing date restriction and update it\n for term in restr['c']:\n if term['type'] == 'date':\n term['t'] = timestamp\n break\n else:\n # Finally adding one in if it's not there\n restr['c'].append(restriction['c'][0])\n restr['showc'].append(False)\n\n logger.debug(\"Final restriction: %s\", json.dumps(restr))\n payload['availabilityconditionsjson'] = json.dumps(restr)\n\n # Cleanse keys from the form that cause trouble\n badkeys = ['cancel', 'submitbutton']\n for k in badkeys:\n payload.pop(k, None)\n\n if verbose:\n print(\"Outgoing\")\n for k in sorted(payload.keys()):\n print(k, payload[k])\n\n return payload\n\n response = self.course.moodle.fetch_from_form(\n self._settings_get_form_url.format(id=self.id),\n self._settings_set_form_url,\n _clean,\n )\n logger.debug(\"Sent data, status code: %s\", response.status_code)", "def release_update(self, surfaces, release_state): \n # Set the integrator to the release state.\n self._s2 = (release_state['t'], release_state['q'], release_state['p'])\n\n # Remove the constraints from the system.\n remove_constraints(self, surfaces)\n #remove_list = []\n #for con in self._state1_impacts:\n # if con in self.system.constraints:\n # remove_list.append(con)\n #remove_constraints(self, remove_list) \n\n self._tau1 = TAU(self)\n\n # Step to the end of the current timestep.\n if self.t_end != self.t2:\n self.lambda0 = self.lambda1\n self.step(self.t_end, k2=self.kin_config(self.t_end))", "def set_time(self, time):\n self._time = time", "def rt_dep_time(self, rt_dep_time):\n\n self._rt_dep_time = rt_dep_time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def release_dates(self, **kwargs):\n path = self._get_movie_id_path('release_date')\n resp = self._get_method(path, kwargs)\n return resp", "def set_time(self, set_time):\n\n self._set_time = set_time", "def submit_time(self, submit_time: datetime):\n\n self._submit_time = submit_time", "def availability(self, availability):\n\n self._availability = availability", "def date_time(self, date_time):\n\n self._date_time = date_time", "def completion_time(self, completion_time: datetime):\n\n self._completion_time = completion_time", "def release_notes(self, release_notes):\n self._release_notes = release_notes", "def release(self, release):\n if release is None:\n raise ValueError(\"Invalid value for `release`, must not be `None`\") # noqa: E501\n if release is not None and len(release) < 1:\n raise ValueError(\"Invalid value for `release`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._release = release", "def upgrade_time(self, upgrade_time):\n\n self._upgrade_time = upgrade_time", "def expire_time(self, expire_time):\n\n self._expire_time = expire_time", "def time_updated(self, time_updated):\n self._time_updated = time_updated" ]
[ "0.5934729", "0.58847374", "0.55999017", "0.5470613", "0.54162925", "0.52752566", "0.5228138", "0.51995313", "0.51778126", "0.51119363", "0.5065449", "0.50520337", "0.5027209", "0.500003", "0.49566588", "0.49566588", "0.49566588", "0.49566588", "0.49566588", "0.4927865", "0.4895046", "0.48904476", "0.48900214", "0.48880866", "0.4884362", "0.4853627", "0.48471937", "0.4836646", "0.48269", "0.48022813" ]
0.77573067
0
Gets the release_notes of this Listing. Release notes for the listing.
def release_notes(self): return self._release_notes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_release_notes(self):\n\n notes = self.output.get_header('RELEASE NOTES')\n notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \\\n self.repo, self.product) + '\\n'\n\n notes += self.output.get_sub_header('COMPARISONS')\n notes += self.get_comparison(self.latest_tags[0][VERS],\n self.latest_tags[1][VERS])\n\n if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1):\n notes += self.get_comparison(self.latest_tags[1][VERS],\n self.latest_tags[2][VERS])\n\n if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW:\n notes += self.get_comparison(self.latest_tags[2][VERS],\n self.latest_tags[3][VERS])\n\n tag_data = self.get_tag(self.latest_tags[3][SHA])\n\n notes += self.output.get_sub_header('TAGS')\n notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\\n'\n notes += self.get_url_tag_commit(tag_data[\"object\"][\"sha\"]) + '\\n'\n\n changelog = self.get_changelog(tag_data[\"object\"][\"sha\"])\n if changelog:\n notes += self.output.get_sub_header('CHANGELOG')\n notes += changelog\n return notes", "def getNotes(self):\n logger.debug(\"Func: getNotes\")\n\n return self._currentNotes", "def getNotes(self):\n return self.__notes", "def notes(self):\n return reapy.NoteList(self)", "def notes(self):\n return self._notes", "def notes(self):\n return self.__notes", "def Releases():\n return releases", "def get_release_info(self):\r\n return self.detail_info.get_release_info(self.version)", "def versions(self) -> List['RadsProjectVersion']:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\")\n return [RadsProjectVersion(self, RadsVersion(l)) for l in listing.splitlines()]", "def notes(self):\n return notes.Notes(self)", "def getNotes(self):\n return self._nednotes, self._ongcnotes", "def getNotes(self, *args):\n return _libsbml.SBase_getNotes(self, *args)", "def business_notes(self):\n return self._business_notes", "def notes(self) -> str:\n return self._notes", "def notes(self):\r\n return notes.Notes(self)", "def notes(self):\r\n return notes.Notes(self)", "def release_notes(self, release_notes):\n self._release_notes = release_notes", "def notes(self):\r\n return TicketNotes(self)", "def notes(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"notes\")", "def notes(self):\n return NotesTable(self.rpc, self.name)", "def get_release_info(self, version):\r\n try:\r\n return self._detail[\"releases\"][version]\r\n except KeyError as key_error:\r\n log.warning(key_error)\r\n return []", "def get_field_notes(self):\n return self._fields_notes", "def ls(self, count = 200):\n return self._manager.ls_notes(self['id'], count)", "def notes(self):\n return Notes(self)", "def notes(self) -> Optional[str]:\n return pulumi.get(self, \"notes\")", "def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes", "def GetNotes(self, request, global_params=None):\n config = self.GetMethodConfig('GetNotes')\n return self._RunMethod(\n config, request, global_params=global_params)", "async def get_releases(\n self, prerelease: bool = False, returnlimit: int = 5\n ) -> [\"AIOGitHubAPIRepositoryRelease\"] or list:\n _endpoint = f\"/repos/{self.full_name}/releases\"\n\n response = await self.client.get(endpoint=_endpoint)\n contents = []\n\n for content in response or []:\n if len(contents) == returnlimit:\n break\n if not prerelease:\n if content.get(\"prerelease\", False):\n continue\n contents.append(AIOGitHubAPIRepositoryRelease(content))\n\n return contents", "def listNotes() -> list:\n list_of_notes = []\n for note in Note.objects.all():\n list_of_notes.append({\n 'uuid': note.uuid, 'title': note.title,\n 'author': note.author, 'body': note.body, 'created_at': localtime(note.created_at)\n })\n return list_of_notes", "def get_release_note(comments):\n release_note = \"\"\n i = 0\n for comment in comments:\n #pprint.pprint(comment)\n #print \"**** Comment-{0}: {1}\".format(i, comment['body'])\n #print \"**** Comment-{index}: {body}\".format(\n # index=i,\n # body=comment['body']\n # )\n #print \"\\tURL: {0}\".format(comment['html_url'])\n #print \"\\tURL: {url}\".format(url=comment['html_url'])\n #comment['body'].index('Changed make')\n if comment['body'].lower().find('changed make') >= 0:\n #print \"Found 'Release Note'\"\n release_note = comment['body']\n #else:\n #print \"No 'Release Note' found\"\n\n i += 1\n # print \"----------------------------------------------------------\\n\"\n return release_note" ]
[ "0.6843257", "0.6499268", "0.64411867", "0.61444026", "0.61170226", "0.6035091", "0.59695387", "0.58302104", "0.58184755", "0.58065593", "0.5796641", "0.57310104", "0.5706721", "0.57033986", "0.568316", "0.568316", "0.56737494", "0.5639619", "0.5477934", "0.5467729", "0.5440825", "0.53551865", "0.5332874", "0.53274167", "0.5315697", "0.52657187", "0.5265718", "0.52568686", "0.52268565", "0.5138552" ]
0.7991021
0
Sets the release_notes of this Listing. Release notes for the listing.
def release_notes(self, release_notes): self._release_notes = release_notes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def revision_notes(self, revision_notes):\n\n self._revision_notes = revision_notes", "def release_notes(self):\n return self._release_notes", "def set_note_version_server(cls):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n #Get the list of WebRtc nuget pakcages with prereleases\n packages = NugetUtility.nuget_cli('list', 'Id:WebRtc', '-PreRelease')\n packages = packages.split('\\r\\n')\n webrtcRegex = r\"^WebRtc+\\s\"\n #Search the list of the packages for a WebRtc package and set the version\n for package in packages:\n if re.match(webrtcRegex, package, flags=0):\n version = package\n\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version) \n \n # return to the base directory\n Utility.popd()", "def set_note_version(cls, version):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version)\n # return to the base directory\n Utility.popd()", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes: str):\n self._notes = notes", "def setNotes(self, *args):\n return _libsbml.SBase_setNotes(self, *args)", "def notes(self, notes):\n if notes is None:\n raise ValueError(\"Invalid value for `notes`, must not be `None`\") # noqa: E501\n\n self._notes = notes", "def get_release_notes(self):\n\n notes = self.output.get_header('RELEASE NOTES')\n notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \\\n self.repo, self.product) + '\\n'\n\n notes += self.output.get_sub_header('COMPARISONS')\n notes += self.get_comparison(self.latest_tags[0][VERS],\n self.latest_tags[1][VERS])\n\n if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1):\n notes += self.get_comparison(self.latest_tags[1][VERS],\n self.latest_tags[2][VERS])\n\n if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW:\n notes += self.get_comparison(self.latest_tags[2][VERS],\n self.latest_tags[3][VERS])\n\n tag_data = self.get_tag(self.latest_tags[3][SHA])\n\n notes += self.output.get_sub_header('TAGS')\n notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\\n'\n notes += self.get_url_tag_commit(tag_data[\"object\"][\"sha\"]) + '\\n'\n\n changelog = self.get_changelog(tag_data[\"object\"][\"sha\"])\n if changelog:\n notes += self.output.get_sub_header('CHANGELOG')\n notes += changelog\n return notes", "def notes(self, notes):\n if (self.local_vars_configuration.client_side_validation and\n notes is not None and len(notes) > 255):\n raise ValueError(\"Invalid value for `notes`, length must be less than or equal to `255`\") # noqa: E501\n\n self._notes = notes", "def removed_dates_notes(self, removed_dates_notes):\n\n self._removed_dates_notes = removed_dates_notes", "def releases():\n r = run('ls -x %(releases_path)s' % env)\n env.releases = sorted(r.split(\"\\t\"))\n if len(env.releases) >= 1:\n env.current_revision = env.releases[-1]\n env.current_release = '%(releases_path)s/%(current_revision)s' % env\n if len(env.releases) > 1:\n env.previous_revision = env.releases[-2]\n env.previous_release = '%(releases_path)s/%(previous_revision)s' % env\n\n #cleanup old releases. max 3 allowed.\n cleanup()", "def business_notes(self, business_notes):\n if business_notes is not None and len(business_notes) > 2000:\n raise ValueError(\"Invalid value for `business_notes`, length must be less than or equal to `2000`\")\n\n self._business_notes = business_notes", "def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes", "def documentation_links(self, documentation_links):\n self._documentation_links = documentation_links", "def support_tickets(self, support_tickets):\n\n self._support_tickets = support_tickets", "def set_release(self, type=None, stack=None, version=None, build=None, notes=None, display=None,\n compatible=None):\n release_element = self.root_element.find(\"./release\")\n\n if release_element is None:\n raise Exception(\"Element 'release' is not found\")\n\n if type:\n update_simple(release_element, \"type\", type)\n\n if stack:\n update_simple(release_element, \"stack-id\", stack)\n\n if version:\n update_simple(release_element, \"version\", version)\n\n if build:\n update_simple(release_element, \"build\", build)\n\n if compatible:\n update_simple(release_element, \"compatible-with\", compatible)\n\n if notes:\n update_simple(release_element, \"release-notes\", notes)\n\n if display:\n update_simple(release_element, \"display\", display)", "def remind_all_finally_to_update_release_notes(self):\n names = read_from_file(self.pr.config.releaseItemsFileMergedBy)\n LOG.debug(\"final names list =\" + names)\n if names:\n time.sleep(10)\n\n msg = RELEASE_NOTES_REMINDER.format(msg=names, release_notes_link=self.pr.config.release_notes_link,\n qa_team=self.pr.config.qaTeamMembers)\n self.slack.postToSlack(channel=self.pr.config.alertChannelName, msg=msg)\n self.clean_up_for_next_cycle()", "def __set_release(self, project):\r\n release = project.session.create(self._config['release'])\r\n _logger.info(\"Current release: '%s'\" % project.release)\r\n _logger.info(\"Configuration release: '%s'\" % release)\r\n if project.release != release:\r\n _logger.info(\"Updating release on the project hierarchy.\")\r\n for subp in [project] + project.subprojects:\r\n subp.release = release", "def added_dates_notes(self, added_dates_notes):\n\n self._added_dates_notes = added_dates_notes", "def release(self, release):\n if release is None:\n raise ValueError(\"Invalid value for `release`, must not be `None`\") # noqa: E501\n if release is not None and len(release) < 1:\n raise ValueError(\"Invalid value for `release`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._release = release", "def notesChanged(self):\n if self.controller:\n self.versionProp.updateVersion(self.controller.current_version)", "def notes_setup(self):\n pass", "def versions(self, versions):\n\n self._versions = versions", "def support_links(self, support_links):\n self._support_links = support_links", "def notes(self):\n return self._notes" ]
[ "0.6267436", "0.6157792", "0.5947801", "0.58771783", "0.5817846", "0.5817846", "0.5817846", "0.5817846", "0.5817846", "0.5638602", "0.5454901", "0.5404618", "0.52281815", "0.51389474", "0.49515978", "0.4910483", "0.48980397", "0.48685953", "0.4809314", "0.47723645", "0.4762867", "0.47626823", "0.47533134", "0.47269678", "0.46938434", "0.46886054", "0.45870715", "0.45866984", "0.45188385", "0.44856164" ]
0.81215
0
Sets the categories of this Listing. Categories that the listing belongs to.
def categories(self, categories): self._categories = categories
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories: List[str]):\n\n self._categories = categories", "def set_categories(self, categories: NounCategories):\n \n self._categories = categories", "def setCategories(self, categories):\n vocabulary = dict(self.getCategoryVocabulary())\n self.categories = OrderedDict()\n for catId in categories:\n name = vocabulary.get(catId, None)\n if name is not None:\n self.categories[catId] = vocabulary[catId]\n else:\n # Sliently ignore that category id, it doesn't have a matching category name.\n # I apologize if you found this comment after hours of digging around code. \n pass", "def category_names(self, category_names):\n\n self._category_names = category_names", "def categories_taxonomy(self, categories_taxonomy):\n\n self._categories_taxonomy = categories_taxonomy", "def Categories(self, new_categories):\r\n if not isinstance(new_categories, ListType):\r\n raise TypeError(\"The supplied categories must be a list of \"\r\n \"strings.\")\r\n for new_cat in new_categories:\r\n if not isinstance(new_cat, str):\r\n raise TypeError(\"Invalid category: not of type 'string'\")\r\n elif new_cat not in self._metadata_map.CategoryNames:\r\n raise ValueError(\"The category '%s' is not in the mapping \"\r\n \"file.\" % new_cat)\r\n\r\n if not self._suppress_numeric_category_check:\r\n if not self._metadata_map.isNumericCategory(new_cat):\r\n raise TypeError(\"The category '%s' is not numeric. Not \"\r\n \"all values could be converted to numbers.\"\r\n % new_cat)\r\n\r\n if not self._suppress_category_uniqueness_check:\r\n if self._metadata_map.hasUniqueCategoryValues(new_cat):\r\n raise ValueError(\"All values in category '%s' are unique. \"\r\n \"This statistical method cannot operate \"\r\n \"on a category with unique values (e.g. \"\r\n \"there are no 'within' distances because \"\r\n \"each group of samples contains only a \"\r\n \"single sample).\" % new_cat)\r\n\r\n if not self._suppress_single_category_value_check:\r\n if self._metadata_map.hasSingleCategoryValue(new_cat):\r\n raise ValueError(\"All values in category '%s' are the \"\r\n \"same. This statistical method cannot \"\r\n \"operate on a category that creates only \"\r\n \"a single group of samples (e.g. there \"\r\n \"are no 'between' distances because \"\r\n \"there is only a single group).\"\r\n % new_cat)\r\n\r\n self._categories = new_categories", "def list_categories(self):\n raise NotImplementedError()", "def categories_display(self, categories_display):\n\n self._categories_display = categories_display", "def fill_tab_categories(self):\n self.category.fill_tab_categories(self.list_categories, self.mycursor, self.my_database)", "def categories_id(self, categories_id):\n\n self._categories_id = categories_id", "def categories(self):\n pass", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def __append_to_category_list(self):\n Category.get_category_list().append(self)", "def category(self, category: str):\n\n self._category = category", "def categories_level(self, categories_level):\n\n self._categories_level = categories_level", "def update_categories(self):\n categories = {}\n datasets = self.data['dataset']\n used_categories = self._get_list_categories_used(datasets)\n for category in used_categories:\n categories.update({\n category: self._get_datasets_tasks_by_category(datasets, category)\n })\n self.data[\"category\"] = categories", "def for_categories(self, categories):\n\n return self.__class__(\n self.session,\n state=self.state,\n term=self.term,\n order=self.order,\n direction=self.direction,\n issues=self.issues,\n categories=categories,\n organizations=self.organizations,\n user_ids=self.user_ids,\n group_ids=self.group_ids\n )", "def category_id(self, category_id):\n\n self._category_id = category_id", "def add_categories(categories, business):\n\tfor category in business[CATEGORIES]:\n\t\tcategories.add(category)", "def category(self, category: Category):\n\n self._category = category", "def get_category_list(cls):\n if Category.__category_list is None:\n Category.__category_list = []\n return Category.__category_list", "def categories(self) -> List[str]:\n return self._categories" ]
[ "0.7307259", "0.7307259", "0.7307259", "0.7307259", "0.7300853", "0.68660516", "0.66410893", "0.6177779", "0.6134643", "0.6062947", "0.60496455", "0.58617896", "0.5834949", "0.574634", "0.5729895", "0.5644923", "0.5644923", "0.5644923", "0.5644923", "0.5644923", "0.56407124", "0.5580245", "0.5542397", "0.5521415", "0.5500383", "0.54457045", "0.54187644", "0.54115874", "0.53845304", "0.53790265" ]
0.73104125
0
Sets the publisher of this Listing.
def publisher(self, publisher): self._publisher = publisher
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_publisher (self, publisher):\n self.publisher = publisher", "def publisher(self, publisher):\r\n return publishers.Publisher(self, publisher)", "def publisher(self):\n return self.get(\"publisher\")", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def Publisher(self, default=None):\n return self.data.get('publisher', default)", "def publisher(self):\n return self._publisher", "def get_publisher(self):\n return self.publisher", "def publish(self, publisher):\n publisher._send(self.payload.event, self.info, *self.payload.args,\n **self.payload.kwargs)", "def publisher(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher\")", "def pub(self, pub):\n\n self._pub = pub", "def source_of_published(self, source_of_published):\n\n self._source_of_published = source_of_published", "def publisher_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"publisher_name\")", "def publisher_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher_name\")", "def publication_type(self, publication_type):\n\n self._publication_type = publication_type", "def publisher_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"publisher_name\")", "def publisher_domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher_domain\")", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def published(self, published):\n if published is None:\n raise ValueError(\"Invalid value for `published`, must not be `None`\") # noqa: E501\n\n self._published = published", "def seller(self, seller):\n\n self._seller = seller", "def pubchem(self, pubchem):\n\n self._pubchem = pubchem", "def publish(self):\n self.published = True\n self.save()# pylint: disable=no-member", "def _registerPublisher(self, callerId, topic, topicType, callerApi):\n if topic not in self.FilterPublishedTopic:\n self.__docWriter.addPub(callerId, topic, topicType)", "def setPubs(self, key, val):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n if hasattr(val, \"encode\"):\n val = val.encode(\"utf-8\") # convert str to bytes\n return self.setVal(self.pubs, key, val)", "def publisher_domain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"publisher_domain\")", "def buyer(self, buyer):\n\n self._buyer = buyer", "def republish(self, republish):\n\n self._republish = republish", "def publish_meeting(self, publish_meeting):\n\n self._publish_meeting = publish_meeting" ]
[ "0.83886194", "0.7004004", "0.66248053", "0.63609666", "0.63609666", "0.63609666", "0.63100815", "0.62869966", "0.6277295", "0.6160284", "0.61506903", "0.61506903", "0.6065866", "0.5782139", "0.5739575", "0.5720117", "0.5717196", "0.5521795", "0.5436713", "0.526103", "0.52038825", "0.5170857", "0.51708335", "0.5153608", "0.51308477", "0.5099328", "0.5090266", "0.5087723", "0.50858885", "0.5080926" ]
0.797822
1
Gets the languages of this Listing. Languages supported by the listing.
def languages(self): return self._languages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLanguages(self):\n return self.__getColumnData(Q_LANGUAGES, 'language')", "def languages(self):\n return LanguageCodes.english_names", "def languages(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'languages')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def available_languages(self):\n data = self._run(\n url_path=\"languages/available\"\n )\n return data['result'].get('languages', [])", "def languages(self):\n\n return self._request('/languages')", "def languages(self) -> localedata.LocaleDataDict:\n return self._data['languages']", "def get_languages(self):\n language_list = []\n url = '%s%s/languages.xml' % (self.URL_API, self.API_KEY)\n data = urllib.urlopen(url)\n root = cElementTree.parse(data).getroot()\n for language in root.iter('Language'):\n language_list.append(language.find('abbreviation').text)\n return language_list", "def languages(self):\n if not self.has_languages:\n self._languages = dict(self._lodgeit.pastes.getLanguages())\n return self._languages", "def GetLanguages(cls):\n return sorted(cls._LANGUAGE_PER_TAG.items())", "def available_languages():\n utility = queryUtility(ILanguageAvailability)\n if utility is not None:\n return utility.getAvailableLanguages()\n return [DEFAULT_LANGUAGE]", "def get_learning_languages(self):\n return self.userlanguage_set.exclude(level='N')", "def Languages(self, default=[\"en\"]):\n return self.data.get('metadata', {}).get('languages', default)", "def supported_languages(self) -> list[str]:\n return SUPPORTED_LANGUAGE_CODES", "def get_all_languages():\n\tdef _get():\n\t\tif not frappe.db:\n\t\t\tfrappe.connect()\n\t\treturn frappe.db.sql_list('select name from tabLanguage')\n\treturn frappe.cache().get_value('languages', _get)", "def wikiLanguages():\n return languages", "def _get_available_languages(self):\n return stopwords.fileids()", "def supported_languages(self):\n return SUPPORT_LANGUAGES", "def get_locales(self) -> List[str]:\n\n return self.possible_locale_list", "def get_supported_languages ( self ):\n supported_lang = self.service_creator.company_supported_languages ( )\n supported_lang = \", \".join ( supported_lang )\n return jsonify ( {\"Supported languages\": supported_lang} )", "def list_project_languages(self, project_id):\n data = self._run(\n url_path=\"languages/list\",\n id=project_id\n )\n return data['result'].get('languages', [])", "def get_languages(self):\n titles = Title.objects.filter(page=self)\n if not hasattr(self, \"languages_cache\"):\n languages = []\n for t in titles:\n if t.language not in languages:\n languages.append(t.language)\n self.languages_cache = languages\n return self.languages_cache", "def get_native_languages(self):\n return self.userlanguage_set.filter(level='N')", "def languages():\n r = requests.get('http://translate.yandex.net/api/v1/tr.json/getLangs')\n return r.json['dirs']", "def get_all_languages(with_language_name: bool = False) -> list:\n\n\tdef get_language_codes():\n\t\treturn frappe.get_all(\"Language\", filters={\"enabled\": 1}, pluck=\"name\")\n\n\tdef get_all_language_with_name():\n\t\treturn frappe.get_all(\"Language\", [\"language_code\", \"language_name\"], {\"enabled\": 1})\n\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\tif with_language_name:\n\t\treturn frappe.cache.get_value(\"languages_with_name\", get_all_language_with_name)\n\telse:\n\t\treturn frappe.cache.get_value(\"languages\", get_language_codes)", "def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n        temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp", "def getAvailableLanguages(self):\n url = \"http://www.youtube.com/api/timedtext?v=%s&type=list\" % self.video_id\n xml = urllib2.urlopen(url)\n tree = ET.parse(xml)\n root = tree.getroot()\n languages = {}\n for child in root:\n languages[child.attrib[\"lang_code\"]] = child.attrib[\"lang_translated\"]\n return languages", "def released_languages_list(self):\r\n if not self.released_languages.strip(): # pylint: disable=no-member\r\n return []\r\n\r\n languages = [lang.strip() for lang in self.released_languages.split(',')] # pylint: disable=no-member\r\n # Put in alphabetical order\r\n languages.sort()\r\n return languages", "def get_languages(self) -> dict:\n request_url = self.__API_URL.format(user=self._user,\n project=self._project)\n response = self._http.request('GET', request_url,\n headers=config.HEADERS)\n\n # Handle limits and wrong responses\n if response.status > 205:\n raise StatusError(status=response.status)\n\n return json.loads(response.data)", "def listSupportedGrammarCheckingLanguages(cls, path=None):\n lib = cls.__getLib()\n\n def listOperation(p):\n return lib.voikkoListSupportedGrammarCheckingLanguages(p)\n\n return cls.__listSupportedLanguagesForOperation(path, lib, listOperation)", "def language_tags(self):\n return self.properties.get(\"languageTags\", StringCollection())" ]
[ "0.77883005", "0.7666184", "0.75871485", "0.75005174", "0.7490036", "0.7477735", "0.74218374", "0.7164394", "0.7100529", "0.70897734", "0.7082067", "0.70374054", "0.696649", "0.69658625", "0.6942555", "0.69273937", "0.6868959", "0.67511004", "0.6733299", "0.6725548", "0.6691533", "0.6547391", "0.64989877", "0.6493729", "0.6463262", "0.6436736", "0.642613", "0.64002573", "0.6327326", "0.6252414" ]
0.7978811
0
Sets the languages of this Listing. Languages supported by the listing.
def languages(self, languages): self._languages = languages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def languages(self, languages):\n\n self._languages = languages", "def spoken_languages(self, spoken_languages):\n\n self._spoken_languages = spoken_languages", "def languages(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'languages')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def set_language(self, lang):\n self.lang = lang", "def test_set_language(self):\n # Test for default languages\n self.assertEqual(self.scraper.language_original, 'jpn')\n self.assertEqual(self.scraper.language_translated, 'eng')\n\n # Test after setting supported languages\n self.scraper.set_languages('jpn', 'eng')\n self.assertEqual(self.scraper.language_translated, 'jpn')\n self.assertEqual(self.scraper.language_original, 'eng')\n\n # Test after setting non-supported languages\n self.scraper.set_languages('eng', 'lol')\n self.assertEqual(self.scraper.language_translated, 'jpn')\n self.assertEqual(self.scraper.language_original, 'eng')", "def setup_site_languages(context):\n portal = context.getSite()\n ltool = portal.portal_languages\n \n defaultLanguage = bc.default_language\n supportedLanguages = list(bc.zope_i18n_allowed_languages.split())\n ltool.manage_setLanguageSettings(defaultLanguage, supportedLanguages,\n setUseCombinedLanguageCodes=True,\n setCookieN=True, setRequestN=True)\n logger.info(\"Site languages enabled.\")", "def languages(self):\n return self._languages", "def setLanguage(self, value):\n return self._call_java(\"setLanguage\", value)", "def supported_languages(self):\n return SUPPORT_LANGUAGES", "def languages(self):\n return LanguageCodes.english_names", "def languages(self):\n if not self.has_languages:\n self._languages = dict(self._lodgeit.pastes.getLanguages())\n return self._languages", "def reset_languages(self):\n self._languages = None", "def languages(self):\n\n return self._request('/languages')", "async def langs(self, context):\n languages = get_langs(context.message.guild)\n await context.channel.send(LANG_LIST.format(nb_lang=len(languages), langs=enum(languages)))", "def Languages(self, default=[\"en\"]):\n return self.data.get('metadata', {}).get('languages', default)", "def language(self, language: str):\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language: str):\n\n self._language = language", "def __updateLanguages(self):\n self.__ensureTranslationEngineReady()\n if self.__translationEngine is not None:\n supportedCodes = self.__translationEngine.supportedLanguages()\n enabledCodes = self.__plugin.getPreferences(\"EnabledLanguages\")\n \n # 1. save current selections\n origLanguage = self.origLanguageComboBox.itemData(\n self.origLanguageComboBox.currentIndex())\n \n # 2. reload the original language combo box\n self.origLanguageComboBox.blockSignals(True)\n self.origLanguageComboBox.clear()\n for code in enabledCodes:\n if code in supportedCodes:\n language = self.__languages.getLanguage(code)\n if language:\n icon = self.__languages.getLanguageIcon(code)\n self.origLanguageComboBox.addItem(\n icon, language, code)\n self.origLanguageComboBox.model().sort(0)\n origIndex = self.origLanguageComboBox.findData(origLanguage)\n if origIndex == -1:\n origIndex = 0\n self.origLanguageComboBox.blockSignals(False)\n self.origLanguageComboBox.setCurrentIndex(origIndex)", "def languages(self) -> localedata.LocaleDataDict:\n return self._data['languages']", "def setRobotLanguage(self):\n\n try:\n assert self.languageTag in self.tts.getSupportedLanguages()\n self.tts.setLanguage(self.languageTag)\n\n except AssertionError:\n self.logger.warning(self.languageTag + \" is not supported by the robot, language set \"\\\n \"to English\")\n\n self.tts.setLanguage(self.ENGLISH_TAG)", "def set_language(self, lang):\n\n self.language = lang\n\n self.add_metadata('DC', 'language', lang)", "def SetLanguage(self, language):\n try:\n newDict = guicmd.CommandInterface.MessageHandler.GetLanguageDict(language)\n if newDict:\n self.languageDict = newDict\n self.language = language\n except:\n pass", "def list_languages(self):\n known = [ob.capitalize() for ob in self.caller.languages.known_languages]\n known += [\"Arvani\"]\n self.msg(\"{wYou can currently speak:{n %s\" % \", \".join(known))\n self.msg(\n \"You can learn %s additional languages.\"\n % self.caller.languages.additional_languages\n )", "def wikiLanguages():\n return languages", "def getLanguages(self):\n return self.__getColumnData(Q_LANGUAGES, 'language')" ]
[ "0.7733243", "0.67889374", "0.6149776", "0.5953575", "0.58947927", "0.5871789", "0.5811527", "0.57943386", "0.5733784", "0.56714165", "0.5670487", "0.5616679", "0.56014025", "0.55900526", "0.5583264", "0.55787045", "0.55644816", "0.55644816", "0.55644816", "0.55644816", "0.55644816", "0.5523711", "0.551836", "0.5488977", "0.5460556", "0.54011303", "0.52849305", "0.52783376", "0.5247929", "0.52174485" ]
0.7791193
0