Dataset schema (eight columns; each record below repeats these fields in
this order, one field per line):

  body                    string, lengths 26 to 98.2k
  body_hash               int64, -9,222,864,604,528,158,000 to
                          9,221,803,474B (viewer shorthand for ~9.22e18)
  docstring               string, lengths 1 to 16.8k
  path                    string, lengths 5 to 230
  name                    string, lengths 1 to 96
  repository_name         string, lengths 7 to 89
  lang                    string, 1 class
  body_without_docstring  string, lengths 20 to 98.2k
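For orientation, here is a minimal sketch of iterating a dump like this, assuming it has been exported as a JSONL file (the `functions.jsonl` name is hypothetical) with one record per line keyed by the column names above:

```python
# Minimal sketch, assuming a hypothetical JSONL export of this dataset
# where each line is one record keyed by the column names above.
import json

with open("functions.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # e.g. list every function extracted from one repository
        if row["repository_name"] == "rbalda/neural_ocr":
            print(row["path"], row["name"])
```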
def seconds(s): '\n Return seconds as days.\n ' return (float(s) / SEC_PER_DAY)
-8,024,640,055,269,441,000
Return seconds as days.
env/lib/python2.7/site-packages/matplotlib/dates.py
seconds
rbalda/neural_ocr
python
def seconds(s): '\n \n ' return (float(s) / SEC_PER_DAY)
def minutes(m): '\n Return minutes as days.\n ' return (float(m) / MINUTES_PER_DAY)
2,918,726,190,601,350,000
Return minutes as days.
env/lib/python2.7/site-packages/matplotlib/dates.py
minutes
rbalda/neural_ocr
python
def minutes(m): '\n \n ' return (float(m) / MINUTES_PER_DAY)
def hours(h): '\n Return hours as days.\n ' return (h / HOURS_PER_DAY)
-7,078,883,573,613,321,000
Return hours as days.
env/lib/python2.7/site-packages/matplotlib/dates.py
hours
rbalda/neural_ocr
python
def hours(h): '\n \n ' return (h / HOURS_PER_DAY)
def weeks(w): '\n Return weeks as days.\n ' return (w * DAYS_PER_WEEK)
3,757,993,147,974,712,000
Return weeks as days.
env/lib/python2.7/site-packages/matplotlib/dates.py
weeks
rbalda/neural_ocr
python
def weeks(w): '\n \n ' return (w * DAYS_PER_WEEK)
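The four records above are matplotlib.dates' unit helpers: they rescale seconds, minutes, hours, and weeks into the library's day-based float units. A self-contained sketch, with the module constants inlined (values as defined in this matplotlib version):

```python
# Constants inlined from matplotlib.dates so the sketch stands alone.
HOURS_PER_DAY = 24.0
MINUTES_PER_DAY = 60.0 * HOURS_PER_DAY   # 1440
SEC_PER_DAY = 60.0 * MINUTES_PER_DAY     # 86400
DAYS_PER_WEEK = 7.0

def seconds(s): return float(s) / SEC_PER_DAY
def minutes(m): return float(m) / MINUTES_PER_DAY
def hours(h): return h / HOURS_PER_DAY
def weeks(w): return w * DAYS_PER_WEEK

print(hours(36))   # 1.5 (days)
print(weeks(2))    # 14.0 (days)
```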
def __init__(self, fmt): ' fmt: any valid strptime format is supported ' self.fmt = fmt
-4,169,169,694,858,552,000
fmt: any valid strptime format is supported
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, fmt): ' ' self.fmt = fmt
def __call__(self, s): 's : string to be converted\n return value: a date2num float\n ' return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
6,304,710,515,264,939,000
s : string to be converted return value: a date2num float
env/lib/python2.7/site-packages/matplotlib/dates.py
__call__
rbalda/neural_ocr
python
def __call__(self, s): 's : string to be converted\n return value: a date2num float\n ' return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
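The two records above belong to strpdate2num: construct it with a strptime format, call it with a string, and get back a date2num float. A usage sketch against the matplotlib version vendored here (the class was deprecated in later releases):

```python
# Usage sketch; strpdate2num exists in this vendored matplotlib.
from matplotlib.dates import strpdate2num

conv = strpdate2num('%Y-%m-%d')
x = conv('2015-03-14')
print(x)  # a float in matplotlib's day-based date units
```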
def __init__(self, fmt, encoding='utf-8'): "\n Args:\n fmt: any valid strptime format is supported\n encoding: encoding to use on byte input (default: 'utf-8')\n " super(bytespdate2num, self).__init__(fmt) self.encoding = encoding
-7,057,810,267,372,294,000
Args: fmt: any valid strptime format is supported encoding: encoding to use on byte input (default: 'utf-8')
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, fmt, encoding='utf-8'): "\n Args:\n fmt: any valid strptime format is supported\n encoding: encoding to use on byte input (default: 'utf-8')\n " super(bytespdate2num, self).__init__(fmt) self.encoding = encoding
def __call__(self, b): '\n Args:\n b: byte input to be converted\n Returns:\n A date2num float\n ' s = b.decode(self.encoding) return super(bytespdate2num, self).__call__(s)
6,083,521,617,235,164,000
Args: b: byte input to be converted Returns: A date2num float
env/lib/python2.7/site-packages/matplotlib/dates.py
__call__
rbalda/neural_ocr
python
def __call__(self, b): '\n Args:\n b: byte input to be converted\n Returns:\n A date2num float\n ' s = b.decode(self.encoding) return super(bytespdate2num, self).__call__(s)
def __init__(self, fmt, tz=None): '\n *fmt* is a :func:`strftime` format string; *tz* is the\n :class:`tzinfo` instance.\n ' if (tz is None): tz = _get_rc_timezone() self.fmt = fmt self.tz = tz
-7,895,130,219,791,011,000
*fmt* is a :func:`strftime` format string; *tz* is the :class:`tzinfo` instance.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, fmt, tz=None): '\n *fmt* is a :func:`strftime` format string; *tz* is the\n :class:`tzinfo` instance.\n ' if (tz is None): tz = _get_rc_timezone() self.fmt = fmt self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement): 'Helper function for replacing substrings sub1 and sub2\n located at the same indexes in strings s1 and s2 respectively,\n with the string replacement. It is expected that sub1 and sub2\n have the same length. Returns the pair s1, s2 after the\n substitutions.\n ' i = 0 while True: j = s1.find(sub1, i) if (j == (- 1)): break i = (j + 1) if (s2[j:(j + len(sub2))] != sub2): continue s1 = ((s1[:j] + replacement) + s1[(j + len(sub1)):]) s2 = ((s2[:j] + replacement) + s2[(j + len(sub2)):]) return (s1, s2)
-8,579,673,710,969,332,000
Helper function for replacing substrings sub1 and sub2 located at the same indexes in strings s1 and s2 respectively, with the string replacement. It is expected that sub1 and sub2 have the same length. Returns the pair s1, s2 after the substitutions.
env/lib/python2.7/site-packages/matplotlib/dates.py
_replace_common_substr
rbalda/neural_ocr
python
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement): 'Helper function for replacing substrings sub1 and sub2\n located at the same indexes in strings s1 and s2 respectively,\n with the string replacement. It is expected that sub1 and sub2\n have the same length. Returns the pair s1, s2 after the\n substitutions.\n ' i = 0 while True: j = s1.find(sub1, i) if (j == (- 1)): break i = (j + 1) if (s2[j:(j + len(sub2))] != sub2): continue s1 = ((s1[:j] + replacement) + s1[(j + len(sub1)):]) s2 = ((s2[:j] + replacement) + s2[(j + len(sub2)):]) return (s1, s2)
def strftime_pre_1900(self, dt, fmt=None): "Call time.strftime for years before 1900 by rolling\n forward a multiple of 28 years.\n\n *fmt* is a :func:`strftime` format string.\n\n Dalke: I hope I did this math right. Every 28 years the\n calendar repeats, except through century leap years excepting\n the 400 year leap years. But only if you're using the Gregorian\n calendar.\n " if (fmt is None): fmt = self.fmt fmt = re.sub('((^|[^%])(%%)*)%f', '\\g<1>{0:06d}'.format(dt.microsecond), fmt) year = dt.year delta = (2000 - year) off = (6 * ((delta // 100) + (delta // 400))) year = (year + off) year1 = (year + (((2000 - year) // 28) * 28)) year2 = (year1 + 28) timetuple = dt.timetuple() s1 = time.strftime(fmt, ((year1,) + timetuple[1:])) s2 = time.strftime(fmt, ((year2,) + timetuple[1:])) (s1, s2) = self._replace_common_substr(s1, s2, '{0:04d}'.format(year1), '{0:04d}'.format(year2), '{0:04d}'.format(dt.year)) (s1, s2) = self._replace_common_substr(s1, s2, '{0:02d}'.format((year1 % 100)), '{0:02d}'.format((year2 % 100)), '{0:02d}'.format((dt.year % 100))) return cbook.unicode_safe(s1)
-3,409,017,546,486,234,000
Call time.strftime for years before 1900 by rolling forward a multiple of 28 years. *fmt* is a :func:`strftime` format string. Dalke: I hope I did this math right. Every 28 years the calendar repeats, except through century leap years excepting the 400 year leap years. But only if you're using the Gregorian calendar.
env/lib/python2.7/site-packages/matplotlib/dates.py
strftime_pre_1900
rbalda/neural_ocr
python
def strftime_pre_1900(self, dt, fmt=None): "Call time.strftime for years before 1900 by rolling\n forward a multiple of 28 years.\n\n *fmt* is a :func:`strftime` format string.\n\n Dalke: I hope I did this math right. Every 28 years the\n calendar repeats, except through century leap years excepting\n the 400 year leap years. But only if you're using the Gregorian\n calendar.\n " if (fmt is None): fmt = self.fmt fmt = re.sub('((^|[^%])(%%)*)%f', '\\g<1>{0:06d}'.format(dt.microsecond), fmt) year = dt.year delta = (2000 - year) off = (6 * ((delta // 100) + (delta // 400))) year = (year + off) year1 = (year + (((2000 - year) // 28) * 28)) year2 = (year1 + 28) timetuple = dt.timetuple() s1 = time.strftime(fmt, ((year1,) + timetuple[1:])) s2 = time.strftime(fmt, ((year2,) + timetuple[1:])) (s1, s2) = self._replace_common_substr(s1, s2, '{0:04d}'.format(year1), '{0:04d}'.format(year2), '{0:04d}'.format(dt.year)) (s1, s2) = self._replace_common_substr(s1, s2, '{0:02d}'.format((year1 % 100)), '{0:02d}'.format((year2 % 100)), '{0:02d}'.format((dt.year % 100))) return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None): 'Refer to documentation for datetime.strftime.\n\n *fmt* is a :func:`strftime` format string.\n\n Warning: For years before 1900, depending upon the current\n locale it is possible that the year displayed with %x might\n be incorrect. For years before 100, %y and %Y will yield\n zero-padded strings.\n ' if (fmt is None): fmt = self.fmt fmt = self.illegal_s.sub('\\1', fmt) fmt = fmt.replace('%s', 's') if (dt.year >= 1900): return cbook.unicode_safe(dt.strftime(fmt)) return self.strftime_pre_1900(dt, fmt)
-7,673,140,819,755,084,000
Refer to documentation for datetime.strftime. *fmt* is a :func:`strftime` format string. Warning: For years before 1900, depending upon the current locale it is possible that the year displayed with %x might be incorrect. For years before 100, %y and %Y will yield zero-padded strings.
env/lib/python2.7/site-packages/matplotlib/dates.py
strftime
rbalda/neural_ocr
python
def strftime(self, dt, fmt=None): 'Refer to documentation for datetime.strftime.\n\n *fmt* is a :func:`strftime` format string.\n\n Warning: For years before 1900, depending upon the current\n locale it is possible that the year displayed with %x might\n be incorrect. For years before 100, %y and %Y will yield\n zero-padded strings.\n ' if (fmt is None): fmt = self.fmt fmt = self.illegal_s.sub('\\1', fmt) fmt = fmt.replace('%s', 's') if (dt.year >= 1900): return cbook.unicode_safe(dt.strftime(fmt)) return self.strftime_pre_1900(dt, fmt)
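Together, strftime and strftime_pre_1900 let DateFormatter format any year: dates from 1900 on go straight to datetime.strftime, while earlier ones are first rolled forward by a multiple of 28 years. A small sketch:

```python
# Sketch against the vendored matplotlib; the pre-1900 date exercises
# the 28-year roll-forward path shown in the records above.
import datetime
from matplotlib.dates import DateFormatter

fmt = DateFormatter('%Y-%m-%d')
print(fmt.strftime(datetime.datetime(1850, 7, 4)))  # '1850-07-04'
print(fmt.strftime(datetime.datetime(2001, 7, 4)))  # '2001-07-04'
```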
def __init__(self, t, fmt, tz=None): '\n *t* is a sequence of dates (floating point days). *fmt* is a\n :func:`strftime` format string.\n ' if (tz is None): tz = _get_rc_timezone() self.t = t self.fmt = fmt self.tz = tz
3,572,620,911,891,964,000
*t* is a sequence of dates (floating point days). *fmt* is a :func:`strftime` format string.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, t, fmt, tz=None): '\n *t* is a sequence of dates (floating point days). *fmt* is a\n :func:`strftime` format string.\n ' if (tz is None): tz = _get_rc_timezone() self.t = t self.fmt = fmt self.tz = tz
def __call__(self, x, pos=0): 'Return the label for time *x* at position *pos*' ind = int(round(x)) if ((ind >= len(self.t)) or (ind <= 0)): return '' dt = num2date(self.t[ind], self.tz) return cbook.unicode_safe(dt.strftime(self.fmt))
7,515,494,125,788,098,000
Return the label for time *x* at position *pos*
env/lib/python2.7/site-packages/matplotlib/dates.py
__call__
rbalda/neural_ocr
python
def __call__(self, x, pos=0): ind = int(round(x)) if ((ind >= len(self.t)) or (ind <= 0)): return '' dt = num2date(self.t[ind], self.tz) return cbook.unicode_safe(dt.strftime(self.fmt))
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'): '\n Autoformat the date labels. The default format is the one to use\n if none of the values in ``self.scaled`` are greater than the unit\n returned by ``locator._get_unit()``.\n ' self._locator = locator self._tz = tz self.defaultfmt = defaultfmt self._formatter = DateFormatter(self.defaultfmt, tz) self.scaled = {DAYS_PER_YEAR: '%Y', DAYS_PER_MONTH: '%b %Y', 1.0: '%b %d %Y', (1.0 / HOURS_PER_DAY): '%H:%M:%S', (1.0 / MINUTES_PER_DAY): '%H:%M:%S.%f'}
-1,554,169,645,260,470,300
Autoformat the date labels. The default format is the one to use if none of the values in ``self.scaled`` are greater than the unit returned by ``locator._get_unit()``.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'): '\n Autoformat the date labels. The default format is the one to use\n if none of the values in ``self.scaled`` are greater than the unit\n returned by ``locator._get_unit()``.\n ' self._locator = locator self._tz = tz self.defaultfmt = defaultfmt self._formatter = DateFormatter(self.defaultfmt, tz) self.scaled = {DAYS_PER_YEAR: '%Y', DAYS_PER_MONTH: '%b %Y', 1.0: '%b %d %Y', (1.0 / HOURS_PER_DAY): '%H:%M:%S', (1.0 / MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __init__(self, tz=None): '\n *tz* is a :class:`tzinfo` instance.\n ' if (tz is None): tz = _get_rc_timezone() self.tz = tz
-9,082,949,987,731,717,000
*tz* is a :class:`tzinfo` instance.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, tz=None): '\n \n ' if (tz is None): tz = _get_rc_timezone() self.tz = tz
def set_tzinfo(self, tz): '\n Set time zone info.\n ' self.tz = tz
-1,745,468,367,312,040,700
Set time zone info.
env/lib/python2.7/site-packages/matplotlib/dates.py
set_tzinfo
rbalda/neural_ocr
python
def set_tzinfo(self, tz): '\n \n ' self.tz = tz
def datalim_to_dt(self): '\n Convert axis data interval to datetime objects.\n ' (dmin, dmax) = self.axis.get_data_interval() if (dmin > dmax): (dmin, dmax) = (dmax, dmin) return (num2date(dmin, self.tz), num2date(dmax, self.tz))
-7,588,518,086,582,918,000
Convert axis data interval to datetime objects.
env/lib/python2.7/site-packages/matplotlib/dates.py
datalim_to_dt
rbalda/neural_ocr
python
def datalim_to_dt(self): '\n \n ' (dmin, dmax) = self.axis.get_data_interval() if (dmin > dmax): (dmin, dmax) = (dmax, dmin) return (num2date(dmin, self.tz), num2date(dmax, self.tz))
def viewlim_to_dt(self): '\n Converts the view interval to datetime objects.\n ' (vmin, vmax) = self.axis.get_view_interval() if (vmin > vmax): (vmin, vmax) = (vmax, vmin) return (num2date(vmin, self.tz), num2date(vmax, self.tz))
9,095,579,861,209,821,000
Converts the view interval to datetime objects.
env/lib/python2.7/site-packages/matplotlib/dates.py
viewlim_to_dt
rbalda/neural_ocr
python
def viewlim_to_dt(self): '\n \n ' (vmin, vmax) = self.axis.get_view_interval() if (vmin > vmax): (vmin, vmax) = (vmax, vmin) return (num2date(vmin, self.tz), num2date(vmax, self.tz))
def _get_unit(self): '\n Return how many days a unit of the locator is; used for\n intelligent autoscaling.\n ' return 1
-8,673,055,773,312,316,000
Return how many days a unit of the locator is; used for intelligent autoscaling.
env/lib/python2.7/site-packages/matplotlib/dates.py
_get_unit
rbalda/neural_ocr
python
def _get_unit(self): '\n Return how many days a unit of the locator is; used for\n intelligent autoscaling.\n ' return 1
def _get_interval(self): '\n Return the number of units for each tick.\n ' return 1
2,503,878,047,653,216,000
Return the number of units for each tick.
env/lib/python2.7/site-packages/matplotlib/dates.py
_get_interval
rbalda/neural_ocr
python
def _get_interval(self): '\n \n ' return 1
def nonsingular(self, vmin, vmax): '\n Given the proposed upper and lower extent, adjust the range\n if it is too close to being singular (i.e. a range of ~0).\n\n ' unit = self._get_unit() interval = self._get_interval() if (abs((vmax - vmin)) < 1e-06): vmin -= ((2 * unit) * interval) vmax += ((2 * unit) * interval) return (vmin, vmax)
6,927,860,421,802,992,000
Given the proposed upper and lower extent, adjust the range if it is too close to being singular (i.e. a range of ~0).
env/lib/python2.7/site-packages/matplotlib/dates.py
nonsingular
rbalda/neural_ocr
python
def nonsingular(self, vmin, vmax): '\n Given the proposed upper and lower extent, adjust the range\n if it is too close to being singular (i.e. a range of ~0).\n\n ' unit = self._get_unit() interval = self._get_interval() if (abs((vmax - vmin)) < 1e-06): vmin -= ((2 * unit) * interval) vmax += ((2 * unit) * interval) return (vmin, vmax)
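With the base DateLocator, _get_unit and _get_interval both return 1, so nonsingular widens a degenerate range by two days on each side. A standalone restatement of that arithmetic:

```python
# Standalone restatement of DateLocator.nonsingular with the base-class
# values unit = 1 and interval = 1.
def nonsingular(vmin, vmax, unit=1, interval=1):
    if abs(vmax - vmin) < 1e-06:
        vmin -= 2 * unit * interval
        vmax += 2 * unit * interval
    return vmin, vmax

print(nonsingular(730120.0, 730120.0))  # (730118.0, 730122.0)
```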
def _get_unit(self): '\n Return how many days a unit of the locator is; used for\n intelligent autoscaling.\n ' freq = self.rule._rrule._freq return self.get_unit_generic(freq)
-6,887,433,986,755,907,000
Return how many days a unit of the locator is; used for intelligent autoscaling.
env/lib/python2.7/site-packages/matplotlib/dates.py
_get_unit
rbalda/neural_ocr
python
def _get_unit(self): '\n Return how many days a unit of the locator is; used for\n intelligent autoscaling.\n ' freq = self.rule._rrule._freq return self.get_unit_generic(freq)
def autoscale(self): '\n Set the view limits to include the data range.\n ' (dmin, dmax) = self.datalim_to_dt() delta = relativedelta(dmax, dmin) try: start = (dmin - delta) except ValueError: start = _from_ordinalf(1.0) try: stop = (dmax + delta) except ValueError: stop = _from_ordinalf(3652059.9999999) self.rule.set(dtstart=start, until=stop) (dmin, dmax) = self.datalim_to_dt() vmin = self.rule.before(dmin, True) if (not vmin): vmin = dmin vmax = self.rule.after(dmax, True) if (not vmax): vmax = dmax vmin = date2num(vmin) vmax = date2num(vmax) return self.nonsingular(vmin, vmax)
2,355,684,293,106,261,500
Set the view limits to include the data range.
env/lib/python2.7/site-packages/matplotlib/dates.py
autoscale
rbalda/neural_ocr
python
def autoscale(self): '\n \n ' (dmin, dmax) = self.datalim_to_dt() delta = relativedelta(dmax, dmin) try: start = (dmin - delta) except ValueError: start = _from_ordinalf(1.0) try: stop = (dmax + delta) except ValueError: stop = _from_ordinalf(3652059.9999999) self.rule.set(dtstart=start, until=stop) (dmin, dmax) = self.datalim_to_dt() vmin = self.rule.before(dmin, True) if (not vmin): vmin = dmin vmax = self.rule.after(dmax, True) if (not vmax): vmax = dmax vmin = date2num(vmin) vmax = date2num(vmax) return self.nonsingular(vmin, vmax)
def __init__(self, tz=None, minticks=5, maxticks=None, interval_multiples=False): "\n *minticks* is the minimum number of ticks desired, which is used to\n select the type of ticking (yearly, monthly, etc.).\n\n *maxticks* is the maximum number of ticks desired, which controls\n any interval between ticks (ticking every other, every 3, etc.).\n For really fine-grained control, this can be a dictionary mapping\n individual rrule frequency constants (YEARLY, MONTHLY, etc.)\n to their own maximum number of ticks. This can be used to keep\n the number of ticks appropriate to the format chosen in\n :class:`AutoDateFormatter`. Any frequency not specified in this\n dictionary is given a default value.\n\n *tz* is a :class:`tzinfo` instance.\n\n *interval_multiples* is a boolean that indicates whether ticks\n should be chosen to be multiple of the interval. This will lock\n ticks to 'nicer' locations. For example, this will force the\n ticks to be at hours 0,6,12,18 when hourly ticking is done at\n 6 hour intervals.\n\n The AutoDateLocator has an interval dictionary that maps the\n frequency of the tick (a constant from dateutil.rrule) and a\n multiple allowed for that ticking. The default looks like this::\n\n self.intervald = {\n YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,\n 1000, 2000, 4000, 5000, 10000],\n MONTHLY : [1, 2, 3, 4, 6],\n DAILY : [1, 2, 3, 7, 14],\n HOURLY : [1, 2, 3, 4, 6, 12],\n MINUTELY: [1, 5, 10, 15, 30],\n SECONDLY: [1, 5, 10, 15, 30],\n MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,\n 5000, 10000, 20000, 50000, 100000, 200000, 500000,\n 1000000],\n }\n\n The interval is used to specify multiples that are appropriate for\n the frequency of ticking. For instance, every 7 days is sensible\n for daily ticks, but for minutes/seconds, 15 or 30 make sense.\n You can customize this dictionary by doing::\n\n locator = AutoDateLocator()\n locator.intervald[HOURLY] = [3] # only show every 3 hours\n " DateLocator.__init__(self, tz) self._locator = YearLocator() self._freq = YEARLY self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY, SECONDLY, MICROSECONDLY] self.minticks = minticks self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12, MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8} if (maxticks is not None): try: self.maxticks.update(maxticks) except TypeError: self.maxticks = dict(zip(self._freqs, ([maxticks] * len(self._freqs)))) self.interval_multiples = interval_multiples self.intervald = {YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500, 1000, 2000, 4000, 5000, 10000], MONTHLY: [1, 2, 3, 4, 6], DAILY: [1, 2, 3, 7, 14, 21], HOURLY: [1, 2, 3, 4, 6, 12], MINUTELY: [1, 5, 10, 15, 30], SECONDLY: [1, 5, 10, 15, 30], MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000]} self._byranges = [None, range(1, 13), range(1, 32), range(0, 24), range(0, 60), range(0, 60), None]
-5,306,297,637,022,860,000
*minticks* is the minimum number of ticks desired, which is used to select the type of ticking (yearly, monthly, etc.). *maxticks* is the maximum number of ticks desired, which controls any interval between ticks (ticking every other, every 3, etc.). For really fine-grained control, this can be a dictionary mapping individual rrule frequency constants (YEARLY, MONTHLY, etc.) to their own maximum number of ticks. This can be used to keep the number of ticks appropriate to the format chosen in :class:`AutoDateFormatter`. Any frequency not specified in this dictionary is given a default value. *tz* is a :class:`tzinfo` instance. *interval_multiples* is a boolean that indicates whether ticks should be chosen to be multiple of the interval. This will lock ticks to 'nicer' locations. For example, this will force the ticks to be at hours 0,6,12,18 when hourly ticking is done at 6 hour intervals. The AutoDateLocator has an interval dictionary that maps the frequency of the tick (a constant from dateutil.rrule) and a multiple allowed for that ticking. The default looks like this:: self.intervald = { YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500, 1000, 2000, 4000, 5000, 10000], MONTHLY : [1, 2, 3, 4, 6], DAILY : [1, 2, 3, 7, 14], HOURLY : [1, 2, 3, 4, 6, 12], MINUTELY: [1, 5, 10, 15, 30], SECONDLY: [1, 5, 10, 15, 30], MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000], } The interval is used to specify multiples that are appropriate for the frequency of ticking. For instance, every 7 days is sensible for daily ticks, but for minutes/seconds, 15 or 30 make sense. You can customize this dictionary by doing:: locator = AutoDateLocator() locator.intervald[HOURLY] = [3] # only show every 3 hours
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, tz=None, minticks=5, maxticks=None, interval_multiples=False): "\n *minticks* is the minimum number of ticks desired, which is used to\n select the type of ticking (yearly, monthly, etc.).\n\n *maxticks* is the maximum number of ticks desired, which controls\n any interval between ticks (ticking every other, every 3, etc.).\n For really fine-grained control, this can be a dictionary mapping\n individual rrule frequency constants (YEARLY, MONTHLY, etc.)\n to their own maximum number of ticks. This can be used to keep\n the number of ticks appropriate to the format chosen in\n :class:`AutoDateFormatter`. Any frequency not specified in this\n dictionary is given a default value.\n\n *tz* is a :class:`tzinfo` instance.\n\n *interval_multiples* is a boolean that indicates whether ticks\n should be chosen to be multiple of the interval. This will lock\n ticks to 'nicer' locations. For example, this will force the\n ticks to be at hours 0,6,12,18 when hourly ticking is done at\n 6 hour intervals.\n\n The AutoDateLocator has an interval dictionary that maps the\n frequency of the tick (a constant from dateutil.rrule) and a\n multiple allowed for that ticking. The default looks like this::\n\n self.intervald = {\n YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,\n 1000, 2000, 4000, 5000, 10000],\n MONTHLY : [1, 2, 3, 4, 6],\n DAILY : [1, 2, 3, 7, 14],\n HOURLY : [1, 2, 3, 4, 6, 12],\n MINUTELY: [1, 5, 10, 15, 30],\n SECONDLY: [1, 5, 10, 15, 30],\n MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,\n 5000, 10000, 20000, 50000, 100000, 200000, 500000,\n 1000000],\n }\n\n The interval is used to specify multiples that are appropriate for\n the frequency of ticking. For instance, every 7 days is sensible\n for daily ticks, but for minutes/seconds, 15 or 30 make sense.\n You can customize this dictionary by doing::\n\n locator = AutoDateLocator()\n locator.intervald[HOURLY] = [3] # only show every 3 hours\n " DateLocator.__init__(self, tz) self._locator = YearLocator() self._freq = YEARLY self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY, SECONDLY, MICROSECONDLY] self.minticks = minticks self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12, MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8} if (maxticks is not None): try: self.maxticks.update(maxticks) except TypeError: self.maxticks = dict(zip(self._freqs, ([maxticks] * len(self._freqs)))) self.interval_multiples = interval_multiples self.intervald = {YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500, 1000, 2000, 4000, 5000, 10000], MONTHLY: [1, 2, 3, 4, 6], DAILY: [1, 2, 3, 7, 14, 21], HOURLY: [1, 2, 3, 4, 6, 12], MINUTELY: [1, 5, 10, 15, 30], SECONDLY: [1, 5, 10, 15, 30], MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000]} self._byranges = [None, range(1, 13), range(1, 32), range(0, 24), range(0, 60), range(0, 60), None]
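The docstring's own customization hook, spelled out as runnable code (HOURLY is re-exported by matplotlib.dates from dateutil.rrule, as the WeekdayLocator record below notes):

```python
from matplotlib.dates import AutoDateLocator, HOURLY

locator = AutoDateLocator()
locator.intervald[HOURLY] = [3]  # only consider 3-hour tick intervals
```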
def __call__(self): 'Return the locations of the ticks' self.refresh() return self._locator()
-4,865,864,161,449,296,000
Return the locations of the ticks
env/lib/python2.7/site-packages/matplotlib/dates.py
__call__
rbalda/neural_ocr
python
def __call__(self): self.refresh() return self._locator()
def refresh(self): 'Refresh internal information based on current limits.' (dmin, dmax) = self.viewlim_to_dt() self._locator = self.get_locator(dmin, dmax)
3,994,028,736,872,974,300
Refresh internal information based on current limits.
env/lib/python2.7/site-packages/matplotlib/dates.py
refresh
rbalda/neural_ocr
python
def refresh(self): (dmin, dmax) = self.viewlim_to_dt() self._locator = self.get_locator(dmin, dmax)
def autoscale(self): 'Try to choose the view limits intelligently.' (dmin, dmax) = self.datalim_to_dt() self._locator = self.get_locator(dmin, dmax) return self._locator.autoscale()
4,944,211,996,552,428,000
Try to choose the view limits intelligently.
env/lib/python2.7/site-packages/matplotlib/dates.py
autoscale
rbalda/neural_ocr
python
def autoscale(self): (dmin, dmax) = self.datalim_to_dt() self._locator = self.get_locator(dmin, dmax) return self._locator.autoscale()
def get_locator(self, dmin, dmax): 'Pick the best locator based on a distance.' delta = relativedelta(dmax, dmin) tdelta = (dmax - dmin) if (dmin > dmax): delta = (- delta) tdelta = (- tdelta) numYears = float(delta.years) numMonths = ((numYears * MONTHS_PER_YEAR) + delta.months) numDays = tdelta.days numHours = ((numDays * HOURS_PER_DAY) + delta.hours) numMinutes = ((numHours * MIN_PER_HOUR) + delta.minutes) numSeconds = np.floor(_total_seconds(tdelta)) numMicroseconds = np.floor((_total_seconds(tdelta) * 1000000.0)) nums = [numYears, numMonths, numDays, numHours, numMinutes, numSeconds, numMicroseconds] use_rrule_locator = (([True] * 6) + [False]) byranges = [None, 1, 1, 0, 0, 0, None] for (i, (freq, num)) in enumerate(zip(self._freqs, nums)): if (num < self.minticks): byranges[i] = None continue for interval in self.intervald[freq]: if (num <= (interval * (self.maxticks[freq] - 1))): break else: warnings.warn("AutoDateLocator was unable to pick an appropriate interval for this date range. It may be necessary to add an interval value to the AutoDateLocator's intervald dictionary. Defaulting to {0}.".format(interval)) self._freq = freq if (self._byranges[i] and self.interval_multiples): byranges[i] = self._byranges[i][::interval] interval = 1 else: byranges[i] = self._byranges[i] break else: raise ValueError('No sensible date limit could be found in the AutoDateLocator.') if use_rrule_locator[i]: (_, bymonth, bymonthday, byhour, byminute, bysecond, _) = byranges rrule = rrulewrapper(self._freq, interval=interval, dtstart=dmin, until=dmax, bymonth=bymonth, bymonthday=bymonthday, byhour=byhour, byminute=byminute, bysecond=bysecond) locator = RRuleLocator(rrule, self.tz) else: locator = MicrosecondLocator(interval, tz=self.tz) locator.set_axis(self.axis) locator.set_view_interval(*self.axis.get_view_interval()) locator.set_data_interval(*self.axis.get_data_interval()) return locator
5,123,982,612,934,154,000
Pick the best locator based on a distance.
env/lib/python2.7/site-packages/matplotlib/dates.py
get_locator
rbalda/neural_ocr
python
def get_locator(self, dmin, dmax): delta = relativedelta(dmax, dmin) tdelta = (dmax - dmin) if (dmin > dmax): delta = (- delta) tdelta = (- tdelta) numYears = float(delta.years) numMonths = ((numYears * MONTHS_PER_YEAR) + delta.months) numDays = tdelta.days numHours = ((numDays * HOURS_PER_DAY) + delta.hours) numMinutes = ((numHours * MIN_PER_HOUR) + delta.minutes) numSeconds = np.floor(_total_seconds(tdelta)) numMicroseconds = np.floor((_total_seconds(tdelta) * 1000000.0)) nums = [numYears, numMonths, numDays, numHours, numMinutes, numSeconds, numMicroseconds] use_rrule_locator = (([True] * 6) + [False]) byranges = [None, 1, 1, 0, 0, 0, None] for (i, (freq, num)) in enumerate(zip(self._freqs, nums)): if (num < self.minticks): byranges[i] = None continue for interval in self.intervald[freq]: if (num <= (interval * (self.maxticks[freq] - 1))): break else: warnings.warn("AutoDateLocator was unable to pick an appropriate interval for this date range. It may be necessary to add an interval value to the AutoDateLocator's intervald dictionary. Defaulting to {0}.".format(interval)) self._freq = freq if (self._byranges[i] and self.interval_multiples): byranges[i] = self._byranges[i][::interval] interval = 1 else: byranges[i] = self._byranges[i] break else: raise ValueError('No sensible date limit could be found in the AutoDateLocator.') if use_rrule_locator[i]: (_, bymonth, bymonthday, byhour, byminute, bysecond, _) = byranges rrule = rrulewrapper(self._freq, interval=interval, dtstart=dmin, until=dmax, bymonth=bymonth, bymonthday=bymonthday, byhour=byhour, byminute=byminute, bysecond=bysecond) locator = RRuleLocator(rrule, self.tz) else: locator = MicrosecondLocator(interval, tz=self.tz) locator.set_axis(self.axis) locator.set_view_interval(*self.axis.get_view_interval()) locator.set_data_interval(*self.axis.get_data_interval()) return locator
def __init__(self, base=1, month=1, day=1, tz=None): '\n Mark years that are multiple of base on a given month and day\n (default jan 1).\n ' DateLocator.__init__(self, tz) self.base = ticker.Base(base) self.replaced = {'month': month, 'day': day, 'hour': 0, 'minute': 0, 'second': 0, 'tzinfo': tz}
2,204,420,914,898,779,400
Mark years that are multiple of base on a given month and day (default jan 1).
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, base=1, month=1, day=1, tz=None): '\n Mark years that are multiple of base on a given month and day\n (default jan 1).\n ' DateLocator.__init__(self, tz) self.base = ticker.Base(base) self.replaced = {'month': month, 'day': day, 'hour': 0, 'minute': 0, 'second': 0, 'tzinfo': tz}
def autoscale(self): '\n Set the view limits to include the data range.\n ' (dmin, dmax) = self.datalim_to_dt() ymin = self.base.le(dmin.year) ymax = self.base.ge(dmax.year) vmin = dmin.replace(year=ymin, **self.replaced) vmax = dmax.replace(year=ymax, **self.replaced) vmin = date2num(vmin) vmax = date2num(vmax) return self.nonsingular(vmin, vmax)
-3,630,744,519,156,373,500
Set the view limits to include the data range.
env/lib/python2.7/site-packages/matplotlib/dates.py
autoscale
rbalda/neural_ocr
python
def autoscale(self): '\n \n ' (dmin, dmax) = self.datalim_to_dt() ymin = self.base.le(dmin.year) ymax = self.base.ge(dmax.year) vmin = dmin.replace(year=ymin, **self.replaced) vmax = dmax.replace(year=ymax, **self.replaced) vmin = date2num(vmin) vmax = date2num(vmax) return self.nonsingular(vmin, vmax)
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None): '\n Mark every month in *bymonth*; *bymonth* can be an int or\n sequence. Default is ``range(1,13)``, i.e. every month.\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n ' if (bymonth is None): bymonth = range(1, 13) elif isinstance(bymonth, np.ndarray): bymonth = [x.item() for x in bymonth.astype(int)] rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday, interval=interval, **self.hms0d) RRuleLocator.__init__(self, rule, tz)
-7,672,659,490,995,540,000
Mark every month in *bymonth*; *bymonth* can be an int or sequence. Default is ``range(1,13)``, i.e. every month. *interval* is the interval between each iteration. For example, if ``interval=2``, mark every second occurrence.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None): '\n Mark every month in *bymonth*; *bymonth* can be an int or\n sequence. Default is ``range(1,13)``, i.e. every month.\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n ' if (bymonth is None): bymonth = range(1, 13) elif isinstance(bymonth, np.ndarray): bymonth = [x.item() for x in bymonth.astype(int)] rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday, interval=interval, **self.hms0d) RRuleLocator.__init__(self, rule, tz)
def __init__(self, byweekday=1, interval=1, tz=None): '\n Mark every weekday in *byweekday*; *byweekday* can be a number or\n sequence.\n\n Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,\n SU, the constants from :mod:`dateutil.rrule`, which have been\n imported into the :mod:`matplotlib.dates` namespace.\n\n *interval* specifies the number of weeks to skip. For example,\n ``interval=2`` plots every second week.\n ' if isinstance(byweekday, np.ndarray): [x.item() for x in byweekday.astype(int)] rule = rrulewrapper(DAILY, byweekday=byweekday, interval=interval, **self.hms0d) RRuleLocator.__init__(self, rule, tz)
-6,815,969,511,609,640,000
Mark every weekday in *byweekday*; *byweekday* can be a number or sequence. Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA, SU, the constants from :mod:`dateutil.rrule`, which have been imported into the :mod:`matplotlib.dates` namespace. *interval* specifies the number of weeks to skip. For example, ``interval=2`` plots every second week.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, byweekday=1, interval=1, tz=None): '\n Mark every weekday in *byweekday*; *byweekday* can be a number or\n sequence.\n\n Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,\n SU, the constants from :mod:`dateutil.rrule`, which have been\n imported into the :mod:`matplotlib.dates` namespace.\n\n *interval* specifies the number of weeks to skip. For example,\n ``interval=2`` plots every second week.\n ' if isinstance(byweekday, np.ndarray): [x.item() for x in byweekday.astype(int)] rule = rrulewrapper(DAILY, byweekday=byweekday, interval=interval, **self.hms0d) RRuleLocator.__init__(self, rule, tz)
def __init__(self, bymonthday=None, interval=1, tz=None): '\n Mark every day in *bymonthday*; *bymonthday* can be an int or\n sequence.\n\n Default is to tick every day of the month: ``bymonthday=range(1,32)``\n ' if (bymonthday is None): bymonthday = range(1, 32) elif isinstance(bymonthday, np.ndarray): bymonthday = [x.item() for x in bymonthday.astype(int)] rule = rrulewrapper(DAILY, bymonthday=bymonthday, interval=interval, **self.hms0d) RRuleLocator.__init__(self, rule, tz)
5,274,080,067,483,947,000
Mark every day in *bymonthday*; *bymonthday* can be an int or sequence. Default is to tick every day of the month: ``bymonthday=range(1,32)``
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, bymonthday=None, interval=1, tz=None): '\n Mark every day in *bymonthday*; *bymonthday* can be an int or\n sequence.\n\n Default is to tick every day of the month: ``bymonthday=range(1,32)``\n ' if (bymonthday is None): bymonthday = range(1, 32) elif isinstance(bymonthday, np.ndarray): bymonthday = [x.item() for x in bymonthday.astype(int)] rule = rrulewrapper(DAILY, bymonthday=bymonthday, interval=interval, **self.hms0d) RRuleLocator.__init__(self, rule, tz)
def __init__(self, byhour=None, interval=1, tz=None): '\n Mark every hour in *byhour*; *byhour* can be an int or sequence.\n Default is to tick every hour: ``byhour=range(24)``\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n ' if (byhour is None): byhour = range(24) rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval, byminute=0, bysecond=0) RRuleLocator.__init__(self, rule, tz)
-941,876,939,098,996,500
Mark every hour in *byhour*; *byhour* can be an int or sequence. Default is to tick every hour: ``byhour=range(24)`` *interval* is the interval between each iteration. For example, if ``interval=2``, mark every second occurrence.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, byhour=None, interval=1, tz=None): '\n Mark every hour in *byhour*; *byhour* can be an int or sequence.\n Default is to tick every hour: ``byhour=range(24)``\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n ' if (byhour is None): byhour = range(24) rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval, byminute=0, bysecond=0) RRuleLocator.__init__(self, rule, tz)
def __init__(self, byminute=None, interval=1, tz=None): '\n Mark every minute in *byminute*; *byminute* can be an int or\n sequence. Default is to tick every minute: ``byminute=range(60)``\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n ' if (byminute is None): byminute = range(60) rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval, bysecond=0) RRuleLocator.__init__(self, rule, tz)
296,218,444,658,126,300
Mark every minute in *byminute*; *byminute* can be an int or sequence. Default is to tick every minute: ``byminute=range(60)`` *interval* is the interval between each iteration. For example, if ``interval=2``, mark every second occurrence.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, byminute=None, interval=1, tz=None): '\n Mark every minute in *byminute*; *byminute* can be an int or\n sequence. Default is to tick every minute: ``byminute=range(60)``\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n ' if (byminute is None): byminute = range(60) rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval, bysecond=0) RRuleLocator.__init__(self, rule, tz)
def __init__(self, bysecond=None, interval=1, tz=None): '\n Mark every second in *bysecond*; *bysecond* can be an int or\n sequence. Default is to tick every second: ``bysecond = range(60)``\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n\n ' if (bysecond is None): bysecond = range(60) rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval) RRuleLocator.__init__(self, rule, tz)
-8,813,133,927,261,645,000
Mark every second in *bysecond*; *bysecond* can be an int or sequence. Default is to tick every second: ``bysecond = range(60)`` *interval* is the interval between each iteration. For example, if ``interval=2``, mark every second occurrence.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, bysecond=None, interval=1, tz=None): '\n Mark every second in *bysecond*; *bysecond* can be an int or\n sequence. Default is to tick every second: ``bysecond = range(60)``\n\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second occurrence.\n\n ' if (bysecond is None): bysecond = range(60) rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval) RRuleLocator.__init__(self, rule, tz)
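The locator family above is normally paired with a formatter on an axis. A minimal wiring sketch using the classes from these records:

```python
# Minimal wiring sketch: tick every 15 minutes, label ticks as HH:MM.
import matplotlib.pyplot as plt
from matplotlib.dates import MinuteLocator, DateFormatter

fig, ax = plt.subplots()
ax.xaxis.set_major_locator(MinuteLocator(byminute=range(0, 60, 15)))
ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
```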
def __init__(self, interval=1, tz=None): '\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second microsecond.\n\n ' self._interval = interval self._wrapped_locator = ticker.MultipleLocator(interval) self.tz = tz
2,253,424,607,567,393,000
*interval* is the interval between each iteration. For example, if ``interval=2``, mark every second microsecond.
env/lib/python2.7/site-packages/matplotlib/dates.py
__init__
rbalda/neural_ocr
python
def __init__(self, interval=1, tz=None): '\n *interval* is the interval between each iteration. For\n example, if ``interval=2``, mark every second microsecond.\n\n ' self._interval = interval self._wrapped_locator = ticker.MultipleLocator(interval) self.tz = tz
def _get_unit(self): '\n Return how many days a unit of the locator is; used for\n intelligent autoscaling.\n ' return (1.0 / MUSECONDS_PER_DAY)
-8,244,895,826,175,361,000
Return how many days a unit of the locator is; used for intelligent autoscaling.
env/lib/python2.7/site-packages/matplotlib/dates.py
_get_unit
rbalda/neural_ocr
python
def _get_unit(self): '\n Return how many days a unit of the locator is; used for\n intelligent autoscaling.\n ' return (1.0 / MUSECONDS_PER_DAY)
def _get_interval(self): '\n Return the number of units for each tick.\n ' return self._interval
8,565,982,526,005,957,000
Return the number of units for each tick.
env/lib/python2.7/site-packages/matplotlib/dates.py
_get_interval
rbalda/neural_ocr
python
def _get_interval(self): '\n \n ' return self._interval
@staticmethod def axisinfo(unit, axis): '\n Return the :class:`~matplotlib.units.AxisInfo` for *unit*.\n\n *unit* is a tzinfo instance or None.\n The *axis* argument is required but not used.\n ' tz = unit majloc = AutoDateLocator(tz=tz) majfmt = AutoDateFormatter(majloc, tz=tz) datemin = datetime.date(2000, 1, 1) datemax = datetime.date(2010, 1, 1) return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='', default_limits=(datemin, datemax))
-3,760,185,637,134,109,700
Return the :class:`~matplotlib.units.AxisInfo` for *unit*. *unit* is a tzinfo instance or None. The *axis* argument is required but not used.
env/lib/python2.7/site-packages/matplotlib/dates.py
axisinfo
rbalda/neural_ocr
python
@staticmethod def axisinfo(unit, axis): '\n Return the :class:`~matplotlib.units.AxisInfo` for *unit*.\n\n *unit* is a tzinfo instance or None.\n The *axis* argument is required but not used.\n ' tz = unit majloc = AutoDateLocator(tz=tz) majfmt = AutoDateFormatter(majloc, tz=tz) datemin = datetime.date(2000, 1, 1) datemax = datetime.date(2010, 1, 1) return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='', default_limits=(datemin, datemax))
@staticmethod def convert(value, unit, axis): '\n If *value* is not already a number or sequence of numbers,\n convert it with :func:`date2num`.\n\n The *unit* and *axis* arguments are not used.\n ' if units.ConversionInterface.is_numlike(value): return value return date2num(value)
1,106,963,852,552,765,200
If *value* is not already a number or sequence of numbers, convert it with :func:`date2num`. The *unit* and *axis* arguments are not used.
env/lib/python2.7/site-packages/matplotlib/dates.py
convert
rbalda/neural_ocr
python
@staticmethod def convert(value, unit, axis): '\n If *value* is not already a number or sequence of numbers,\n convert it with :func:`date2num`.\n\n The *unit* and *axis* arguments are not used.\n ' if units.ConversionInterface.is_numlike(value): return value return date2num(value)
@staticmethod def default_units(x, axis): '\n Return the tzinfo instance of *x* or of its first element, or None\n ' if isinstance(x, np.ndarray): x = x.ravel() try: x = cbook.safe_first_element(x) except (TypeError, StopIteration): pass try: return x.tzinfo except AttributeError: pass return None
8,571,303,660,489,029,000
Return the tzinfo instance of *x* or of its first element, or None
env/lib/python2.7/site-packages/matplotlib/dates.py
default_units
rbalda/neural_ocr
python
@staticmethod def default_units(x, axis): '\n \n ' if isinstance(x, np.ndarray): x = x.ravel() try: x = cbook.safe_first_element(x) except (TypeError, StopIteration): pass try: return x.tzinfo except AttributeError: pass return None
def _total_seconds(tdelta): '\n Alias providing support for datetime.timedelta.total_seconds() function\n calls even in Python < 2.7.\n\n The input `tdelta` is a datetime.timedelta object, and returns a float\n containing the total number of seconds representing the `tdelta`\n duration. For large durations (> 270 on most platforms), this loses\n microsecond accuracy.\n ' return ((tdelta.microseconds + ((tdelta.seconds + (tdelta.days * SEC_PER_DAY)) * 1000000.0)) * 1e-06)
8,408,382,371,278,128,000
Alias providing support for datetime.timedelta.total_seconds() function calls even in Python < 2.7. The input `tdelta` is a datetime.timedelta object, and returns a float containing the total number of seconds representing the `tdelta` duration. For large durations (> 270 on most platforms), this loses microsecond accuracy.
env/lib/python2.7/site-packages/matplotlib/dates.py
_total_seconds
rbalda/neural_ocr
python
def _total_seconds(tdelta): '\n Alias providing support for datetime.timedelta.total_seconds() function\n calls even in Python < 2.7.\n\n The input `tdelta` is a datetime.timedelta object, and returns a float\n containing the total number of seconds representing the `tdelta`\n duration. For large durations (> 270 on most platforms), this loses\n microsecond accuracy.\n ' return ((tdelta.microseconds + ((tdelta.seconds + (tdelta.days * SEC_PER_DAY)) * 1000000.0)) * 1e-06)
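As the docstring says, _total_seconds mirrors timedelta.total_seconds(), which the stdlib provides from Python 2.7 on. A quick equivalence check with the constant inlined:

```python
import datetime

SEC_PER_DAY = 86400.0  # inlined from matplotlib.dates

def _total_seconds(tdelta):
    return (tdelta.microseconds
            + (tdelta.seconds + (tdelta.days * SEC_PER_DAY)) * 1e6) * 1e-6

td = datetime.timedelta(days=2, seconds=3, microseconds=4)
print(_total_seconds(td), td.total_seconds())  # both 172803.000004
```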
def create_hparams(experiment): 'Creates the hyper parameters.' hparams = {} hparams['batch_size'] = 64 hparams['eval_batch_size'] = 64 hparams['learning_rate_warmup_steps'] = 2000 hparams['learning_rate_constant'] = 1 hparams['learning_rate'] = 0.001 hparams['train_epoches'] = 20 hparams['steps_per_epoch'] = 30 hparams['train_steps'] = (100 * 1000) hparams['eval_steps'] = 100 hparams['caption_optimizer'] = 't2t' hparams['clip_norm'] = 5.0 hparams['widget_encoder_checkpoint'] = '' hparams['train_files'] = '' hparams['eval_files'] = '' hparams['train_buffer_size'] = 2000 hparams['eval_buffer_size'] = 500 hparams['train_pixel_encoder'] = True hparams['debug'] = False hparams['distribution_strategy'] = 'mirrored' hparams['decoding_task'] = True hparams['classification_task'] = False hparams['use_decoding_for_classification'] = False hparams['classification_loss_weight'] = 1 hparams['train_with_one_node'] = False hparams['embedding_file'] = '' hparams['word_vocab_path'] = '' hparams['glove_trainable'] = True hparams['vocab_size'] = 10000 hparams['phrase_vocab_size'] = 10000 hparams['max_pixel_pos'] = 100 hparams['max_dom_pos'] = 500 hparams['screen_encoder'] = 'gcn' hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom'] hparams['obj_text_aggregation'] = 'max' hparams['synthetic_screen_noise'] = 0.0 hparams['encode_screen_with_context'] = False hparams['add_pixel_skip_link'] = False hparams['num_hidden_layers'] = 2 hparams['hidden_size'] = 2 hparams['filter_size'] = 2 hparams['num_heads'] = 2 hparams['dropout'] = 0.2 hparams['layer_prepostprocess_dropout'] = 0.2 hparams['attention_dropout'] = 0.2 hparams['relu_dropout'] = 0.2 transformer_hparams = model_params.BASE_PARAMS hparams.update(transformer_hparams) config = widget_caption_config.experiments[experiment] hparams.update(config) return hparams
6,100,350,083,624,158,000
Creates the hyper parameters.
widget_caption/widget_caption_model.py
create_hparams
AI21212019/google-research
python
def create_hparams(experiment): hparams = {} hparams['batch_size'] = 64 hparams['eval_batch_size'] = 64 hparams['learning_rate_warmup_steps'] = 2000 hparams['learning_rate_constant'] = 1 hparams['learning_rate'] = 0.001 hparams['train_epoches'] = 20 hparams['steps_per_epoch'] = 30 hparams['train_steps'] = (100 * 1000) hparams['eval_steps'] = 100 hparams['caption_optimizer'] = 't2t' hparams['clip_norm'] = 5.0 hparams['widget_encoder_checkpoint'] = '' hparams['train_files'] = '' hparams['eval_files'] = '' hparams['train_buffer_size'] = 2000 hparams['eval_buffer_size'] = 500 hparams['train_pixel_encoder'] = True hparams['debug'] = False hparams['distribution_strategy'] = 'mirrored' hparams['decoding_task'] = True hparams['classification_task'] = False hparams['use_decoding_for_classification'] = False hparams['classification_loss_weight'] = 1 hparams['train_with_one_node'] = False hparams['embedding_file'] = '' hparams['word_vocab_path'] = '' hparams['glove_trainable'] = True hparams['vocab_size'] = 10000 hparams['phrase_vocab_size'] = 10000 hparams['max_pixel_pos'] = 100 hparams['max_dom_pos'] = 500 hparams['screen_encoder'] = 'gcn' hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom'] hparams['obj_text_aggregation'] = 'max' hparams['synthetic_screen_noise'] = 0.0 hparams['encode_screen_with_context'] = False hparams['add_pixel_skip_link'] = False hparams['num_hidden_layers'] = 2 hparams['hidden_size'] = 2 hparams['filter_size'] = 2 hparams['num_heads'] = 2 hparams['dropout'] = 0.2 hparams['layer_prepostprocess_dropout'] = 0.2 hparams['attention_dropout'] = 0.2 hparams['relu_dropout'] = 0.2 transformer_hparams = model_params.BASE_PARAMS hparams.update(transformer_hparams) config = widget_caption_config.experiments[experiment] hparams.update(config) return hparams
def load_embed(file_name, vocab_size): 'Loads a pre-trained embedding matrix.\n\n Args:\n file_name: the file name of the embedding file.\n vocab_size: if > 0, only load embedding weights for vocab_size words.\n\n Returns:\n vocab: a list of tokens.\n embeds: a numpy array of embeddings for each token plus an OOV embedding.\n depth: the depth of the embedding.\n Raises:\n ValueError: embeddings have different depths.\n ' with tf.io.gfile.GFile(file_name, 'r') as embed_file: vocab = [] embeds = [] depth = (- 1) for (index, line) in enumerate(embed_file): if ((vocab_size > 0) and (index >= vocab_size)): break line = line.strip() tokens = line.strip().split(' ') word = tokens[0] vocab.append(word) if (depth == (- 1)): embed = [float(token) for token in tokens[1:]] else: embed = [float(token) for token in tokens[(- depth):]] d = len(embed) if (depth == (- 1)): depth = d if (d != depth): raise ValueError('Inconsistent embedding sizes') embeds.append(embed) embeds = np.stack(embeds) return (vocab, embeds, depth)
8,892,763,666,272,563,000
Loads a pre-trained embedding matrix. Args: file_name: the file name of the embedding file. vocab_size: if > 0, only load embedding weights for vocab_size words. Returns: vocab: a list of tokens. embeds: a numpy array of embeddings for each token plus an OOV embedding. depth: the depth of the embedding. Raises: ValueError: embeddings have different depths.
widget_caption/widget_caption_model.py
load_embed
AI21212019/google-research
python
def load_embed(file_name, vocab_size): 'Loads a pre-trained embedding matrix.\n\n Args:\n file_name: the file name of the embedding file.\n vocab_size: if > 0, only load embedding weights for vocab_size words.\n\n Returns:\n vocab: a list of tokens.\n embeds: a numpy array of embeddings for each token plus an OOV embedding.\n depth: the depth of the embedding.\n Raises:\n ValueError: embeddings have different depths.\n ' with tf.io.gfile.GFile(file_name, 'r') as embed_file: vocab = [] embeds = [] depth = (- 1) for (index, line) in enumerate(embed_file): if ((vocab_size > 0) and (index >= vocab_size)): break line = line.strip() tokens = line.strip().split(' ') word = tokens[0] vocab.append(word) if (depth == (- 1)): embed = [float(token) for token in tokens[1:]] else: embed = [float(token) for token in tokens[(- depth):]] d = len(embed) if (depth == (- 1)): depth = d if (d != depth): raise ValueError('Inconsistent embedding sizes') embeds.append(embed) embeds = np.stack(embeds) return (vocab, embeds, depth)
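load_embed expects a GloVe-style text file: one token per line followed by its space-separated vector components, with the embedding depth inferred from the first line. A hypothetical file and call:

```python
# Hypothetical embedding file "embed.txt" (GloVe-style):
#   the 0.1 0.2 0.3
#   cat 0.4 0.5 0.6
# With the function above (note: it reads through tf.io.gfile, so
# TensorFlow must be available):
vocab, embeds, depth = load_embed('embed.txt', vocab_size=10000)
print(len(vocab), embeds.shape, depth)  # 2 (2, 3) 3
```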
def compute_score(predictions, references, vocab=None): 'Computes the bleu score.\n\n Args:\n predictions: a numpy array in the shape of [batch_size, max_phrase_length]\n references: a numpy array in the shape of [batch_size, 7, 10]\n vocab: the vocabulary file.\n\n Returns:\n a scalar value for the corpus level bleu score.\n ' assert (np.rank(predictions) == 2) assert (predictions.shape[0] == references.shape[0]) batch_size = predictions.shape[0] predictions = tf.make_ndarray(tf.make_tensor_proto(predictions)).tolist() references = tf.make_ndarray(tf.make_tensor_proto(references)).tolist() hypotheses_list = [] references_list = [] for index in range(batch_size): h = predictions[index] try: eos_index = h.index(input_utils.EOS) except ValueError: eos_index = len(h) hypotheses_list.append(h[:eos_index]) ref = references[index].decode().split('|') ref_list = [r.strip().split(' ') for r in ref if r.strip()] references_list.append(ref_list) all_scores = collections.defaultdict(list) for (hypothesis, references) in zip(hypotheses_list, references_list): if ((vocab is not None) and len(vocab)): hypothesis = [vocab[word_id].numpy().decode() for word_id in hypothesis if (word_id > 3)] logging.info('hypothesis: %s', str(hypothesis)) logging.info('references: %s', str(references)) h_str = ' '.join((str(e) for e in hypothesis)) r_str = [' '.join((str(e) for e in ref)) for ref in references] scores = widget_caption_eval.coco_evaluate(r_str, h_str) for (key, score) in scores.items(): all_scores[key].append(score) score_names = ['BLEU-1', 'BLEU-2', 'BLEU-3', 'BLEU-4', 'ROUGE-1-f1-mean', 'ROUGE-1-f1-min', 'ROUGE-1-f1-max', 'ROUGE-2-f1-mean', 'ROUGE-2-f1-min', 'ROUGE-2-f1-max', 'ROUGE-L-f1-mean', 'ROUGE-L-f1-min', 'ROUGE-L-f1-max'] return [np.array(all_scores[name], dtype=np.float32) for name in score_names]
2,865,074,634,727,857,700
Computes the bleu score. Args: predictions: a numpy array in the shape of [batch_size, max_phrase_length] references: a numpy array in the shape of [batch_size, 7, 10] vocab: the vocabulary file. Returns: a scalar value for the corpus level bleu score.
widget_caption/widget_caption_model.py
compute_score
AI21212019/google-research
python
def compute_score(predictions, references, vocab=None): 'Computes the bleu score.\n\n Args:\n predictions: a numpy array in the shape of [batch_size, max_phrase_length]\n references: a numpy array in the shape of [batch_size, 7, 10]\n vocab: the vocabulary file.\n\n Returns:\n a scalar value for the corpus level bleu score.\n ' assert (np.rank(predictions) == 2) assert (predictions.shape[0] == references.shape[0]) batch_size = predictions.shape[0] predictions = tf.make_ndarray(tf.make_tensor_proto(predictions)).tolist() references = tf.make_ndarray(tf.make_tensor_proto(references)).tolist() hypotheses_list = [] references_list = [] for index in range(batch_size): h = predictions[index] try: eos_index = h.index(input_utils.EOS) except ValueError: eos_index = len(h) hypotheses_list.append(h[:eos_index]) ref = references[index].decode().split('|') ref_list = [r.strip().split(' ') for r in ref if r.strip()] references_list.append(ref_list) all_scores = collections.defaultdict(list) for (hypothesis, references) in zip(hypotheses_list, references_list): if ((vocab is not None) and len(vocab)): hypothesis = [vocab[word_id].numpy().decode() for word_id in hypothesis if (word_id > 3)] logging.info('hypothesis: %s', str(hypothesis)) logging.info('references: %s', str(references)) h_str = ' '.join((str(e) for e in hypothesis)) r_str = [' '.join((str(e) for e in ref)) for ref in references] scores = widget_caption_eval.coco_evaluate(r_str, h_str) for (key, score) in scores.items(): all_scores[key].append(score) score_names = ['BLEU-1', 'BLEU-2', 'BLEU-3', 'BLEU-4', 'ROUGE-1-f1-mean', 'ROUGE-1-f1-min', 'ROUGE-1-f1-max', 'ROUGE-2-f1-mean', 'ROUGE-2-f1-min', 'ROUGE-2-f1-max', 'ROUGE-L-f1-mean', 'ROUGE-L-f1-min', 'ROUGE-L-f1-max'] return [np.array(all_scores[name], dtype=np.float32) for name in score_names]
def init_resnet(hparams, model): 'Init resnet weights from a TF model if provided.' if (not hparams['widget_encoder_checkpoint']): return reader = tf.train.load_checkpoint(hparams['widget_encoder_checkpoint']) init_set = input_utils.input_fn(hparams['train_files'], 1, hparams['vocab_size'], hparams['max_pixel_pos'], hparams['max_dom_pos'], epoches=1, buffer_size=1) init_features = next(iter(init_set)) init_target = model.compute_targets(init_features) model([init_features, init_target[0]], training=True) weight_value_tuples = [] for layer in model._encoder._pixel_layers: for param in layer.weights: if ('batch_normalization' in param.name): continue (sublayer, varname) = param.name.replace(':0', '').split('/')[(- 2):] var_name = 'encoder/{}/{}'.format(sublayer, varname) if reader.has_tensor(var_name): logging.info('Found pretrained weights: %s %s, %s %s', param.name, param.shape, var_name, reader.get_tensor(var_name).shape) weight_value_tuples.append((param, reader.get_tensor(var_name))) logging.info('Load pretrained %s weights', len(weight_value_tuples)) tf.keras.backend.batch_set_value(weight_value_tuples)
-5,117,677,739,210,213,000
Init resnet weights from a TF model if provided.
widget_caption/widget_caption_model.py
init_resnet
AI21212019/google-research
python
def init_resnet(hparams, model): if (not hparams['widget_encoder_checkpoint']): return reader = tf.train.load_checkpoint(hparams['widget_encoder_checkpoint']) init_set = input_utils.input_fn(hparams['train_files'], 1, hparams['vocab_size'], hparams['max_pixel_pos'], hparams['max_dom_pos'], epoches=1, buffer_size=1) init_features = next(iter(init_set)) init_target = model.compute_targets(init_features) model([init_features, init_target[0]], training=True) weight_value_tuples = [] for layer in model._encoder._pixel_layers: for param in layer.weights: if ('batch_normalization' in param.name): continue (sublayer, varname) = param.name.replace(':0', '').split('/')[(- 2):] var_name = 'encoder/{}/{}'.format(sublayer, varname) if reader.has_tensor(var_name): logging.info('Found pretrained weights: %s %s, %s %s', param.name, param.shape, var_name, reader.get_tensor(var_name).shape) weight_value_tuples.append((param, reader.get_tensor(var_name))) logging.info('Load pretrained %s weights', len(weight_value_tuples)) tf.keras.backend.batch_set_value(weight_value_tuples)
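init_resnet pulls pretrained weights out of a TF checkpoint by variable name via a CheckpointReader. A hedged sketch of the same reader pattern; the checkpoint path and variable name below are placeholders, not values from the record:

import tensorflow as tf

# Sketch: open a checkpoint, list its variables, and read one tensor.
reader = tf.train.load_checkpoint('path/to/ckpt')  # placeholder path
for name, shape in reader.get_variable_to_shape_map().items():
    print(name, shape)
if reader.has_tensor('encoder/conv2d/kernel'):  # hypothetical variable name
    kernel = reader.get_tensor('encoder/conv2d/kernel')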
def call(self, input_tensor, training, dropout=0.0): 'Defines a single encoding layer.' x = input_tensor skip = x x = self._conv_layer_1(x) x = self._batch_norm_layer_1(x, training=training) x = tf.nn.relu(x) if training: x = tf.nn.dropout(x, rate=dropout) x = self._conv_layer_2(x) x = self._batch_norm_layer_2(x, training=training) x += skip x = tf.nn.relu(x) if training: x = tf.nn.dropout(x, rate=dropout) x = self._conv_layer_3(x) x = self._batch_norm_layer_3(x, training=training) x = tf.nn.relu(x) if training: x = tf.nn.dropout(x, rate=dropout) return x
6,565,116,154,512,076,000
Defines a single encoding layer.
widget_caption/widget_caption_model.py
call
AI21212019/google-research
python
def call(self, input_tensor, training, dropout=0.0): x = input_tensor skip = x x = self._conv_layer_1(x) x = self._batch_norm_layer_1(x, training=training) x = tf.nn.relu(x) if training: x = tf.nn.dropout(x, rate=dropout) x = self._conv_layer_2(x) x = self._batch_norm_layer_2(x, training=training) x += skip x = tf.nn.relu(x) if training: x = tf.nn.dropout(x, rate=dropout) x = self._conv_layer_3(x) x = self._batch_norm_layer_3(x, training=training) x = tf.nn.relu(x) if training: x = tf.nn.dropout(x, rate=dropout) return x
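PixelEncoderLayer.call is a residual conv block: two conv/batch-norm steps with a skip connection added back before the second ReLU, then a third conv. A minimal Keras sketch of the shape-preserving skip-connection core, with illustrative sizes:

import tensorflow as tf

# Hedged sketch: a shape-preserving residual conv step mirroring the
# skip connection in PixelEncoderLayer.call (sizes are made up).
inputs = tf.keras.Input(shape=(32, 32, 16))
x = tf.keras.layers.Conv2D(16, 3, padding='same')(inputs)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.Conv2D(16, 3, padding='same')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Add()([x, inputs])  # the skip connection
x = tf.keras.layers.ReLU()(x)
model = tf.keras.Model(inputs, x)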
def _get_encoder3(self, initial_channel_size=3): 'Defines the encoding model with pre-defined filter/kernel sizes.' pixel_layers = [] filter_groups = [[initial_channel_size, initial_channel_size, 4], [4, 4, 16], [16, 16, 32], [32, 32, 64], [64, 64, 128], [128, 128, 256]] kernel_size_groups = [[5, 3, 5], [5, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]] for (index, (filters, kernel_sizes)) in enumerate(zip(filter_groups, kernel_size_groups)): assert (len(filters) == len(kernel_sizes)) name = 'pixel_encoder_{}'.format(index) layer = PixelEncoderLayer(name, filters, kernel_sizes) pixel_layers.append(layer) return pixel_layers
9,135,900,090,690,072,000
Defines the encoding model with pre-defined filter/kernel sizes.
widget_caption/widget_caption_model.py
_get_encoder3
AI21212019/google-research
python
def _get_encoder3(self, initial_channel_size=3): pixel_layers = [] filter_groups = [[initial_channel_size, initial_channel_size, 4], [4, 4, 16], [16, 16, 32], [32, 32, 64], [64, 64, 128], [128, 128, 256]] kernel_size_groups = [[5, 3, 5], [5, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]] for (index, (filters, kernel_sizes)) in enumerate(zip(filter_groups, kernel_size_groups)): assert (len(filters) == len(kernel_sizes)) name = 'pixel_encoder_{}'.format(index) layer = PixelEncoderLayer(name, filters, kernel_sizes) pixel_layers.append(layer) return pixel_layers
def _embed_composite_feature(self, features, embedding_layers): 'Embed a position feature.' embedding_list = [] for i in range(len(embedding_layers)): embedding_list.append(embedding_layers[i](features[:, :, i])) embedding = tf.add_n(embedding_list) return embedding
-2,585,779,683,394,142,700
Embed a position feature.
widget_caption/widget_caption_model.py
_embed_composite_feature
AI21212019/google-research
python
def _embed_composite_feature(self, features, embedding_layers): embedding_list = [] for i in range(len(embedding_layers)): embedding_list.append(embedding_layers[i](features[:, :, i])) embedding = tf.add_n(embedding_list) return embedding
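_embed_composite_feature gives each coordinate of a composite feature (e.g. a multi-part screen position) its own embedding table and sums the lookups. A hedged sketch with made-up sizes:

import tensorflow as tf

# Sketch: one embedding table per coordinate, summed with tf.add_n,
# as in _embed_composite_feature. All sizes are illustrative.
num_coords, vocab_size, dim = 4, 100, 8
layers = [tf.keras.layers.Embedding(vocab_size, dim) for _ in range(num_coords)]
features = tf.random.uniform([2, 5, num_coords], maxval=vocab_size, dtype=tf.int32)
embedding = tf.add_n([layers[i](features[:, :, i]) for i in range(num_coords)])
print(embedding.shape)  # (2, 5, 8)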
def _encode_view_hierarchy(self, features, object_selector, training): 'Encodes view hierarchy.' logging.info('Using Transformer screen encoder') developer_embeddings = self._word_embedding_layer(features['developer_token_id']) resource_embeddings = self._word_embedding_layer(features['resource_token_id']) developer_embeddings = self._aggregate_text_embedding(features['developer_token_id'], developer_embeddings) resource_embeddings = self._aggregate_text_embedding(features['resource_token_id'], resource_embeddings) type_embedding = self._type_embedding_layer(tf.maximum(features['obj_type'], 0)) clickable_embedding = self._clickable_embedding_layer(features['obj_clickable']) object_info = [] if ('text' in self._hparams['screen_embedding_feature']): object_info.append(developer_embeddings) object_info.append(resource_embeddings) if ('type' in self._hparams['screen_embedding_feature']): object_info.append(type_embedding) if ('pos' in self._hparams['screen_embedding_feature']): pos_embedding = self._embed_composite_feature(features['obj_screen_pos'], self._pos_embedding_layers) object_info.append(pos_embedding) if ('click' in self._hparams['screen_embedding_feature']): object_info.append(clickable_embedding) if ('dom' in self._hparams['screen_embedding_feature']): dom_embedding = self._embed_composite_feature(features['obj_dom_pos'], self._dom_embedding_layers) object_info.append(dom_embedding) object_embed = tf.concat(object_info, (- 1)) object_embed = self._vh_final_layer(object_embed) object_mask = tf.cast(tf.not_equal(features['obj_type'], (- 1)), tf.float32) object_embed = (object_embed * tf.expand_dims(object_mask, (- 1))) att_bias = model_utils.get_padding_bias(object_mask) if training: object_embed = tf.nn.dropout(object_embed, rate=self._hparams['dropout']) encoder_output = self._transformer_encoder(object_embed, attention_bias=att_bias, inputs_padding=None, training=training) object_embed = tf.reshape(object_embed, [(- 1), self._hparams['hidden_size']]) encoder_output = tf.reshape(encoder_output, [(- 1), self._hparams['hidden_size']]) valid_object_embed = tf.gather(object_embed, object_selector) valid_screen_encoding = tf.gather(encoder_output, object_selector) return (valid_screen_encoding, valid_object_embed)
5,191,364,270,785,644,000
Encodes view hierarchy.
widget_caption/widget_caption_model.py
_encode_view_hierarchy
AI21212019/google-research
python
def _encode_view_hierarchy(self, features, object_selector, training): logging.info('Using Transformer screen encoder') developer_embeddings = self._word_embedding_layer(features['developer_token_id']) resource_embeddings = self._word_embedding_layer(features['resource_token_id']) developer_embeddings = self._aggregate_text_embedding(features['developer_token_id'], developer_embeddings) resource_embeddings = self._aggregate_text_embedding(features['resource_token_id'], resource_embeddings) type_embedding = self._type_embedding_layer(tf.maximum(features['obj_type'], 0)) clickable_embedding = self._clickable_embedding_layer(features['obj_clickable']) object_info = [] if ('text' in self._hparams['screen_embedding_feature']): object_info.append(developer_embeddings) object_info.append(resource_embeddings) if ('type' in self._hparams['screen_embedding_feature']): object_info.append(type_embedding) if ('pos' in self._hparams['screen_embedding_feature']): pos_embedding = self._embed_composite_feature(features['obj_screen_pos'], self._pos_embedding_layers) object_info.append(pos_embedding) if ('click' in self._hparams['screen_embedding_feature']): object_info.append(clickable_embedding) if ('dom' in self._hparams['screen_embedding_feature']): dom_embedding = self._embed_composite_feature(features['obj_dom_pos'], self._dom_embedding_layers) object_info.append(dom_embedding) object_embed = tf.concat(object_info, (- 1)) object_embed = self._vh_final_layer(object_embed) object_mask = tf.cast(tf.not_equal(features['obj_type'], (- 1)), tf.float32) object_embed = (object_embed * tf.expand_dims(object_mask, (- 1))) att_bias = model_utils.get_padding_bias(object_mask) if training: object_embed = tf.nn.dropout(object_embed, rate=self._hparams['dropout']) encoder_output = self._transformer_encoder(object_embed, attention_bias=att_bias, inputs_padding=None, training=training) object_embed = tf.reshape(object_embed, [(- 1), self._hparams['hidden_size']]) encoder_output = tf.reshape(encoder_output, [(- 1), self._hparams['hidden_size']]) valid_object_embed = tf.gather(object_embed, object_selector) valid_screen_encoding = tf.gather(encoder_output, object_selector) return (valid_screen_encoding, valid_object_embed)
def _aggregate_text_embedding(self, token_ids, embeddings): 'Aggregate text embedding for a UI element.' if (self._hparams['obj_text_aggregation'] == 'max'): valid_token_mask = tf.greater_equal(token_ids, 4) invalid_token_bias = (tf.cast(tf.logical_not(valid_token_mask), tf.float32) * (- 1000000000.0)) embeddings = (embeddings + tf.expand_dims(invalid_token_bias, axis=(- 1))) embeddings = tf.reduce_max(embeddings, axis=(- 2)) valid_object_mask = tf.cast(tf.reduce_any(valid_token_mask, axis=(- 1)), tf.float32) embeddings = (embeddings * tf.expand_dims(valid_object_mask, axis=(- 1))) elif (self._hparams['obj_text_aggregation'] == 'sum'): real_objects = tf.cast(tf.greater_equal(token_ids, 4), tf.float32) embeddings = tf.reduce_sum(input_tensor=(embeddings * tf.expand_dims(real_objects, 3)), axis=(- 2)) else: raise ValueError(('Unrecognized token aggregation %s' % self._hparams['obj_text_aggregation'])) return embeddings
-6,579,745,571,836,428,000
Aggregate text embedding for a UI element.
widget_caption/widget_caption_model.py
_aggregate_text_embedding
AI21212019/google-research
python
def _aggregate_text_embedding(self, token_ids, embeddings): if (self._hparams['obj_text_aggregation'] == 'max'): valid_token_mask = tf.greater_equal(token_ids, 4) invalid_token_bias = (tf.cast(tf.logical_not(valid_token_mask), tf.float32) * (- 1000000000.0)) embeddings = (embeddings + tf.expand_dims(invalid_token_bias, axis=(- 1))) embeddings = tf.reduce_max(embeddings, axis=(- 2)) valid_object_mask = tf.cast(tf.reduce_any(valid_token_mask, axis=(- 1)), tf.float32) embeddings = (embeddings * tf.expand_dims(valid_object_mask, axis=(- 1))) elif (self._hparams['obj_text_aggregation'] == 'sum'): real_objects = tf.cast(tf.greater_equal(token_ids, 4), tf.float32) embeddings = tf.reduce_sum(input_tensor=(embeddings * tf.expand_dims(real_objects, 3)), axis=(- 2)) else: raise ValueError(('Unrecognized token aggregation %s' % self._hparams['obj_text_aggregation'])) return embeddings
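The 'max' branch above hides special tokens (ids below 4) behind a large negative bias so they can never win the max-pool. A tiny numeric sketch of that masked max-pooling; the id-4 cutoff mirrors the record:

import tensorflow as tf

# Sketch: masked max-pooling over token embeddings, as in the 'max'
# branch of _aggregate_text_embedding.
token_ids = tf.constant([[5, 7, 0]])               # last token is padding
embeddings = tf.constant([[[1.0], [3.0], [9.0]]])  # padding has the largest value
valid = tf.greater_equal(token_ids, 4)
bias = tf.cast(tf.logical_not(valid), tf.float32) * -1e9
pooled = tf.reduce_max(embeddings + tf.expand_dims(bias, -1), axis=-2)
print(pooled.numpy())  # [[3.]] -- the padded 9.0 is ignored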
def call(self, decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias, training, cache=None): 'Return the output of the decoder layer stacks.\n\n Args:\n decoder_inputs: A tensor with shape [batch_size, target_length,\n hidden_size].\n encoder_outputs: A tensor with shape [batch_size, input_length,\n hidden_size]\n decoder_self_attention_bias: A tensor with shape [1, 1, target_len,\n target_length], the bias for decoder self-attention layer.\n attention_bias: A tensor with shape [batch_size, 1, 1, input_length], the\n bias for encoder-decoder attention layer.\n training: A bool, whether in training mode or not.\n cache: (Used for fast decoding) A nested dictionary storing previous\n decoder self-attention values. The items are:\n {layer_n: {"k": A tensor with shape [batch_size, i, key_channels],\n "v": A tensor with shape [batch_size, i, value_channels]},\n ...}\n\n Returns:\n Output of decoder layer stack.\n float32 tensor with shape [batch_size, target_length, hidden_size]\n ' outputs = self._transformer_decoder(decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias, training=training, cache=cache) return outputs
-5,761,504,163,877,722,000
Return the output of the decoder layer stacks. Args: decoder_inputs: A tensor with shape [batch_size, target_length, hidden_size]. encoder_outputs: A tensor with shape [batch_size, input_length, hidden_size] decoder_self_attention_bias: A tensor with shape [1, 1, target_len, target_length], the bias for decoder self-attention layer. attention_bias: A tensor with shape [batch_size, 1, 1, input_length], the bias for encoder-decoder attention layer. training: A bool, whether in training mode or not. cache: (Used for fast decoding) A nested dictionary storing previous decoder self-attention values. The items are: {layer_n: {"k": A tensor with shape [batch_size, i, key_channels], "v": A tensor with shape [batch_size, i, value_channels]}, ...} Returns: Output of decoder layer stack. float32 tensor with shape [batch_size, target_length, hidden_size]
widget_caption/widget_caption_model.py
call
AI21212019/google-research
python
def call(self, decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias, training, cache=None): 'Return the output of the decoder layer stacks.\n\n Args:\n decoder_inputs: A tensor with shape [batch_size, target_length,\n hidden_size].\n encoder_outputs: A tensor with shape [batch_size, input_length,\n hidden_size]\n decoder_self_attention_bias: A tensor with shape [1, 1, target_len,\n target_length], the bias for decoder self-attention layer.\n attention_bias: A tensor with shape [batch_size, 1, 1, input_length], the\n bias for encoder-decoder attention layer.\n training: A bool, whether in training mode or not.\n cache: (Used for fast decoding) A nested dictionary storing previous\n decoder self-attention values. The items are:\n {layer_n: {"k": A tensor with shape [batch_size, i, key_channels],\n "v": A tensor with shape [batch_size, i, value_channels]},\n ...}\n\n Returns:\n Output of decoder layer stack.\n float32 tensor with shape [batch_size, target_length, hidden_size]\n ' outputs = self._transformer_decoder(decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias, training=training, cache=cache) return outputs
def compute_caption_metrics(self, predictions, references): 'Computes the eval metrics for decoding.' py_types = ([tf.float32] * len(self._SCORE_NAMES)) scores = tf.py_function(compute_score, (predictions, references, self._word_vocab), py_types) for (name, score) in zip(self._SCORE_NAMES, scores): scoped_name = 'COCO/{}'.format(name) self.caption_metrics[scoped_name].update_state(score) self.model_metrics[scoped_name] = self.caption_metrics[scoped_name]
-774,783,375,998,931,000
Computes the eval metrics for decoding.
widget_caption/widget_caption_model.py
compute_caption_metrics
AI21212019/google-research
python
def compute_caption_metrics(self, predictions, references): py_types = ([tf.float32] * len(self._SCORE_NAMES)) scores = tf.py_function(compute_score, (predictions, references, self._word_vocab), py_types) for (name, score) in zip(self._SCORE_NAMES, scores): scoped_name = 'COCO/{}'.format(name) self.caption_metrics[scoped_name].update_state(score) self.model_metrics[scoped_name] = self.caption_metrics[scoped_name]
def decode(self, targets, encoder_outputs, training): 'Generate logits for each value in the target sequence.\n\n Args:\n targets: target values for the output sequence. int tensor with shape\n [batch_size, target_length]\n encoder_outputs: continuous representation of input sequence. float tensor\n with shape [batch_size, input_length, hidden_size]\n training: boolean, whether in training mode or not.\n\n Returns:\n float32 tensor with shape [batch_size, target_length, vocab_size]\n ' with tf.name_scope('decode'): length = tf.shape(targets)[1] decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(length) encoder_shape = tf.shape(encoder_outputs) mask = tf.ones([encoder_shape[0], encoder_shape[1]]) attention_bias = model_utils.get_padding_bias(mask) targets = tf.pad(targets, [[0, 0], [1, 0]], constant_values=input_utils.START) targets = targets[:, :(- 1)] decoder_inputs = self._word_embedding_layer(targets) with tf.name_scope('add_pos_encoding'): pos_encoding = self._position_embedding_layer(decoder_inputs) decoder_inputs += pos_encoding if training: decoder_inputs = tf.nn.dropout(decoder_inputs, rate=self._hparams['layer_postprocess_dropout']) decoder_outputs = self._decoder(decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias, training=training) logits = self._word_layer(decoder_outputs) return logits
5,422,571,246,841,141,000
Generate logits for each value in the target sequence. Args: targets: target values for the output sequence. int tensor with shape [batch_size, target_length] encoder_outputs: continuous representation of input sequence. float tensor with shape [batch_size, input_length, hidden_size] training: boolean, whether in training mode or not. Returns: float32 tensor with shape [batch_size, target_length, vocab_size]
widget_caption/widget_caption_model.py
decode
AI21212019/google-research
python
def decode(self, targets, encoder_outputs, training): 'Generate logits for each value in the target sequence.\n\n Args:\n targets: target values for the output sequence. int tensor with shape\n [batch_size, target_length]\n encoder_outputs: continuous representation of input sequence. float tensor\n with shape [batch_size, input_length, hidden_size]\n training: boolean, whether in training mode or not.\n\n Returns:\n float32 tensor with shape [batch_size, target_length, vocab_size]\n ' with tf.name_scope('decode'): length = tf.shape(targets)[1] decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(length) encoder_shape = tf.shape(encoder_outputs) mask = tf.ones([encoder_shape[0], encoder_shape[1]]) attention_bias = model_utils.get_padding_bias(mask) targets = tf.pad(targets, [[0, 0], [1, 0]], constant_values=input_utils.START) targets = targets[:, :(- 1)] decoder_inputs = self._word_embedding_layer(targets) with tf.name_scope('add_pos_encoding'): pos_encoding = self._position_embedding_layer(decoder_inputs) decoder_inputs += pos_encoding if training: decoder_inputs = tf.nn.dropout(decoder_inputs, rate=self._hparams['layer_postprocess_dropout']) decoder_outputs = self._decoder(decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias, training=training) logits = self._word_layer(decoder_outputs) return logits
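decode applies the usual teacher-forcing shift before embedding: prepend START and drop the last position, so position t is predicted from tokens strictly before t. The shift in isolation; START = 1 is an assumed stand-in for input_utils.START:

import tensorflow as tf

# Sketch: the right-shift used by decode() to build decoder inputs.
START = 1  # assumption: stands in for input_utils.START
targets = tf.constant([[7, 8, 9, 2]])
shifted = tf.pad(targets, [[0, 0], [1, 0]], constant_values=START)[:, :-1]
print(shifted.numpy())  # [[1 7 8 9]]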
def predict(self, encoder_outputs, training): 'Return predicted sequence.' batch_size = tf.shape(encoder_outputs)[0] symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length=self._MAX_DECODE_LENGTH, training=training) initial_ids = (tf.ones([batch_size], dtype=tf.int32) * input_utils.START) init_decode_length = 0 num_heads = self._hparams['num_heads'] dim_per_head = (self._hparams['hidden_size'] // num_heads) cache = {('layer_%d' % layer): {'k': tf.zeros([batch_size, init_decode_length, num_heads, dim_per_head]), 'v': tf.zeros([batch_size, init_decode_length, num_heads, dim_per_head])} for layer in range(self._hparams['num_hidden_layers'])} encoder_shape = tf.shape(encoder_outputs) mask = tf.ones([encoder_shape[0], encoder_shape[1]]) attention_bias = model_utils.get_padding_bias(mask) cache['encoder_outputs'] = encoder_outputs cache['encoder_decoder_attention_bias'] = attention_bias (decoded_ids, _) = ops.beam_search.sequence_beam_search(symbols_to_logits_fn=symbols_to_logits_fn, initial_ids=initial_ids, initial_cache=cache, vocab_size=self._hparams['vocab_size'], beam_size=self._hparams['beam_size'], alpha=1, max_decode_length=self._MAX_DECODE_LENGTH, eos_id=input_utils.EOS) top_decoded_ids = decoded_ids[:, 0, 1:] return top_decoded_ids
-2,499,680,710,903,466,500
Return predicted sequence.
widget_caption/widget_caption_model.py
predict
AI21212019/google-research
python
def predict(self, encoder_outputs, training): batch_size = tf.shape(encoder_outputs)[0] symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length=self._MAX_DECODE_LENGTH, training=training) initial_ids = (tf.ones([batch_size], dtype=tf.int32) * input_utils.START) init_decode_length = 0 num_heads = self._hparams['num_heads'] dim_per_head = (self._hparams['hidden_size'] // num_heads) cache = {('layer_%d' % layer): {'k': tf.zeros([batch_size, init_decode_length, num_heads, dim_per_head]), 'v': tf.zeros([batch_size, init_decode_length, num_heads, dim_per_head])} for layer in range(self._hparams['num_hidden_layers'])} encoder_shape = tf.shape(encoder_outputs) mask = tf.ones([encoder_shape[0], encoder_shape[1]]) attention_bias = model_utils.get_padding_bias(mask) cache['encoder_outputs'] = encoder_outputs cache['encoder_decoder_attention_bias'] = attention_bias (decoded_ids, _) = ops.beam_search.sequence_beam_search(symbols_to_logits_fn=symbols_to_logits_fn, initial_ids=initial_ids, initial_cache=cache, vocab_size=self._hparams['vocab_size'], beam_size=self._hparams['beam_size'], alpha=1, max_decode_length=self._MAX_DECODE_LENGTH, eos_id=input_utils.EOS) top_decoded_ids = decoded_ids[:, 0, 1:] return top_decoded_ids
def compute_targets(self, features): 'Compute the target token ids and phrase ids.' batch_size = tf.shape(features['label_flag'])[0] num_objects = tf.shape(features['label_flag'])[1] worker_position = self._caption_object_selector(features) valid_references = tf.gather(tf.reshape(features['reference'], [(- 1)]), worker_position) target_phrase = features['caption_token_id'] target_phrase = tf.reshape(target_phrase, [(batch_size * num_objects), self._MAX_DECODE_LENGTH]) valid_target_phrase = tf.gather(target_phrase, worker_position) return (valid_target_phrase, valid_references)
402,812,099,101,840,260
Compute the target token ids and phrase ids.
widget_caption/widget_caption_model.py
compute_targets
AI21212019/google-research
python
def compute_targets(self, features): batch_size = tf.shape(features['label_flag'])[0] num_objects = tf.shape(features['label_flag'])[1] worker_position = self._caption_object_selector(features) valid_references = tf.gather(tf.reshape(features['reference'], [(- 1)]), worker_position) target_phrase = features['caption_token_id'] target_phrase = tf.reshape(target_phrase, [(batch_size * num_objects), self._MAX_DECODE_LENGTH]) valid_target_phrase = tf.gather(target_phrase, worker_position) return (valid_target_phrase, valid_references)
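compute_targets flattens the [batch, objects, ...] tensors into rows and keeps only the labeled rows with tf.gather. The selection in miniature, with made-up values:

import tensorflow as tf

# Sketch: flatten per-object rows and gather the labeled ones, as
# compute_targets does with worker_position.
phrases = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])  # [batch=2, objects=2, len=2]
flat = tf.reshape(phrases, [-1, 2])
worker_position = tf.constant([1, 2])  # indices of labeled objects
print(tf.gather(flat, worker_position).numpy())  # [[3 4] [5 6]]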
def _get_symbols_to_logits_fn(self, max_decode_length, training): 'Returns a decoding function that calculates logits of the next tokens.' timing_signal = self._position_embedding_layer(inputs=None, length=max_decode_length) decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(max_decode_length) def symbols_to_logits_fn(ids, i, cache): 'Generate logits for next potential IDs.\n\n Args:\n ids: Current decoded sequences. int tensor with shape [batch_size *\n beam_size, i + 1].\n i: Loop index.\n cache: dictionary of values storing the encoder output, encoder-decoder\n attention bias, and previous decoder attention values.\n\n Returns:\n Tuple of\n (logits with shape [batch_size * beam_size, vocab_size],\n updated cache values)\n ' decoder_input = ids[:, (- 1):] decoder_input = self._word_embedding_layer(decoder_input) decoder_input += timing_signal[i:(i + 1)] self_attention_bias = decoder_self_attention_bias[:, :, i:(i + 1), :(i + 1)] decoder_outputs = self._decoder(decoder_input, cache.get('encoder_outputs'), self_attention_bias, cache.get('encoder_decoder_attention_bias'), training=training, cache=cache) decoder_outputs = decoder_outputs[:, (- 1), :] logits = self._word_layer(decoder_outputs) return (logits, cache) return symbols_to_logits_fn
-132,147,364,262,350,850
Returns a decoding function that calculates logits of the next tokens.
widget_caption/widget_caption_model.py
_get_symbols_to_logits_fn
AI21212019/google-research
python
def _get_symbols_to_logits_fn(self, max_decode_length, training): timing_signal = self._position_embedding_layer(inputs=None, length=max_decode_length) decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(max_decode_length) def symbols_to_logits_fn(ids, i, cache): 'Generate logits for next potential IDs.\n\n Args:\n ids: Current decoded sequences. int tensor with shape [batch_size *\n beam_size, i + 1].\n i: Loop index.\n cache: dictionary of values storing the encoder output, encoder-decoder\n attention bias, and previous decoder attention values.\n\n Returns:\n Tuple of\n (logits with shape [batch_size * beam_size, vocab_size],\n updated cache values)\n ' decoder_input = ids[:, (- 1):] decoder_input = self._word_embedding_layer(decoder_input) decoder_input += timing_signal[i:(i + 1)] self_attention_bias = decoder_self_attention_bias[:, :, i:(i + 1), :(i + 1)] decoder_outputs = self._decoder(decoder_input, cache.get('encoder_outputs'), self_attention_bias, cache.get('encoder_decoder_attention_bias'), training=training, cache=cache) decoder_outputs = decoder_outputs[:, (- 1), :] logits = self._word_layer(decoder_outputs) return (logits, cache) return symbols_to_logits_fn
def symbols_to_logits_fn(ids, i, cache): 'Generate logits for next potential IDs.\n\n Args:\n ids: Current decoded sequences. int tensor with shape [batch_size *\n beam_size, i + 1].\n i: Loop index.\n cache: dictionary of values storing the encoder output, encoder-decoder\n attention bias, and previous decoder attention values.\n\n Returns:\n Tuple of\n (logits with shape [batch_size * beam_size, vocab_size],\n updated cache values)\n ' decoder_input = ids[:, (- 1):] decoder_input = self._word_embedding_layer(decoder_input) decoder_input += timing_signal[i:(i + 1)] self_attention_bias = decoder_self_attention_bias[:, :, i:(i + 1), :(i + 1)] decoder_outputs = self._decoder(decoder_input, cache.get('encoder_outputs'), self_attention_bias, cache.get('encoder_decoder_attention_bias'), training=training, cache=cache) decoder_outputs = decoder_outputs[:, (- 1), :] logits = self._word_layer(decoder_outputs) return (logits, cache)
-6,548,694,566,543,476,000
Generate logits for next potential IDs. Args: ids: Current decoded sequences. int tensor with shape [batch_size * beam_size, i + 1]. i: Loop index. cache: dictionary of values storing the encoder output, encoder-decoder attention bias, and previous decoder attention values. Returns: Tuple of (logits with shape [batch_size * beam_size, vocab_size], updated cache values)
widget_caption/widget_caption_model.py
symbols_to_logits_fn
AI21212019/google-research
python
def symbols_to_logits_fn(ids, i, cache): 'Generate logits for next potential IDs.\n\n Args:\n ids: Current decoded sequences. int tensor with shape [batch_size *\n beam_size, i + 1].\n i: Loop index.\n cache: dictionary of values storing the encoder output, encoder-decoder\n attention bias, and previous decoder attention values.\n\n Returns:\n Tuple of\n (logits with shape [batch_size * beam_size, vocab_size],\n updated cache values)\n ' decoder_input = ids[:, (- 1):] decoder_input = self._word_embedding_layer(decoder_input) decoder_input += timing_signal[i:(i + 1)] self_attention_bias = decoder_self_attention_bias[:, :, i:(i + 1), :(i + 1)] decoder_outputs = self._decoder(decoder_input, cache.get('encoder_outputs'), self_attention_bias, cache.get('encoder_decoder_attention_bias'), training=training, cache=cache) decoder_outputs = decoder_outputs[:, (- 1), :] logits = self._word_layer(decoder_outputs) return (logits, cache)
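During incremental decoding, symbols_to_logits_fn slices row i of the causal bias so step i attends only to positions 0..i. A hedged sketch of an equivalent causal bias built with band_part; the exact tensor produced by model_utils.get_decoder_self_attention_bias may differ in dtype or constant:

import tensorflow as tf

# Sketch: a causal self-attention bias and the per-step slice used
# inside symbols_to_logits_fn.
length = 4
lower = tf.linalg.band_part(tf.ones([length, length]), -1, 0)
bias = (1.0 - lower) * -1e9                # 0 where attention is allowed
bias = bias[tf.newaxis, tf.newaxis, :, :]  # [1, 1, length, length]
i = 2
step_bias = bias[:, :, i:i + 1, :i + 1]    # token i sees positions 0..i
print(step_bias.shape)                     # (1, 1, 1, 3)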
@deprecation.deprecated(date, instructions) def _fn(arg0, arg1): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n ' return (arg0 + arg1)
8,087,309,597,350,725,000
fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated(date, instructions) def _fn(arg0, arg1): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n ' return (arg0 + arg1)
@deprecation.deprecated(date, instructions) def _fn(arg0, arg1): 'fn doc.' return (arg0 + arg1)
-2,257,126,715,904,382,700
fn doc.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated(date, instructions) def _fn(arg0, arg1): return (arg0 + arg1)
@deprecation.deprecated_args(date, instructions, 'deprecated') def _fn(arg0, arg1, deprecated=True): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n ' return ((arg0 + arg1) if deprecated else (arg1 + arg0))
8,094,843,131,514,642,000
fn doc. Args: arg0: Arg 0. arg1: Arg 1. deprecated: Deprecated! Returns: Sum of args.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated_args(date, instructions, 'deprecated') def _fn(arg0, arg1, deprecated=True): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n ' return ((arg0 + arg1) if deprecated else (arg1 + arg0))
@deprecation.deprecated_args(date, instructions, 'deprecated') def _fn(arg0, arg1, deprecated=True): 'fn doc.' return ((arg0 + arg1) if deprecated else (arg1 + arg0))
94,406,864,349,320,930
fn doc.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated_args(date, instructions, 'deprecated') def _fn(arg0, arg1, deprecated=True): return ((arg0 + arg1) if deprecated else (arg1 + arg0))
@deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n ' return ((arg0 + arg1) if deprecated else (arg1 + arg0))
6,193,847,633,101,739,000
fn doc. Args: arg0: Arg 0. arg1: Arg 1. deprecated: Deprecated! Returns: Sum of args.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n ' return ((arg0 + arg1) if deprecated else (arg1 + arg0))
@deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): 'fn doc.' return ((arg0 + arg1) if deprecated else (arg1 + arg0))
1,241,249,845,256,133,000
fn doc.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): return ((arg0 + arg1) if deprecated else (arg1 + arg0))
@deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n ' return (arg0 + arg1)
3,140,255,006,421,992,400
fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): 'fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n ' return (arg0 + arg1)
@deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): 'fn doc.' return (arg0 + arg1)
-8,935,820,396,118,256,000
fn doc.
tensorflow/python/util/deprecation_test.py
_fn
AccelAI/tensorflow
python
@deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): return (arg0 + arg1)
@property @deprecation.deprecated(date, instructions) def _prop(self): 'prop doc.\n\n Returns:\n String.\n ' return 'prop_with_doc'
-8,016,098,677,120,810,000
prop doc. Returns: String.
tensorflow/python/util/deprecation_test.py
_prop
AccelAI/tensorflow
python
@property @deprecation.deprecated(date, instructions) def _prop(self): 'prop doc.\n\n Returns:\n String.\n ' return 'prop_with_doc'
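The _fn and _prop fixtures above exercise TensorFlow's deprecation decorators, which log a warning when the wrapped callable runs and splice a notice into its docstring. A minimal usage sketch; the module is TF-internal rather than public API, and the date/instructions values are illustrative:

from tensorflow.python.util import deprecation

# Hedged sketch: decorating a function so each call logs a
# deprecation warning (date may also be None).
@deprecation.deprecated('2025-01-01', 'Use add_v2 instead.')
def add(a, b):
    """Adds two numbers."""
    return a + b

print(add(1, 2))    # 3, plus a logged deprecation warning
print(add.__doc__)  # original docstring plus an inserted deprecation notice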
def terms_from_trace(tr): 'Helper function to extract elbo components from execution traces.' terms = {'log_factors': [], 'log_measures': [], 'scale': to_funsor(1.0), 'plate_vars': frozenset(), 'measure_vars': frozenset(), 'plate_to_step': dict()} for (name, node) in tr.nodes.items(): if (node['type'] == 'markov_chain'): terms['plate_to_step'][node['name']] = node['value'] for step in node['value']: terms['measure_vars'] |= frozenset({var for var in step[1:(- 1)] if (tr.nodes[var]['funsor'].get('log_measure', None) is not None)}) if ((node['type'] != 'sample') or (type(node['fn']).__name__ == '_Subsample') or node['infer'].get('_do_not_score', False)): continue terms['plate_vars'] |= frozenset((f.name for f in node['cond_indep_stack'] if f.vectorized)) if (node['funsor'].get('log_measure', None) is not None): terms['log_measures'].append(node['funsor']['log_measure']) terms['measure_vars'] |= ((frozenset(node['funsor']['value'].inputs) | {name}) - terms['plate_vars']) if (node.get('replay_active', False) and (set(node['funsor']['log_prob'].inputs) & terms['measure_vars']) and (float(to_data(node['funsor']['scale'])) != 1.0)): terms['scale'] = node['funsor']['scale'] else: node['funsor']['log_prob'] = (node['funsor']['log_prob'] * node['funsor']['scale']) if (node['is_observed'] or (not node.get('replay_skipped', False))): terms['log_factors'].append(node['funsor']['log_prob']) terms['plate_to_step'].update({plate: terms['plate_to_step'].get(plate, {}) for plate in terms['plate_vars']}) return terms
1,669,444,825,470,947,600
Helper function to extract elbo components from execution traces.
pyro/contrib/funsor/infer/traceenum_elbo.py
terms_from_trace
1989Ryan/pyro
python
def terms_from_trace(tr): terms = {'log_factors': [], 'log_measures': [], 'scale': to_funsor(1.0), 'plate_vars': frozenset(), 'measure_vars': frozenset(), 'plate_to_step': dict()} for (name, node) in tr.nodes.items(): if (node['type'] == 'markov_chain'): terms['plate_to_step'][node['name']] = node['value'] for step in node['value']: terms['measure_vars'] |= frozenset({var for var in step[1:(- 1)] if (tr.nodes[var]['funsor'].get('log_measure', None) is not None)}) if ((node['type'] != 'sample') or (type(node['fn']).__name__ == '_Subsample') or node['infer'].get('_do_not_score', False)): continue terms['plate_vars'] |= frozenset((f.name for f in node['cond_indep_stack'] if f.vectorized)) if (node['funsor'].get('log_measure', None) is not None): terms['log_measures'].append(node['funsor']['log_measure']) terms['measure_vars'] |= ((frozenset(node['funsor']['value'].inputs) | {name}) - terms['plate_vars']) if (node.get('replay_active', False) and (set(node['funsor']['log_prob'].inputs) & terms['measure_vars']) and (float(to_data(node['funsor']['scale'])) != 1.0)): terms['scale'] = node['funsor']['scale'] else: node['funsor']['log_prob'] = (node['funsor']['log_prob'] * node['funsor']['scale']) if (node['is_observed'] or (not node.get('replay_skipped', False))): terms['log_factors'].append(node['funsor']['log_prob']) terms['plate_to_step'].update({plate: terms['plate_to_step'].get(plate, {}) for plate in terms['plate_vars']}) return terms
@pytest.mark.parametrize('url', ['https://dev.renku.ch/projects/rokroskar/scratch-project/datasets/7eba3f50-1a19-4282-8a86-2497e0f43809/']) @pytest.mark.integration @flaky(max_runs=30, min_passes=1) def test_dataset_url_import_job(url, svc_client_with_repo): 'Test dataset import via url.' (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': url} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] dataset_import(user, job_id, project_id, url) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) assert (f'service: dataset import {url}' == new_commit.message) response = svc_client.get(f'/jobs/{job_id}', headers=headers) assert response assert_rpc_response(response) assert ('COMPLETED' == response.json['result']['state'])
3,253,797,028,710,346,000
Test dataset import via url.
tests/service/jobs/test_datasets.py
test_dataset_url_import_job
mohammad-sdsc/renku-python
python
@pytest.mark.parametrize('url', ['https://dev.renku.ch/projects/rokroskar/scratch-project/datasets/7eba3f50-1a19-4282-8a86-2497e0f43809/']) @pytest.mark.integration @flaky(max_runs=30, min_passes=1) def test_dataset_url_import_job(url, svc_client_with_repo): (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': url} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] dataset_import(user, job_id, project_id, url) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) assert (f'service: dataset import {url}' == new_commit.message) response = svc_client.get(f'/jobs/{job_id}', headers=headers) assert response assert_rpc_response(response) assert ('COMPLETED' == response.json['result']['state'])
@pytest.mark.parametrize('doi', ['10.5281/zenodo.3239980', '10.5281/zenodo.3188334', '10.7910/DVN/TJCLKP']) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_import_job(doi, svc_client_with_repo): 'Test dataset import via doi.' (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] dataset_import(user, job_id, project_id, doi) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) assert (f'service: dataset import {doi}' == new_commit.message) response = svc_client.get(f'/jobs/{job_id}', headers=headers) assert response assert_rpc_response(response) assert ('COMPLETED' == response.json['result']['state'])
-5,676,521,830,175,043,000
Test dataset import via doi.
tests/service/jobs/test_datasets.py
test_dataset_import_job
mohammad-sdsc/renku-python
python
@pytest.mark.parametrize('doi', ['10.5281/zenodo.3239980', '10.5281/zenodo.3188334', '10.7910/DVN/TJCLKP']) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_import_job(doi, svc_client_with_repo): (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] dataset_import(user, job_id, project_id, doi) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) assert (f'service: dataset import {doi}' == new_commit.message) response = svc_client.get(f'/jobs/{job_id}', headers=headers) assert response assert_rpc_response(response) assert ('COMPLETED' == response.json['result']['state'])
@pytest.mark.parametrize('doi,expected_err', [('junkjunkjunk', 'Invalid parameter value'), ('10.5281/zenodo.11111111111111111', 'Invalid parameter value')]) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_import_junk_job(doi, expected_err, svc_client_with_repo): 'Test dataset import.' (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] with pytest.raises(ParameterError): dataset_import(user, job_id, project_id, doi) new_commit = Repo(dest).head.commit assert (old_commit.hexsha == new_commit.hexsha) response = svc_client.get(f'/jobs/{job_id}', data=json.dumps(payload), headers=headers) assert_rpc_response(response) extras = response.json['result']['extras'] assert ('error' in extras) assert (expected_err in extras['error'])
3,725,200,170,761,904,000
Test dataset import.
tests/service/jobs/test_datasets.py
test_dataset_import_junk_job
mohammad-sdsc/renku-python
python
@pytest.mark.parametrize('doi,expected_err', [('junkjunkjunk', 'Invalid parameter value'), ('10.5281/zenodo.11111111111111111', 'Invalid parameter value')]) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_import_junk_job(doi, expected_err, svc_client_with_repo): (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] with pytest.raises(ParameterError): dataset_import(user, job_id, project_id, doi) new_commit = Repo(dest).head.commit assert (old_commit.hexsha == new_commit.hexsha) response = svc_client.get(f'/jobs/{job_id}', data=json.dumps(payload), headers=headers) assert_rpc_response(response) extras = response.json['result']['extras'] assert ('error' in extras) assert (expected_err in extras['error'])
@pytest.mark.parametrize('doi', ['10.5281/zenodo.3634052']) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_import_twice_job(doi, svc_client_with_repo): 'Test dataset import.' (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] dataset_import(user, job_id, project_id, doi) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) with pytest.raises(DatasetExistsError): dataset_import(user, job_id, project_id, doi) new_commit2 = Repo(dest).head.commit assert (new_commit.hexsha == new_commit2.hexsha) response = svc_client.get(f'/jobs/{job_id}', data=json.dumps(payload), headers=headers) assert_rpc_response(response) extras = response.json['result']['extras'] assert ('error' in extras) assert ('Dataset exists' in extras['error'])
-2,663,811,129,041,810,000
Test dataset import.
tests/service/jobs/test_datasets.py
test_dataset_import_twice_job
mohammad-sdsc/renku-python
python
@pytest.mark.parametrize('doi', ['10.5281/zenodo.3634052']) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_import_twice_job(doi, svc_client_with_repo): (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['job_id'] dataset_import(user, job_id, project_id, doi) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) with pytest.raises(DatasetExistsError): dataset_import(user, job_id, project_id, doi) new_commit2 = Repo(dest).head.commit assert (new_commit.hexsha == new_commit2.hexsha) response = svc_client.get(f'/jobs/{job_id}', data=json.dumps(payload), headers=headers) assert_rpc_response(response) extras = response.json['result']['extras'] assert ('error' in extras) assert ('Dataset exists' in extras['error'])
@pytest.mark.parametrize('url', ['https://gist.github.com/jsam/d957f306ed0fe4ff018e902df6a1c8e3']) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_add_remote_file(url, svc_client_with_repo): 'Test dataset add a remote file.' (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'short_name': uuid.uuid4().hex, 'create_dataset': True, 'files': [{'file_url': url}]} response = svc_client.post('/datasets.add', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'files', 'short_name', 'project_id'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['files'][0]['job_id'] commit_message = 'service: dataset add remote file' dataset_add_remote_file(user, job_id, project_id, True, commit_message, payload['short_name'], url) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) assert (commit_message == new_commit.message)
-4,038,006,578,331,544,600
Test dataset add a remote file.
tests/service/jobs/test_datasets.py
test_dataset_add_remote_file
mohammad-sdsc/renku-python
python
@pytest.mark.parametrize('url', ['https://gist.github.com/jsam/d957f306ed0fe4ff018e902df6a1c8e3']) @pytest.mark.integration @pytest.mark.service @flaky(max_runs=30, min_passes=1) def test_dataset_add_remote_file(url, svc_client_with_repo): (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'short_name': uuid.uuid4().hex, 'create_dataset': True, 'files': [{'file_url': url}]} response = svc_client.post('/datasets.add', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'files', 'short_name', 'project_id'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit job_id = response.json['result']['files'][0]['job_id'] commit_message = 'service: dataset add remote file' dataset_add_remote_file(user, job_id, project_id, True, commit_message, payload['short_name'], url) new_commit = Repo(dest).head.commit assert (old_commit.hexsha != new_commit.hexsha) assert (commit_message == new_commit.message)
@pytest.mark.parametrize('doi', ['10.5281/zenodo.3761586']) @pytest.mark.integration @pytest.mark.service def test_dataset_project_lock(doi, svc_client_with_repo): 'Test dataset project lock.' (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit cache_project_cleanup() new_commit = Repo(dest).head.commit assert (old_commit.hexsha == new_commit.hexsha) assert (dest.exists() and [file for file in dest.glob('*')])
4,504,835,722,190,145,500
Test dataset project lock.
tests/service/jobs/test_datasets.py
test_dataset_project_lock
mohammad-sdsc/renku-python
python
@pytest.mark.parametrize('doi', ['10.5281/zenodo.3761586']) @pytest.mark.integration @pytest.mark.service def test_dataset_project_lock(doi, svc_client_with_repo): (svc_client, headers, project_id, url_components) = svc_client_with_repo user = {'user_id': headers['Renku-User-Id']} payload = {'project_id': project_id, 'dataset_uri': doi} response = svc_client.post('/datasets.import', data=json.dumps(payload), headers=headers) assert response assert_rpc_response(response) assert ({'job_id', 'created_at'} == set(response.json['result'].keys())) dest = make_project_path(user, {'owner': url_components.owner, 'name': url_components.name}) old_commit = Repo(dest).head.commit cache_project_cleanup() new_commit = Repo(dest).head.commit assert (old_commit.hexsha == new_commit.hexsha) assert (dest.exists() and [file for file in dest.glob('*')])
async def _list_and_update(self): '\n Update current list of resources by doing a full fetch.\n\n Overwrites all current resource info.\n ' initial_resources = None kwargs = dict(label_selector=self.label_selector, field_selector=self.field_selector, _request_timeout=self.request_timeout, _preload_content=False) if (not self.omit_namespace): kwargs['namespace'] = self.namespace list_method = getattr(self.api, self.list_method_name) initial_resources_raw = (await list_method(**kwargs)) initial_resources = json.loads((await initial_resources_raw.read())) self.resources = {f"{p['metadata']['namespace']}/{p['metadata']['name']}": p for p in initial_resources['items']} if (not self.first_load_future.done()): self.first_load_future.set_result(None) return initial_resources['metadata']['resourceVersion']
8,317,582,099,941,674,000
Update current list of resources by doing a full fetch. Overwrites all current resource info.
kubespawner/reflector.py
_list_and_update
choldgraf/kubespawner
python
async def _list_and_update(self): '\n Update current list of resources by doing a full fetch.\n\n Overwrites all current resource info.\n ' initial_resources = None kwargs = dict(label_selector=self.label_selector, field_selector=self.field_selector, _request_timeout=self.request_timeout, _preload_content=False) if (not self.omit_namespace): kwargs['namespace'] = self.namespace list_method = getattr(self.api, self.list_method_name) initial_resources_raw = (await list_method(**kwargs)) initial_resources = json.loads((await initial_resources_raw.read())) self.resources = {f"{p['metadata']['namespace']}/{p['metadata']['name']}": p for p in initial_resources['items']} if (not self.first_load_future.done()): self.first_load_future.set_result(None) return initial_resources['metadata']['resourceVersion']
async def _watch_and_update(self): "\n Keeps the current list of resources up-to-date\n\n We first fetch the list of current resources, and store that. Then we\n register to be notified of changes to those resources, and keep our\n local store up-to-date based on these notifications.\n\n We also perform exponential backoff, giving up after we hit 32s\n wait time. This should protect against network connections dropping\n and intermittent unavailability of the api-server. Every time we\n recover from an exception we also do a full fetch, to pick up\n changes that might've been missed in the time we were not doing\n a watch.\n\n Since the resources are read-only in the Spawner (where they are\n used), then this is safe. The Spawner's view of the world might be\n out-of-date, but it's not going to corrupt any data.\n " selectors = [] if self.label_selector: selectors.append(('label selector=%r' % self.label_selector)) if self.field_selector: selectors.append(('field selector=%r' % self.field_selector)) log_selector = ', '.join(selectors) cur_delay = 0.1 if self.omit_namespace: ns_str = 'all namespaces' else: ns_str = 'namespace {}'.format(self.namespace) self.log.info('watching for %s with %s in %s', self.kind, log_selector, ns_str) while True: self.log.debug('Connecting %s watcher', self.kind) start = time.monotonic() w = watch.Watch() try: resource_version = (await self._list_and_update()) watch_args = {'label_selector': self.label_selector, 'field_selector': self.field_selector, 'resource_version': resource_version} if (not self.omit_namespace): watch_args['namespace'] = self.namespace if self.request_timeout: watch_args['_request_timeout'] = self.request_timeout if self.timeout_seconds: watch_args['timeout_seconds'] = self.timeout_seconds method = partial(getattr(self.api, self.list_method_name), _preload_content=False) async with w.stream(method, **watch_args) as stream: async for watch_event in stream: cur_delay = 0.1 resource = watch_event['raw_object'] ref_key = '{}/{}'.format(resource['metadata']['namespace'], resource['metadata']['name']) if (watch_event['type'] == 'DELETED'): self.resources.pop(ref_key, None) else: self.resources[ref_key] = resource if self._stopping: self.log.info('%s watcher stopped: inner', self.kind) break watch_duration = (time.monotonic() - start) if (watch_duration >= self.restart_seconds): self.log.debug('Restarting %s watcher after %i seconds', self.kind, watch_duration) break except ReadTimeoutError: self.log.warning('Read timeout watching %s, reconnecting', self.kind) continue except asyncio.CancelledError: self.log.debug('Cancelled watching %s', self.kind) raise except Exception: cur_delay = (cur_delay * 2) if (cur_delay > 30): self.log.exception('Watching resources never recovered, giving up') if self.on_failure: self.on_failure() return self.log.exception('Error when watching resources, retrying in %ss', cur_delay) (await asyncio.sleep(cur_delay)) continue else: self.log.debug('%s watcher timeout', self.kind) finally: w.stop() if self._stopping: self.log.info('%s watcher stopped: outer', self.kind) break self.log.warning('%s watcher finished', self.kind)
1,615,267,037,276,851,700
Keeps the current list of resources up-to-date We first fetch the list of current resources, and store that. Then we register to be notified of changes to those resources, and keep our local store up-to-date based on these notifications. We also perform exponential backoff, giving up after we hit 32s wait time. This should protect against network connections dropping and intermittent unavailability of the api-server. Every time we recover from an exception we also do a full fetch, to pick up changes that might've been missed in the time we were not doing a watch. Since the resources are read-only in the Spawner (where they are used), then this is safe. The Spawner's view of the world might be out-of-date, but it's not going to corrupt any data.
kubespawner/reflector.py
_watch_and_update
choldgraf/kubespawner
python
async def _watch_and_update(self): "\n Keeps the current list of resources up-to-date\n\n We first fetch the list of current resources, and store that. Then we\n register to be notified of changes to those resources, and keep our\n local store up-to-date based on these notifications.\n\n We also perform exponential backoff, giving up after we hit 32s\n wait time. This should protect against network connections dropping\n and intermittent unavailability of the api-server. Every time we\n recover from an exception we also do a full fetch, to pick up\n changes that might've been missed in the time we were not doing\n a watch.\n\n Since the resources are read-only in the Spawner (where they are\n used), then this is safe. The Spawner's view of the world might be\n out-of-date, but it's not going to corrupt any data.\n " selectors = [] if self.label_selector: selectors.append(('label selector=%r' % self.label_selector)) if self.field_selector: selectors.append(('field selector=%r' % self.field_selector)) log_selector = ', '.join(selectors) cur_delay = 0.1 if self.omit_namespace: ns_str = 'all namespaces' else: ns_str = 'namespace {}'.format(self.namespace) self.log.info('watching for %s with %s in %s', self.kind, log_selector, ns_str) while True: self.log.debug('Connecting %s watcher', self.kind) start = time.monotonic() w = watch.Watch() try: resource_version = (await self._list_and_update()) watch_args = {'label_selector': self.label_selector, 'field_selector': self.field_selector, 'resource_version': resource_version} if (not self.omit_namespace): watch_args['namespace'] = self.namespace if self.request_timeout: watch_args['_request_timeout'] = self.request_timeout if self.timeout_seconds: watch_args['timeout_seconds'] = self.timeout_seconds method = partial(getattr(self.api, self.list_method_name), _preload_content=False) async with w.stream(method, **watch_args) as stream: async for watch_event in stream: cur_delay = 0.1 resource = watch_event['raw_object'] ref_key = '{}/{}'.format(resource['metadata']['namespace'], resource['metadata']['name']) if (watch_event['type'] == 'DELETED'): self.resources.pop(ref_key, None) else: self.resources[ref_key] = resource if self._stopping: self.log.info('%s watcher stopped: inner', self.kind) break watch_duration = (time.monotonic() - start) if (watch_duration >= self.restart_seconds): self.log.debug('Restarting %s watcher after %i seconds', self.kind, watch_duration) break except ReadTimeoutError: self.log.warning('Read timeout watching %s, reconnecting', self.kind) continue except asyncio.CancelledError: self.log.debug('Cancelled watching %s', self.kind) raise except Exception: cur_delay = (cur_delay * 2) if (cur_delay > 30): self.log.exception('Watching resources never recovered, giving up') if self.on_failure: self.on_failure() return self.log.exception('Error when watching resources, retrying in %ss', cur_delay) (await asyncio.sleep(cur_delay)) continue else: self.log.debug('%s watcher timeout', self.kind) finally: w.stop() if self._stopping: self.log.info('%s watcher stopped: outer', self.kind) break self.log.warning('%s watcher finished', self.kind)
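_watch_and_update wraps the watch in a capped exponential-backoff loop, resetting the delay after each event and relisting on recovery. The retry skeleton on its own; do_work is a hypothetical stand-in for one list-and-watch pass:

import asyncio

# Hedged sketch: the backoff loop used by _watch_and_update. The
# reflector calls an on_failure hook where this sketch raises.
async def watch_forever(do_work, max_delay=30):
    delay = 0.1
    while True:
        try:
            await do_work()
            delay = 0.1  # any successful pass resets the backoff
        except asyncio.CancelledError:
            raise  # let cancellation propagate
        except Exception:
            delay *= 2
            if delay > max_delay:
                raise  # give up after repeated failures
            await asyncio.sleep(delay)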
async def start(self): "\n Start the reflection process!\n\n We'll do a blocking read of all resources first, so that we don't\n race with any operations that are checking the state of the pod\n store - such as polls. This should be called only once at the\n start of program initialization (when the singleton is being created),\n and not afterwards!\n " if (self.watch_task and (not self.watch_task.done())): raise RuntimeError('Task watching for resources is already running') (await self._list_and_update()) self.watch_task = asyncio.create_task(self._watch_and_update())
7,907,563,787,778,749,000
Start the reflection process! We'll do a blocking read of all resources first, so that we don't race with any operations that are checking the state of the pod store - such as polls. This should be called only once at the start of program initialization (when the singleton is being created), and not afterwards!
kubespawner/reflector.py
start
choldgraf/kubespawner
python
async def start(self): "\n \n " if (self.watch_task and (not self.watch_task.done())): raise RuntimeError('Task watching for resources is already running') (await self._list_and_update()) self.watch_task = asyncio.create_task(self._watch_and_update())
async def stop(self): '\n Cleanly shut down the watch task.\n ' self._stopping = True if (self.watch_task and (not self.watch_task.done())): self.watch_task.cancel() try: timeout = 5 (await asyncio.wait_for(self.watch_task, timeout)) except asyncio.TimeoutError: self.log.warning(f'Watch task did not finish in {timeout}s and was cancelled') self.watch_task = None
-6,765,147,635,296,929,000
Cleanly shut down the watch task.
kubespawner/reflector.py
stop
choldgraf/kubespawner
python
async def stop(self): '\n \n ' self._stopping = True if (self.watch_task and (not self.watch_task.done())): self.watch_task.cancel() try: timeout = 5 (await asyncio.wait_for(self.watch_task, timeout)) except asyncio.TimeoutError: self.log.warning(f'Watch task did not finish in {timeout}s and was cancelled') self.watch_task = None
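stop() demonstrates a reusable shutdown idiom: cancel the task, then bound how long we wait for it to acknowledge the cancellation. A minimal sketch of that idiom using only the standard library:

import asyncio

async def cancel_task(task, timeout=5):
    # Cancel a running task and wait up to `timeout` seconds for it to exit.
    if task and not task.done():
        task.cancel()
        try:
            await asyncio.wait_for(task, timeout)
        except asyncio.CancelledError:
            pass  # expected: the task acknowledged the cancellation
        except asyncio.TimeoutError:
            print(f'task did not finish within {timeout}s')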
def __init__(self, configuration): 'Initialize the model.\n ' super().__init__(configuration) self.loss_names = ['segmentation'] self.network_names = ['unet'] self.netunet = UNet(1, 2) self.netunet = self.netunet.to(self.device) if self.is_train: self.criterion_loss = torch.nn.CrossEntropyLoss() self.optimizer = torch.optim.Adam(self.netunet.parameters(), lr=configuration['lr']) self.optimizers = [self.optimizer] self.val_predictions = [] self.val_labels = [] self.val_images = []
-4,904,474,162,502,732,000
Initialize the model.
models/segmentation_model.py
__init__
Semere-Gr/PyTorchProjectFramework
python
def __init__(self, configuration): '\n ' super().__init__(configuration) self.loss_names = ['segmentation'] self.network_names = ['unet'] self.netunet = UNet(1, 2) self.netunet = self.netunet.to(self.device) if self.is_train: self.criterion_loss = torch.nn.CrossEntropyLoss() self.optimizer = torch.optim.Adam(self.netunet.parameters(), lr=configuration['lr']) self.optimizers = [self.optimizer] self.val_predictions = [] self.val_labels = [] self.val_images = []
def forward(self): 'Run forward pass.\n ' self.output = self.netunet(self.input)
4,487,781,734,835,777,000
Run forward pass.
models/segmentation_model.py
forward
Semere-Gr/PyTorchProjectFramework
python
def forward(self): '\n ' self.output = self.netunet(self.input)
def backward(self): 'Calculate losses; called in every training iteration.\n ' self.loss_segmentation = self.criterion_loss(self.output, self.label)
830,833,094,205,316,900
Calculate losses; called in every training iteration.
models/segmentation_model.py
backward
Semere-Gr/PyTorchProjectFramework
python
def backward(self): '\n ' self.loss_segmentation = self.criterion_loss(self.output, self.label)
def optimize_parameters(self): 'Calculate gradients and update network weights.\n ' self.loss_segmentation.backward() self.optimizer.step() self.optimizer.zero_grad() torch.cuda.empty_cache()
-8,851,077,193,266,028,000
Calculate gradients and update network weights.
models/segmentation_model.py
optimize_parameters
Semere-Gr/PyTorchProjectFramework
python
def optimize_parameters(self): '\n ' self.loss_segmentation.backward() self.optimizer.step() self.optimizer.zero_grad() torch.cuda.empty_cache()
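Together, forward(), backward(), and optimize_parameters() split one supervised training step into compute-output, compute-loss, and update phases. A hedged sketch of how a training loop would compose them, assuming the model also exposes a set_input() method that stores the batch as model.input and model.label (a common convention in this framework style, not shown in the snippets above):

def train_one_epoch(model, dataloader):
    for batch in dataloader:
        model.set_input(batch)       # hypothetical: populates model.input / model.label
        model.forward()              # fills model.output
        model.backward()             # computes model.loss_segmentation (no backprop yet)
        model.optimize_parameters()  # backprop, optimizer step, zero_grad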
def _str_to_identifier(string): 'Convert a string to a valid Python identifier.' return re.sub('\\W|^(?=\\d)', '_', string)
6,501,756,464,600,183,000
Convert a string to a valid Python identifier.
utils/test/test_tutorials.py
_str_to_identifier
ignaziopedone/qiskit-iqx-tutorials
python
def _str_to_identifier(string): return re.sub('\\W|^(?=\\d)', '_', string)
def create_test(filename): 'Return a new test function.' def test_function(self): self._run_notebook(filename) return test_function
-7,282,264,453,090,514,000
Return a new test function.
utils/test/test_tutorials.py
create_test
ignaziopedone/qiskit-iqx-tutorials
python
def create_test(filename): def test_function(self): self._run_notebook(filename) return test_function
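_str_to_identifier and create_test are typically combined to attach one generated test method per notebook to a TestCase class. A hedged sketch of that wiring, reusing the two helpers above; the TutorialTests class and its _run_notebook body are assumptions, not part of the original module:

import glob
import unittest

class TutorialTests(unittest.TestCase):
    def _run_notebook(self, filename):
        pass  # hypothetical: execute the notebook and fail on errors

# Attach one generated test method per notebook found on disk, e.g.
# 'tutorials/1_start.ipynb' -> 'test_tutorials_1_start_ipynb'.
for path in glob.glob('tutorials/**/*.ipynb', recursive=True):
    setattr(TutorialTests, 'test_' + _str_to_identifier(path), create_test(path))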
@app.route((url_prefix + 'oauth2/clients/<client_id>'), methods=['GET']) @auth_required def oauth2_clients(client_id: str) -> Response: '\n Return a single OAuth2 client application\n\n :return:\n GET /ajax/oauth2/clients/<client_id>: the requested OAuth2 client\n ' client = next((c for c in oauth_clients.values() if (c.client_id == client_id)), None) if (not client): raise UnknownClientError() return json_response(dict(id=client.client_id, name=client.name, description=client.description, icon=client.icon))
4,067,921,045,567,628,000
Return a single OAuth2 client application :return: GET /ajax/oauth2/clients/<client_id>: the requested OAuth2 client
afterglow_core/views/ajax_api/oauth2_clients.py
oauth2_clients
SkynetRTN/afterglow-access-server
python
@app.route((url_prefix + 'oauth2/clients/<client_id>'), methods=['GET']) @auth_required def oauth2_clients(client_id: str) -> Response: '\n \n ' client = next((c for c in oauth_clients.values() if (c.client_id == client_id)), None) if (not client): raise UnknownClientError() return json_response(dict(id=client.client_id, name=client.name, description=client.description, icon=client.icon))
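Since the endpoint returns a single client, a call looks like the following. A hedged illustration, assuming a locally running server; the host, port, client id, and token are all placeholders:

import requests

resp = requests.get(
    'http://localhost:5000/ajax/oauth2/clients/my-client-id',
    headers={'Authorization': 'Bearer <token>'},  # auth_required implies some credential
)
print(resp.json())  # {'id': ..., 'name': ..., 'description': ..., 'icon': ...}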
def pws_constants(t): 'Lookup-table for water vapor saturation pressure constants (A, m, Tn).' if (t < (- 20)): raise ValueError('Temperature out of range (-20 - 350°C)') if (t < 50): return (6.116441, 7.591386, 240.7263) if (t < 100): return (6.004918, 7.337936, 229.3975) if (t < 150): return (5.856548, 7.27731, 225.1033) if (t < 200): return (6.002859, 7.290361, 227.1704) return (9.980622, 7.388931, 263.1239)
3,113,030,149,863,625,000
Lookup-table for water vapor saturation pressure constants (A, m, Tn).
sht21_usermode.py
pws_constants
AmedeeBulle/collectd-python-plugins
python
def pws_constants(t): if (t < (- 20)): raise ValueError('Temperature out of range (-20 - 350°C)') if (t < 50): return (6.116441, 7.591386, 240.7263) if (t < 100): return (6.004918, 7.337936, 229.3975) if (t < 150): return (5.856548, 7.27731, 225.1033) if (t < 200): return (6.002859, 7.290361, 227.1704) return (9.980622, 7.388931, 263.1239)
def pws(t): '\n Calculate water vapor saturation pressure based on temperature (in hPa).\n\n P_{WS} = A \\cdot 10^{\\frac{m \\cdot T}{T + T_n}}\n\n ' (A, m, Tn) = pws_constants(t) power = ((m * t) / (t + Tn)) return (A * (10 ** power))
-7,582,722,841,005,355,000
Calculate water vapor saturation pressure based on temperature (in hPa). P_{WS} = A \cdot 10^{\frac{m \cdot T}{T + T_n}}
sht21_usermode.py
pws
AmedeeBulle/collectd-python-plugins
python
def pws(t): '\n \n ' (A, m, Tn) = pws_constants(t) power = ((m * t) / (t + Tn)) return (A * (10 ** power))
def pw(t, rh): '\n Calculate Pw (in hPa).\n\n P_W = P_{WS} \\cdot RH / 100\n\n ' return ((pws(t) * rh) / 100)
5,758,262,469,975,820,000
Calculate Pw (in hPa). P_W = P_{WS} \cdot RH / 100
sht21_usermode.py
pw
AmedeeBulle/collectd-python-plugins
python
def pw(t, rh): '\n \n ' return ((pws(t) * rh) / 100)
def td(t, rh): '\n Calculate the dew point (in °C).\n\n T_d = \\frac{T_n}{\\frac{m}{log_{10}\\left(\\frac{P_w}{A}\\right)} - 1}\n\n ' (A, m, Tn) = pws_constants(t) Pw = pw(t, rh) return (Tn / ((m / math.log((Pw / A), 10)) - 1))
7,101,251,171,650,572,000
Calculate the dew point (in °C). T_d = \frac{T_n}{\frac{m}{log_{10}\left(\frac{P_w}{A}\right)} - 1}
sht21_usermode.py
td
AmedeeBulle/collectd-python-plugins
python
def td(t, rh): '\n \n ' (A, m, Tn) = pws_constants(t) Pw = pw(t, rh) return (Tn / ((m / math.log((Pw / A), 10)) - 1))
def ah(t, rh): '\n Calculate the absolute humidity (in g/m³).\n\n A = C \\cdot P_w / T\n\n ' C = 2.16679 Pw = pw(t, rh) T = celsius_to_kelvin(t) return ((C * (Pw * 100)) / T)
-6,017,293,510,570,832,000
Calculate the absolute humidity (in g/m³). A = C \cdot P_w / T
sht21_usermode.py
ah
AmedeeBulle/collectd-python-plugins
python
def ah(t, rh): '\n \n ' C = 2.16679 Pw = pw(t, rh) T = celsius_to_kelvin(t) return ((C * (Pw * 100)) / T)
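A worked example tying pws, pw, td, and ah together for a typical indoor reading, reusing the functions above. celsius_to_kelvin is referenced by ah() but not shown here, so it is sketched under the assumption that it is the usual t + 273.15 conversion:

import math

def celsius_to_kelvin(t):
    return t + 273.15  # assumption: standard Celsius-to-Kelvin offset

t, rh = 25.0, 60.0           # 25°C at 60% relative humidity
print(round(pws(t), 2))      # saturation pressure: ~31.67 hPa
print(round(pw(t, rh), 2))   # actual vapor pressure: ~19.0 hPa
print(round(td(t, rh), 1))   # dew point: ~16.7°C
print(round(ah(t, rh), 1))   # absolute humidity: ~13.8 g/m³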
def labels_to_one_hot(labels, n_classes=(43 + 1)): 'Convert a 1D array of labels to a one-hot representation.\n\n Args:\n labels: 1D numpy array\n ' new_labels = np.zeros((n_classes,)) new_labels[labels] = 1 return new_labels
-3,730,193,699,893,287,000
Convert a 1D array of labels to a one-hot representation. Args: labels: 1D numpy array
test_single.py
labels_to_one_hot
stevenwudi/CarND-Traffic-Sign-Classifier-Project
python
def labels_to_one_hot(labels, n_classes=(43 + 1)): '\n \n ' new_labels = np.zeros((n_classes,)) new_labels[labels] = 1 return new_labels
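Because the function simply indexes the zero vector with whatever it receives, a scalar label yields a one-hot vector while an array of labels yields a multi-hot vector. A small illustration reusing the function above:

import numpy as np

print(labels_to_one_hot(3, n_classes=5))
# [0. 0. 0. 1. 0.]
print(labels_to_one_hot(np.array([1, 3]), n_classes=5))
# [0. 1. 0. 1. 0.]  (multi-hot when given several labels)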
def orga_events(request): 'Add data to all template contexts.' context = {'settings': settings} if (not request.path.startswith('/orga/')): return {} if ((not getattr(request, 'user', None)) or (not request.user.is_authenticated)): return context if (not getattr(request, 'event', None)): context['nav_global'] = collect_signal(nav_global, {'sender': None, 'request': request}) return context if getattr(request, 'event', None): _nav_event = [] for (_, response) in nav_event.send_robust(request.event, request=request): if isinstance(response, list): _nav_event += response else: _nav_event.append(response) warnings.warn('Please return a list in your nav_event signal receiver, not a dictionary.', DeprecationWarning) context['nav_event'] = _nav_event context['nav_settings'] = collect_signal(nav_event_settings, {'sender': request.event, 'request': request}) if ((not request.event.is_public) and request.event.settings.custom_domain and request.user.has_perm('cfp.view_event', request.event)): child_session_key = f'child_session_{request.event.pk}' child_session = request.session.get(child_session_key) s = SessionStore() if ((not child_session) or (not s.exists(child_session))): s[f'pretalx_event_access_{request.event.pk}'] = request.session.session_key s.create() context['new_session'] = s.session_key request.session[child_session_key] = s.session_key request.session['event_access'] = True else: context['new_session'] = child_session request.session['event_access'] = True return context
5,796,924,850,713,361,000
Add data to all template contexts.
src/pretalx/orga/context_processors.py
orga_events
ThomasWaldmann/pretalx
python
def orga_events(request): context = {'settings': settings} if (not request.path.startswith('/orga/')): return {} if ((not getattr(request, 'user', None)) or (not request.user.is_authenticated)): return context if (not getattr(request, 'event', None)): context['nav_global'] = collect_signal(nav_global, {'sender': None, 'request': request}) return context if getattr(request, 'event', None): _nav_event = [] for (_, response) in nav_event.send_robust(request.event, request=request): if isinstance(response, list): _nav_event += response else: _nav_event.append(response) warnings.warn('Please return a list in your nav_event signal receiver, not a dictionary.', DeprecationWarning) context['nav_event'] = _nav_event context['nav_settings'] = collect_signal(nav_event_settings, {'sender': request.event, 'request': request}) if ((not request.event.is_public) and request.event.settings.custom_domain and request.user.has_perm('cfp.view_event', request.event)): child_session_key = f'child_session_{request.event.pk}' child_session = request.session.get(child_session_key) s = SessionStore() if ((not child_session) or (not s.exists(child_session))): s[f'pretalx_event_access_{request.event.pk}'] = request.session.session_key s.create() context['new_session'] = s.session_key request.session[child_session_key] = s.session_key request.session['event_access'] = True else: context['new_session'] = child_session request.session['event_access'] = True return context
def testTrack(self): 'Test Track' pass
7,473,533,764,582,402,000
Test Track
test/test_track.py
testTrack
dgiacomo/mux-python
python
def testTrack(self): pass
async def test_default_supported_features(hass, mqtt_mock): 'Test that the correct supported features are set.' assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: DEFAULT_CONFIG})) entity = hass.states.get('vacuum.mqtttest') entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0) assert (sorted(services_to_strings(entity_features, SERVICE_TO_STRING)) == sorted(['start', 'stop', 'return_home', 'battery', 'status', 'clean_spot']))
-7,954,197,594,334,600,000
Test that the correct supported features are set.
tests/components/mqtt/test_state_vacuum.py
test_default_supported_features
FuqiangSong/home-assistant
python
async def test_default_supported_features(hass, mqtt_mock): assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: DEFAULT_CONFIG})) entity = hass.states.get('vacuum.mqtttest') entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0) assert (sorted(services_to_strings(entity_features, SERVICE_TO_STRING)) == sorted(['start', 'stop', 'return_home', 'battery', 'status', 'clean_spot']))
async def test_all_commands(hass, mqtt_mock): 'Test simple commands sent to the vacuum.' config = deepcopy(DEFAULT_CONFIG) config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) (await hass.services.async_call(DOMAIN, SERVICE_START, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'start', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_STOP, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'stop', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_PAUSE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'pause', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_LOCATE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'locate', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_CLEAN_SPOT, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'clean_spot', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_RETURN_TO_BASE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'return_to_base', 0, False) mqtt_mock.async_publish.reset_mock() (await common.async_set_fan_speed(hass, 'medium', 'vacuum.mqtttest')) mqtt_mock.async_publish.assert_called_once_with('vacuum/set_fan_speed', 'medium', 0, False) mqtt_mock.async_publish.reset_mock() (await common.async_send_command(hass, '44 FE 93', entity_id='vacuum.mqtttest')) mqtt_mock.async_publish.assert_called_once_with('vacuum/send_command', '44 FE 93', 0, False) mqtt_mock.async_publish.reset_mock() (await common.async_send_command(hass, '44 FE 93', {'key': 'value'}, entity_id='vacuum.mqtttest')) assert (json.loads(mqtt_mock.async_publish.mock_calls[(- 1)][1][1]) == {'command': '44 FE 93', 'key': 'value'})
8,679,773,904,677,183,000
Test simple commands sent to the vacuum.
tests/components/mqtt/test_state_vacuum.py
test_all_commands
FuqiangSong/home-assistant
python
async def test_all_commands(hass, mqtt_mock): config = deepcopy(DEFAULT_CONFIG) config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) (await hass.services.async_call(DOMAIN, SERVICE_START, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'start', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_STOP, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'stop', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_PAUSE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'pause', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_LOCATE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'locate', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_CLEAN_SPOT, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'clean_spot', 0, False) mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_RETURN_TO_BASE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, 'return_to_base', 0, False) mqtt_mock.async_publish.reset_mock() (await common.async_set_fan_speed(hass, 'medium', 'vacuum.mqtttest')) mqtt_mock.async_publish.assert_called_once_with('vacuum/set_fan_speed', 'medium', 0, False) mqtt_mock.async_publish.reset_mock() (await common.async_send_command(hass, '44 FE 93', entity_id='vacuum.mqtttest')) mqtt_mock.async_publish.assert_called_once_with('vacuum/send_command', '44 FE 93', 0, False) mqtt_mock.async_publish.reset_mock() (await common.async_send_command(hass, '44 FE 93', {'key': 'value'}, entity_id='vacuum.mqtttest')) assert (json.loads(mqtt_mock.async_publish.mock_calls[(- 1)][1][1]) == {'command': '44 FE 93', 'key': 'value'})
async def test_commands_without_supported_features(hass, mqtt_mock): 'Test commands which are not supported by the vacuum.' config = deepcopy(DEFAULT_CONFIG) services = mqttvacuum.STRING_TO_SERVICE['status'] config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(services, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) (await hass.services.async_call(DOMAIN, SERVICE_START, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_PAUSE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_STOP, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_RETURN_TO_BASE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_LOCATE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_CLEAN_SPOT, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await common.async_set_fan_speed(hass, 'medium', 'vacuum.mqtttest')) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await common.async_send_command(hass, '44 FE 93', {'key': 'value'}, entity_id='vacuum.mqtttest')) mqtt_mock.async_publish.assert_not_called()
4,664,079,603,008,944,000
Test commands which are not supported by the vacuum.
tests/components/mqtt/test_state_vacuum.py
test_commands_without_supported_features
FuqiangSong/home-assistant
python
async def test_commands_without_supported_features(hass, mqtt_mock): config = deepcopy(DEFAULT_CONFIG) services = mqttvacuum.STRING_TO_SERVICE['status'] config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(services, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) (await hass.services.async_call(DOMAIN, SERVICE_START, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_PAUSE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_STOP, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_RETURN_TO_BASE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_LOCATE, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await hass.services.async_call(DOMAIN, SERVICE_CLEAN_SPOT, {'entity_id': ENTITY_MATCH_ALL}, blocking=True)) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await common.async_set_fan_speed(hass, 'medium', 'vacuum.mqtttest')) mqtt_mock.async_publish.assert_not_called() mqtt_mock.async_publish.reset_mock() (await common.async_send_command(hass, '44 FE 93', {'key': 'value'}, entity_id='vacuum.mqtttest')) mqtt_mock.async_publish.assert_not_called()
async def test_status(hass, mqtt_mock): 'Test status updates from the vacuum.' config = deepcopy(DEFAULT_CONFIG) config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) message = '{\n "battery_level": 54,\n "state": "cleaning",\n "fan_speed": "max"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_CLEANING) assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 54) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') assert (state.attributes.get(ATTR_FAN_SPEED) == 'max') message = '{\n "battery_level": 61,\n "state": "docked",\n "fan_speed": "min"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_DOCKED) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-charging-60') assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 61) assert (state.attributes.get(ATTR_FAN_SPEED) == 'min') assert (state.attributes.get(ATTR_FAN_SPEED_LIST) == ['min', 'medium', 'high', 'max'])
-3,601,749,714,720,768,000
Test status updates from the vacuum.
tests/components/mqtt/test_state_vacuum.py
test_status
FuqiangSong/home-assistant
python
async def test_status(hass, mqtt_mock): config = deepcopy(DEFAULT_CONFIG) config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) message = '{\n "battery_level": 54,\n "state": "cleaning",\n "fan_speed": "max"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_CLEANING) assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 54) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') assert (state.attributes.get(ATTR_FAN_SPEED) == 'max') message = '{\n "battery_level": 61,\n "state": "docked",\n "fan_speed": "min"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_DOCKED) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-charging-60') assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 61) assert (state.attributes.get(ATTR_FAN_SPEED) == 'min') assert (state.attributes.get(ATTR_FAN_SPEED_LIST) == ['min', 'medium', 'high', 'max'])
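The commands and status tests above pin down most of what the shared DEFAULT_CONFIG must contain. A hedged reconstruction for orientation only; the actual dict lives in the test module, and the command topic value in particular is a guess (only the COMMAND_TOPIC constant is referenced):

DEFAULT_CONFIG = {
    'platform': 'mqtt',
    'schema': 'state',
    'name': 'mqtttest',
    'command_topic': 'vacuum/command',             # assumption: value never shown
    'state_topic': 'vacuum/state',                 # seen in test_status
    'send_command_topic': 'vacuum/send_command',   # seen in test_all_commands
    'set_fan_speed_topic': 'vacuum/set_fan_speed', # seen in test_all_commands
    'fan_speed_list': ['min', 'medium', 'high', 'max'],  # seen in test_status
}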
async def test_no_fan_vacuum(hass, mqtt_mock): 'Test status updates from the vacuum when fan is not supported.' config = deepcopy(DEFAULT_CONFIG) del config[mqttvacuum.CONF_FAN_SPEED_LIST] config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.DEFAULT_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) message = '{\n "battery_level": 54,\n "state": "cleaning"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_CLEANING) assert (state.attributes.get(ATTR_FAN_SPEED) is None) assert (state.attributes.get(ATTR_FAN_SPEED_LIST) is None) assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 54) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') message = '{\n "battery_level": 54,\n "state": "cleaning",\n "fan_speed": "max"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_CLEANING) assert (state.attributes.get(ATTR_FAN_SPEED) is None) assert (state.attributes.get(ATTR_FAN_SPEED_LIST) is None) assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 54) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') message = '{\n "battery_level": 61,\n "state": "docked"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_DOCKED) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-charging-60') assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 61)
-5,363,847,290,666,027,000
Test status updates from the vacuum when fan is not supported.
tests/components/mqtt/test_state_vacuum.py
test_no_fan_vacuum
FuqiangSong/home-assistant
python
async def test_no_fan_vacuum(hass, mqtt_mock): config = deepcopy(DEFAULT_CONFIG) del config[mqttvacuum.CONF_FAN_SPEED_LIST] config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.DEFAULT_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) message = '{\n "battery_level": 54,\n "state": "cleaning"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_CLEANING) assert (state.attributes.get(ATTR_FAN_SPEED) is None) assert (state.attributes.get(ATTR_FAN_SPEED_LIST) is None) assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 54) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') message = '{\n "battery_level": 54,\n "state": "cleaning",\n "fan_speed": "max"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_CLEANING) assert (state.attributes.get(ATTR_FAN_SPEED) is None) assert (state.attributes.get(ATTR_FAN_SPEED_LIST) is None) assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 54) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-50') message = '{\n "battery_level": 61,\n "state": "docked"\n }' async_fire_mqtt_message(hass, 'vacuum/state', message) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_DOCKED) assert (state.attributes.get(ATTR_BATTERY_ICON) == 'mdi:battery-charging-60') assert (state.attributes.get(ATTR_BATTERY_LEVEL) == 61)