Dataset columns (name, dtype, min / max length):

    repository_name              stringlengths      7 / 55
    func_path_in_repository      stringlengths      4 / 223
    func_name                    stringlengths      1 / 134
    whole_func_string            stringlengths      75 / 104k
    language                     stringclasses      1 value
    func_code_string             stringlengths      75 / 104k
    func_code_tokens             sequencelengths    19 / 28.4k
    func_documentation_string    stringlengths      1 / 46.9k
    func_documentation_tokens    sequencelengths    1 / 1.97k
    split_name                   stringclasses      1 value
    func_code_url                stringlengths      87 / 315
0101/pipetools
pipetools/utils.py
take_until
def take_until(condition): """ >>> [1, 4, 6, 4, 1] > take_until(X > 5) | list [1, 4] >>> [1, 4, 6, 4, 1] > take_until(X > 5).including | list [1, 4, 6] """ f = partial(takewhile, pipe | condition | operator.not_) f.attrs = {'including': take_until_including(condition)} return f
python
def take_until(condition): """ >>> [1, 4, 6, 4, 1] > take_until(X > 5) | list [1, 4] >>> [1, 4, 6, 4, 1] > take_until(X > 5).including | list [1, 4, 6] """ f = partial(takewhile, pipe | condition | operator.not_) f.attrs = {'including': take_until_including(condition)} return f
[ "def", "take_until", "(", "condition", ")", ":", "f", "=", "partial", "(", "takewhile", ",", "pipe", "|", "condition", "|", "operator", ".", "not_", ")", "f", ".", "attrs", "=", "{", "'including'", ":", "take_until_including", "(", "condition", ")", "}", "return", "f" ]
>>> [1, 4, 6, 4, 1] > take_until(X > 5) | list [1, 4] >>> [1, 4, 6, 4, 1] > take_until(X > 5).including | list [1, 4, 6]
[ ">>>", "[", "1", "4", "6", "4", "1", "]", ">", "take_until", "(", "X", ">", "5", ")", "|", "list", "[", "1", "4", "]" ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/utils.py#L319-L329
0101/pipetools
pipetools/utils.py
take_until_including
def take_until_including(condition): """ >>> [1, 4, 6, 4, 1] > take_until_including(X > 5) | list [1, 4, 6] """ def take_until_including_(interable): for i in interable: if not condition(i): yield i else: yield i break return take_until_including_
python
def take_until_including(condition): """ >>> [1, 4, 6, 4, 1] > take_until_including(X > 5) | list [1, 4, 6] """ def take_until_including_(interable): for i in interable: if not condition(i): yield i else: yield i break return take_until_including_
[ "def", "take_until_including", "(", "condition", ")", ":", "def", "take_until_including_", "(", "interable", ")", ":", "for", "i", "in", "interable", ":", "if", "not", "condition", "(", "i", ")", ":", "yield", "i", "else", ":", "yield", "i", "break", "return", "take_until_including_" ]
>>> [1, 4, 6, 4, 1] > take_until_including(X > 5) | list [1, 4, 6]
[ ">>>", "[", "1", "4", "6", "4", "1", "]", ">", "take_until_including", "(", "X", ">", "5", ")", "|", "list", "[", "1", "4", "6", "]" ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/utils.py#L334-L346
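The two pipetools rows above are complementary: take_until stops before the first element that matches the condition, and its .including attribute switches to the variant that also yields the matching element. A minimal usage sketch mirroring the doctests above, assuming the pipetools package is installed and that take_until is importable from pipetools.utils as the row's path indicates:

# Mirrors the doctests in the two rows above; assumes pipetools is installed.
from pipetools import X
from pipetools.utils import take_until

print([1, 4, 6, 4, 1] > take_until(X > 5) | list)            # [1, 4]
print([1, 4, 6, 4, 1] > take_until(X > 5).including | list)  # [1, 4, 6]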
0101/pipetools
pipetools/main.py
xpartial
def xpartial(func, *xargs, **xkwargs): """ Like :func:`functools.partial`, but can take an :class:`XObject` placeholder that will be replaced with the first positional argument when the partially applied function is called. Useful when the function's positional arguments' order doesn't fit your situation, e.g.: >>> reverse_range = xpartial(range, X, 0, -1) >>> reverse_range(5) [5, 4, 3, 2, 1] It can also be used to transform the positional argument to a keyword argument, which can come in handy inside a *pipe*:: xpartial(objects.get, id=X) Also the XObjects are evaluated, which can be used for some sort of destructuring of the argument:: xpartial(somefunc, name=X.name, number=X.contacts['number']) Lastly, unlike :func:`functools.partial`, this creates a regular function which will bind to classes (like the ``curry`` function from ``django.utils.functional``). """ any_x = any(isinstance(a, XObject) for a in xargs + tuple(xkwargs.values())) use = lambda x, value: (~x)(value) if isinstance(x, XObject) else x @wraps(func, assigned=filter(partial(hasattr, func), WRAPPER_ASSIGNMENTS)) def xpartially_applied(*func_args, **func_kwargs): if any_x: if not func_args: raise ValueError('Function "%s" partially applied with an ' 'X placeholder but called with no positional arguments.' % get_name(func)) first = func_args[0] rest = func_args[1:] args = tuple(use(x, first) for x in xargs) + rest kwargs = dict((k, use(x, first)) for k, x in dict_items(xkwargs)) kwargs.update(func_kwargs) else: args = xargs + func_args kwargs = dict(xkwargs, **func_kwargs) return func(*args, **kwargs) name = lambda: '%s(%s)' % (get_name(func), repr_args(*xargs, **xkwargs)) return set_name(name, xpartially_applied)
python
def xpartial(func, *xargs, **xkwargs): """ Like :func:`functools.partial`, but can take an :class:`XObject` placeholder that will be replaced with the first positional argument when the partially applied function is called. Useful when the function's positional arguments' order doesn't fit your situation, e.g.: >>> reverse_range = xpartial(range, X, 0, -1) >>> reverse_range(5) [5, 4, 3, 2, 1] It can also be used to transform the positional argument to a keyword argument, which can come in handy inside a *pipe*:: xpartial(objects.get, id=X) Also the XObjects are evaluated, which can be used for some sort of destructuring of the argument:: xpartial(somefunc, name=X.name, number=X.contacts['number']) Lastly, unlike :func:`functools.partial`, this creates a regular function which will bind to classes (like the ``curry`` function from ``django.utils.functional``). """ any_x = any(isinstance(a, XObject) for a in xargs + tuple(xkwargs.values())) use = lambda x, value: (~x)(value) if isinstance(x, XObject) else x @wraps(func, assigned=filter(partial(hasattr, func), WRAPPER_ASSIGNMENTS)) def xpartially_applied(*func_args, **func_kwargs): if any_x: if not func_args: raise ValueError('Function "%s" partially applied with an ' 'X placeholder but called with no positional arguments.' % get_name(func)) first = func_args[0] rest = func_args[1:] args = tuple(use(x, first) for x in xargs) + rest kwargs = dict((k, use(x, first)) for k, x in dict_items(xkwargs)) kwargs.update(func_kwargs) else: args = xargs + func_args kwargs = dict(xkwargs, **func_kwargs) return func(*args, **kwargs) name = lambda: '%s(%s)' % (get_name(func), repr_args(*xargs, **xkwargs)) return set_name(name, xpartially_applied)
[ "def", "xpartial", "(", "func", ",", "*", "xargs", ",", "*", "*", "xkwargs", ")", ":", "any_x", "=", "any", "(", "isinstance", "(", "a", ",", "XObject", ")", "for", "a", "in", "xargs", "+", "tuple", "(", "xkwargs", ".", "values", "(", ")", ")", ")", "use", "=", "lambda", "x", ",", "value", ":", "(", "~", "x", ")", "(", "value", ")", "if", "isinstance", "(", "x", ",", "XObject", ")", "else", "x", "@", "wraps", "(", "func", ",", "assigned", "=", "filter", "(", "partial", "(", "hasattr", ",", "func", ")", ",", "WRAPPER_ASSIGNMENTS", ")", ")", "def", "xpartially_applied", "(", "*", "func_args", ",", "*", "*", "func_kwargs", ")", ":", "if", "any_x", ":", "if", "not", "func_args", ":", "raise", "ValueError", "(", "'Function \"%s\" partially applied with an '", "'X placeholder but called with no positional arguments.'", "%", "get_name", "(", "func", ")", ")", "first", "=", "func_args", "[", "0", "]", "rest", "=", "func_args", "[", "1", ":", "]", "args", "=", "tuple", "(", "use", "(", "x", ",", "first", ")", "for", "x", "in", "xargs", ")", "+", "rest", "kwargs", "=", "dict", "(", "(", "k", ",", "use", "(", "x", ",", "first", ")", ")", "for", "k", ",", "x", "in", "dict_items", "(", "xkwargs", ")", ")", "kwargs", ".", "update", "(", "func_kwargs", ")", "else", ":", "args", "=", "xargs", "+", "func_args", "kwargs", "=", "dict", "(", "xkwargs", ",", "*", "*", "func_kwargs", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "name", "=", "lambda", ":", "'%s(%s)'", "%", "(", "get_name", "(", "func", ")", ",", "repr_args", "(", "*", "xargs", ",", "*", "*", "xkwargs", ")", ")", "return", "set_name", "(", "name", ",", "xpartially_applied", ")" ]
Like :func:`functools.partial`, but can take an :class:`XObject` placeholder that will be replaced with the first positional argument when the partially applied function is called. Useful when the function's positional arguments' order doesn't fit your situation, e.g.: >>> reverse_range = xpartial(range, X, 0, -1) >>> reverse_range(5) [5, 4, 3, 2, 1] It can also be used to transform the positional argument to a keyword argument, which can come in handy inside a *pipe*:: xpartial(objects.get, id=X) Also the XObjects are evaluated, which can be used for some sort of destructuring of the argument:: xpartial(somefunc, name=X.name, number=X.contacts['number']) Lastly, unlike :func:`functools.partial`, this creates a regular function which will bind to classes (like the ``curry`` function from ``django.utils.functional``).
[ "Like", ":", "func", ":", "functools", ".", "partial", "but", "can", "take", "an", ":", "class", ":", "XObject", "placeholder", "that", "will", "be", "replaced", "with", "the", "first", "positional", "argument", "when", "the", "partially", "applied", "function", "is", "called", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/main.py#L207-L255
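The xpartial docstring already carries doctests; the sketch below restates them in runnable Python 3 form (range needs an explicit list()) and adds a hypothetical lookup helper to illustrate the positional-to-keyword use the docstring describes. That X and xpartial are re-exported at the package top level is an assumption; per the row's path they certainly live in pipetools.main.

from pipetools import X, xpartial   # assumption: both names are re-exported at the package top level

# Placeholder as a positional argument (the docstring example; list() added for Python 3).
reverse_range = xpartial(range, X, 0, -1)
print(list(reverse_range(5)))        # [5, 4, 3, 2, 1]

# Turning the call-time positional argument into a keyword argument, as the docstring
# suggests for calls like objects.get(id=X); `lookup` is a hypothetical stand-in.
def lookup(table=None, key=None):
    return table[key]

price_of = xpartial(lookup, table={'apple': 3, 'pear': 5}, key=X)
print(price_of('pear'))              # 5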
pytroll/pyorbital
pyorbital/astronomy.py
gmst
def gmst(utc_time): """Greenwich mean sidereal utc_time, in radians. As defined in the AIAA 2006 implementation: http://www.celestrak.com/publications/AIAA/2006-6753/ """ ut1 = jdays2000(utc_time) / 36525.0 theta = 67310.54841 + ut1 * (876600 * 3600 + 8640184.812866 + ut1 * (0.093104 - ut1 * 6.2 * 10e-6)) return np.deg2rad(theta / 240.0) % (2 * np.pi)
python
def gmst(utc_time): """Greenwich mean sidereal utc_time, in radians. As defined in the AIAA 2006 implementation: http://www.celestrak.com/publications/AIAA/2006-6753/ """ ut1 = jdays2000(utc_time) / 36525.0 theta = 67310.54841 + ut1 * (876600 * 3600 + 8640184.812866 + ut1 * (0.093104 - ut1 * 6.2 * 10e-6)) return np.deg2rad(theta / 240.0) % (2 * np.pi)
[ "def", "gmst", "(", "utc_time", ")", ":", "ut1", "=", "jdays2000", "(", "utc_time", ")", "/", "36525.0", "theta", "=", "67310.54841", "+", "ut1", "*", "(", "876600", "*", "3600", "+", "8640184.812866", "+", "ut1", "*", "(", "0.093104", "-", "ut1", "*", "6.2", "*", "10e-6", ")", ")", "return", "np", ".", "deg2rad", "(", "theta", "/", "240.0", ")", "%", "(", "2", "*", "np", ".", "pi", ")" ]
Greenwich mean sidereal utc_time, in radians. As defined in the AIAA 2006 implementation: http://www.celestrak.com/publications/AIAA/2006-6753/
[ "Greenwich", "mean", "sidereal", "utc_time", "in", "radians", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L54-L63
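gmst above leans on a jdays2000 helper that is not shown in this row; the sketch below supplies a minimal stand-in (fractional days since the J2000.0 epoch, 2000-01-01 12:00 UTC) so the formula can be run in isolation. The body of gmst is copied verbatim from the row, including its 6.2 * 10e-6 literal.

import datetime as dt
import numpy as np

def jdays2000(utc_time):
    # Stand-in for pyorbital's helper: fractional days since 2000-01-01 12:00 UTC.
    delta = utc_time - dt.datetime(2000, 1, 1, 12, 0)
    return delta.days + (delta.seconds + delta.microseconds / 1e6) / 86400.0

def gmst(utc_time):
    # Greenwich mean sidereal time in radians, copied from the row above.
    ut1 = jdays2000(utc_time) / 36525.0
    theta = 67310.54841 + ut1 * (876600 * 3600 + 8640184.812866 +
                                 ut1 * (0.093104 - ut1 * 6.2 * 10e-6))
    return np.deg2rad(theta / 240.0) % (2 * np.pi)

print(gmst(dt.datetime(2004, 4, 6, 7, 51, 28)))   # sidereal angle in radians, 0 .. 2*pi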
pytroll/pyorbital
pyorbital/astronomy.py
sun_ecliptic_longitude
def sun_ecliptic_longitude(utc_time): """Ecliptic longitude of the sun at *utc_time*. """ jdate = jdays2000(utc_time) / 36525.0 # mean anomaly, rad m_a = np.deg2rad(357.52910 + 35999.05030 * jdate - 0.0001559 * jdate * jdate - 0.00000048 * jdate * jdate * jdate) # mean longitude, deg l_0 = 280.46645 + 36000.76983 * jdate + 0.0003032 * jdate * jdate d_l = ((1.914600 - 0.004817 * jdate - 0.000014 * jdate * jdate) * np.sin(m_a) + (0.019993 - 0.000101 * jdate) * np.sin(2 * m_a) + 0.000290 * np.sin(3 * m_a)) # true longitude, deg l__ = l_0 + d_l return np.deg2rad(l__)
python
def sun_ecliptic_longitude(utc_time): """Ecliptic longitude of the sun at *utc_time*. """ jdate = jdays2000(utc_time) / 36525.0 # mean anomaly, rad m_a = np.deg2rad(357.52910 + 35999.05030 * jdate - 0.0001559 * jdate * jdate - 0.00000048 * jdate * jdate * jdate) # mean longitude, deg l_0 = 280.46645 + 36000.76983 * jdate + 0.0003032 * jdate * jdate d_l = ((1.914600 - 0.004817 * jdate - 0.000014 * jdate * jdate) * np.sin(m_a) + (0.019993 - 0.000101 * jdate) * np.sin(2 * m_a) + 0.000290 * np.sin(3 * m_a)) # true longitude, deg l__ = l_0 + d_l return np.deg2rad(l__)
[ "def", "sun_ecliptic_longitude", "(", "utc_time", ")", ":", "jdate", "=", "jdays2000", "(", "utc_time", ")", "/", "36525.0", "# mean anomaly, rad", "m_a", "=", "np", ".", "deg2rad", "(", "357.52910", "+", "35999.05030", "*", "jdate", "-", "0.0001559", "*", "jdate", "*", "jdate", "-", "0.00000048", "*", "jdate", "*", "jdate", "*", "jdate", ")", "# mean longitude, deg", "l_0", "=", "280.46645", "+", "36000.76983", "*", "jdate", "+", "0.0003032", "*", "jdate", "*", "jdate", "d_l", "=", "(", "(", "1.914600", "-", "0.004817", "*", "jdate", "-", "0.000014", "*", "jdate", "*", "jdate", ")", "*", "np", ".", "sin", "(", "m_a", ")", "+", "(", "0.019993", "-", "0.000101", "*", "jdate", ")", "*", "np", ".", "sin", "(", "2", "*", "m_a", ")", "+", "0.000290", "*", "np", ".", "sin", "(", "3", "*", "m_a", ")", ")", "# true longitude, deg", "l__", "=", "l_0", "+", "d_l", "return", "np", ".", "deg2rad", "(", "l__", ")" ]
Ecliptic longitude of the sun at *utc_time*.
[ "Ecliptic", "longitude", "of", "the", "sun", "at", "*", "utc_time", "*", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L73-L88
pytroll/pyorbital
pyorbital/astronomy.py
sun_ra_dec
def sun_ra_dec(utc_time): """Right ascension and declination of the sun at *utc_time*. """ jdate = jdays2000(utc_time) / 36525.0 eps = np.deg2rad(23.0 + 26.0 / 60.0 + 21.448 / 3600.0 - (46.8150 * jdate + 0.00059 * jdate * jdate - 0.001813 * jdate * jdate * jdate) / 3600) eclon = sun_ecliptic_longitude(utc_time) x__ = np.cos(eclon) y__ = np.cos(eps) * np.sin(eclon) z__ = np.sin(eps) * np.sin(eclon) r__ = np.sqrt(1.0 - z__ * z__) # sun declination declination = np.arctan2(z__, r__) # right ascension right_ascension = 2 * np.arctan2(y__, (x__ + r__)) return right_ascension, declination
python
def sun_ra_dec(utc_time): """Right ascension and declination of the sun at *utc_time*. """ jdate = jdays2000(utc_time) / 36525.0 eps = np.deg2rad(23.0 + 26.0 / 60.0 + 21.448 / 3600.0 - (46.8150 * jdate + 0.00059 * jdate * jdate - 0.001813 * jdate * jdate * jdate) / 3600) eclon = sun_ecliptic_longitude(utc_time) x__ = np.cos(eclon) y__ = np.cos(eps) * np.sin(eclon) z__ = np.sin(eps) * np.sin(eclon) r__ = np.sqrt(1.0 - z__ * z__) # sun declination declination = np.arctan2(z__, r__) # right ascension right_ascension = 2 * np.arctan2(y__, (x__ + r__)) return right_ascension, declination
[ "def", "sun_ra_dec", "(", "utc_time", ")", ":", "jdate", "=", "jdays2000", "(", "utc_time", ")", "/", "36525.0", "eps", "=", "np", ".", "deg2rad", "(", "23.0", "+", "26.0", "/", "60.0", "+", "21.448", "/", "3600.0", "-", "(", "46.8150", "*", "jdate", "+", "0.00059", "*", "jdate", "*", "jdate", "-", "0.001813", "*", "jdate", "*", "jdate", "*", "jdate", ")", "/", "3600", ")", "eclon", "=", "sun_ecliptic_longitude", "(", "utc_time", ")", "x__", "=", "np", ".", "cos", "(", "eclon", ")", "y__", "=", "np", ".", "cos", "(", "eps", ")", "*", "np", ".", "sin", "(", "eclon", ")", "z__", "=", "np", ".", "sin", "(", "eps", ")", "*", "np", ".", "sin", "(", "eclon", ")", "r__", "=", "np", ".", "sqrt", "(", "1.0", "-", "z__", "*", "z__", ")", "# sun declination", "declination", "=", "np", ".", "arctan2", "(", "z__", ",", "r__", ")", "# right ascension", "right_ascension", "=", "2", "*", "np", ".", "arctan2", "(", "y__", ",", "(", "x__", "+", "r__", ")", ")", "return", "right_ascension", ",", "declination" ]
Right ascension and declination of the sun at *utc_time*.
[ "Right", "ascension", "and", "declination", "of", "the", "sun", "at", "*", "utc_time", "*", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L91-L107
pytroll/pyorbital
pyorbital/astronomy.py
get_alt_az
def get_alt_az(utc_time, lon, lat): """Return sun altitude and azimuth from *utc_time*, *lon*, and *lat*. lon,lat in degrees What is the unit of the returned angles and heights!? FIXME! """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) ra_, dec = sun_ra_dec(utc_time) h__ = _local_hour_angle(utc_time, lon, ra_) return (np.arcsin(np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__)), np.arctan2(-np.sin(h__), (np.cos(lat) * np.tan(dec) - np.sin(lat) * np.cos(h__))))
python
def get_alt_az(utc_time, lon, lat): """Return sun altitude and azimuth from *utc_time*, *lon*, and *lat*. lon,lat in degrees What is the unit of the returned angles and heights!? FIXME! """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) ra_, dec = sun_ra_dec(utc_time) h__ = _local_hour_angle(utc_time, lon, ra_) return (np.arcsin(np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__)), np.arctan2(-np.sin(h__), (np.cos(lat) * np.tan(dec) - np.sin(lat) * np.cos(h__))))
[ "def", "get_alt_az", "(", "utc_time", ",", "lon", ",", "lat", ")", ":", "lon", "=", "np", ".", "deg2rad", "(", "lon", ")", "lat", "=", "np", ".", "deg2rad", "(", "lat", ")", "ra_", ",", "dec", "=", "sun_ra_dec", "(", "utc_time", ")", "h__", "=", "_local_hour_angle", "(", "utc_time", ",", "lon", ",", "ra_", ")", "return", "(", "np", ".", "arcsin", "(", "np", ".", "sin", "(", "lat", ")", "*", "np", ".", "sin", "(", "dec", ")", "+", "np", ".", "cos", "(", "lat", ")", "*", "np", ".", "cos", "(", "dec", ")", "*", "np", ".", "cos", "(", "h__", ")", ")", ",", "np", ".", "arctan2", "(", "-", "np", ".", "sin", "(", "h__", ")", ",", "(", "np", ".", "cos", "(", "lat", ")", "*", "np", ".", "tan", "(", "dec", ")", "-", "np", ".", "sin", "(", "lat", ")", "*", "np", ".", "cos", "(", "h__", ")", ")", ")", ")" ]
Return sun altitude and azimuth from *utc_time*, *lon*, and *lat*. lon,lat in degrees What is the unit of the returned angles and heights!? FIXME!
[ "Return", "sun", "altitude", "and", "azimuth", "from", "*", "utc_time", "*", "*", "lon", "*", "and", "*", "lat", "*", ".", "lon", "lat", "in", "degrees", "What", "is", "the", "unit", "of", "the", "returned", "angles", "and", "heights!?", "FIXME!" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L118-L131
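The FIXME in the docstring above can be answered from the code itself: both return values come straight from np.arcsin and np.arctan2, so altitude and azimuth come back in radians (altitude first). A small usage sketch, assuming pyorbital is installed and the function is imported from pyorbital.astronomy as the row's path indicates:

import datetime as dt
import numpy as np
from pyorbital import astronomy   # assumes pyorbital is installed

utc_time = dt.datetime(2024, 6, 21, 12, 0)           # arbitrary example time
alt_rad, az_rad = astronomy.get_alt_az(utc_time, lon=16.0, lat=58.0)

# Outputs are radians (they come from np.arcsin / np.arctan2); convert for display.
print(np.rad2deg(alt_rad), np.rad2deg(az_rad))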
pytroll/pyorbital
pyorbital/astronomy.py
cos_zen
def cos_zen(utc_time, lon, lat): """Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*. utc_time: datetime.datetime instance of the UTC time lon and lat in degrees. """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) r_a, dec = sun_ra_dec(utc_time) h__ = _local_hour_angle(utc_time, lon, r_a) return (np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__))
python
def cos_zen(utc_time, lon, lat): """Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*. utc_time: datetime.datetime instance of the UTC time lon and lat in degrees. """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) r_a, dec = sun_ra_dec(utc_time) h__ = _local_hour_angle(utc_time, lon, r_a) return (np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__))
[ "def", "cos_zen", "(", "utc_time", ",", "lon", ",", "lat", ")", ":", "lon", "=", "np", ".", "deg2rad", "(", "lon", ")", "lat", "=", "np", ".", "deg2rad", "(", "lat", ")", "r_a", ",", "dec", "=", "sun_ra_dec", "(", "utc_time", ")", "h__", "=", "_local_hour_angle", "(", "utc_time", ",", "lon", ",", "r_a", ")", "return", "(", "np", ".", "sin", "(", "lat", ")", "*", "np", ".", "sin", "(", "dec", ")", "+", "np", ".", "cos", "(", "lat", ")", "*", "np", ".", "cos", "(", "dec", ")", "*", "np", ".", "cos", "(", "h__", ")", ")" ]
Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*. utc_time: datetime.datetime instance of the UTC time lon and lat in degrees.
[ "Cosine", "of", "the", "sun", "-", "zenith", "angle", "for", "*", "lon", "*", "*", "lat", "*", "at", "*", "utc_time", "*", ".", "utc_time", ":", "datetime", ".", "datetime", "instance", "of", "the", "UTC", "time", "lon", "and", "lat", "in", "degrees", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L134-L144
pytroll/pyorbital
pyorbital/astronomy.py
sun_zenith_angle
def sun_zenith_angle(utc_time, lon, lat): """Sun-zenith angle for *lon*, *lat* at *utc_time*. lon,lat in degrees. The angle returned is given in degrees """ return np.rad2deg(np.arccos(cos_zen(utc_time, lon, lat)))
python
def sun_zenith_angle(utc_time, lon, lat): """Sun-zenith angle for *lon*, *lat* at *utc_time*. lon,lat in degrees. The angle returned is given in degrees """ return np.rad2deg(np.arccos(cos_zen(utc_time, lon, lat)))
[ "def", "sun_zenith_angle", "(", "utc_time", ",", "lon", ",", "lat", ")", ":", "return", "np", ".", "rad2deg", "(", "np", ".", "arccos", "(", "cos_zen", "(", "utc_time", ",", "lon", ",", "lat", ")", ")", ")" ]
Sun-zenith angle for *lon*, *lat* at *utc_time*. lon,lat in degrees. The angle returned is given in degrees
[ "Sun", "-", "zenith", "angle", "for", "*", "lon", "*", "*", "lat", "*", "at", "*", "utc_time", "*", ".", "lon", "lat", "in", "degrees", ".", "The", "angle", "returned", "is", "given", "in", "degrees" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L147-L152
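cos_zen and sun_zenith_angle above take longitude and latitude in degrees and, per the docstring, sun_zenith_angle also returns degrees. A short usage sketch under the same assumption that pyorbital is installed:

import datetime as dt
from pyorbital import astronomy   # assumes pyorbital is installed

utc_time = dt.datetime(2024, 6, 21, 12, 0)
lon, lat = 16.0, 58.0                                   # degrees east / north

print(astronomy.sun_zenith_angle(utc_time, lon, lat))   # zenith angle in degrees
print(astronomy.cos_zen(utc_time, lon, lat))            # cosine of the same angle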
pytroll/pyorbital
pyorbital/astronomy.py
sun_earth_distance_correction
def sun_earth_distance_correction(utc_time): """Calculate the sun earth distance correction, relative to 1 AU. """ year = 365.256363004 # This is computed from # http://curious.astro.cornell.edu/question.php?number=582 # AU = 149597870700.0 # a = 149598261000.0 # theta = (jdays2000(utc_time) - 2) * (2 * np.pi) / year # e = 0.01671123 # r = a*(1-e*e)/(1+e * np.cos(theta)) # corr_me = (r / AU) ** 2 # from known software. corr = 1 - 0.0334 * np.cos(2 * np.pi * (jdays2000(utc_time) - 2) / year) return corr
python
def sun_earth_distance_correction(utc_time): """Calculate the sun earth distance correction, relative to 1 AU. """ year = 365.256363004 # This is computed from # http://curious.astro.cornell.edu/question.php?number=582 # AU = 149597870700.0 # a = 149598261000.0 # theta = (jdays2000(utc_time) - 2) * (2 * np.pi) / year # e = 0.01671123 # r = a*(1-e*e)/(1+e * np.cos(theta)) # corr_me = (r / AU) ** 2 # from known software. corr = 1 - 0.0334 * np.cos(2 * np.pi * (jdays2000(utc_time) - 2) / year) return corr
[ "def", "sun_earth_distance_correction", "(", "utc_time", ")", ":", "year", "=", "365.256363004", "# This is computed from", "# http://curious.astro.cornell.edu/question.php?number=582", "# AU = 149597870700.0", "# a = 149598261000.0", "# theta = (jdays2000(utc_time) - 2) * (2 * np.pi) / year", "# e = 0.01671123", "# r = a*(1-e*e)/(1+e * np.cos(theta))", "# corr_me = (r / AU) ** 2", "# from known software.", "corr", "=", "1", "-", "0.0334", "*", "np", ".", "cos", "(", "2", "*", "np", ".", "pi", "*", "(", "jdays2000", "(", "utc_time", ")", "-", "2", ")", "/", "year", ")", "return", "corr" ]
Calculate the sun earth distance correction, relative to 1 AU.
[ "Calculate", "the", "sun", "earth", "distance", "correction", "relative", "to", "1", "AU", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L155-L171
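The commented-out block inside sun_earth_distance_correction preserves the exact elliptical-orbit expression that the returned one-term cosine approximates. The sketch below evaluates both, reusing only the constants already present in that comment; jdays2000 is again a minimal stand-in for pyorbital's helper.

import datetime as dt
import numpy as np

def jdays2000(utc_time):
    # Stand-in for pyorbital's helper: fractional days since 2000-01-01 12:00 UTC.
    delta = utc_time - dt.datetime(2000, 1, 1, 12, 0)
    return delta.days + delta.seconds / 86400.0

utc_time = dt.datetime(2024, 1, 3)    # near perihelion
year = 365.256363004

# Exact expression from the comment in the row above.
AU = 149597870700.0
a = 149598261000.0
e = 0.01671123
theta = (jdays2000(utc_time) - 2) * (2 * np.pi) / year
r = a * (1 - e * e) / (1 + e * np.cos(theta))
corr_exact = (r / AU) ** 2

# One-term cosine approximation actually returned by the function.
corr_approx = 1 - 0.0334 * np.cos(2 * np.pi * (jdays2000(utc_time) - 2) / year)

print(corr_exact, corr_approx)        # both come out close to 0.967 in early January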
pytroll/pyorbital
pyorbital/astronomy.py
observer_position
def observer_position(time, lon, lat, alt): """Calculate observer ECI position. http://celestrak.com/columns/v02n03/ """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) theta = (gmst(time) + lon) % (2 * np.pi) c = 1 / np.sqrt(1 + F * (F - 2) * np.sin(lat)**2) sq = c * (1 - F)**2 achcp = (A * c + alt) * np.cos(lat) x = achcp * np.cos(theta) # kilometers y = achcp * np.sin(theta) z = (A * sq + alt) * np.sin(lat) vx = -MFACTOR * y # kilometers/second vy = MFACTOR * x vz = 0 return (x, y, z), (vx, vy, vz)
python
def observer_position(time, lon, lat, alt): """Calculate observer ECI position. http://celestrak.com/columns/v02n03/ """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) theta = (gmst(time) + lon) % (2 * np.pi) c = 1 / np.sqrt(1 + F * (F - 2) * np.sin(lat)**2) sq = c * (1 - F)**2 achcp = (A * c + alt) * np.cos(lat) x = achcp * np.cos(theta) # kilometers y = achcp * np.sin(theta) z = (A * sq + alt) * np.sin(lat) vx = -MFACTOR * y # kilometers/second vy = MFACTOR * x vz = 0 return (x, y, z), (vx, vy, vz)
[ "def", "observer_position", "(", "time", ",", "lon", ",", "lat", ",", "alt", ")", ":", "lon", "=", "np", ".", "deg2rad", "(", "lon", ")", "lat", "=", "np", ".", "deg2rad", "(", "lat", ")", "theta", "=", "(", "gmst", "(", "time", ")", "+", "lon", ")", "%", "(", "2", "*", "np", ".", "pi", ")", "c", "=", "1", "/", "np", ".", "sqrt", "(", "1", "+", "F", "*", "(", "F", "-", "2", ")", "*", "np", ".", "sin", "(", "lat", ")", "**", "2", ")", "sq", "=", "c", "*", "(", "1", "-", "F", ")", "**", "2", "achcp", "=", "(", "A", "*", "c", "+", "alt", ")", "*", "np", ".", "cos", "(", "lat", ")", "x", "=", "achcp", "*", "np", ".", "cos", "(", "theta", ")", "# kilometers", "y", "=", "achcp", "*", "np", ".", "sin", "(", "theta", ")", "z", "=", "(", "A", "*", "sq", "+", "alt", ")", "*", "np", ".", "sin", "(", "lat", ")", "vx", "=", "-", "MFACTOR", "*", "y", "# kilometers/second", "vy", "=", "MFACTOR", "*", "x", "vz", "=", "0", "return", "(", "x", ",", "y", ",", "z", ")", ",", "(", "vx", ",", "vy", ",", "vz", ")" ]
Calculate observer ECI position. http://celestrak.com/columns/v02n03/
[ "Calculate", "observer", "ECI", "position", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/astronomy.py#L174-L196
pytroll/pyorbital
pyorbital/orbital.py
get_observer_look
def get_observer_look(sat_lon, sat_lat, sat_alt, utc_time, lon, lat, alt): """Calculate observers look angle to a satellite. http://celestrak.com/columns/v02n02/ utc_time: Observation time (datetime object) lon: Longitude of observer position on ground in degrees east lat: Latitude of observer position on ground in degrees north alt: Altitude above sea-level (geoid) of observer position on ground in km Return: (Azimuth, Elevation) """ (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = astronomy.observer_position( utc_time, sat_lon, sat_lat, sat_alt) (opos_x, opos_y, opos_z), (ovel_x, ovel_y, ovel_z) = \ astronomy.observer_position(utc_time, lon, lat, alt) lon = np.deg2rad(lon) lat = np.deg2rad(lat) theta = (astronomy.gmst(utc_time) + lon) % (2 * np.pi) rx = pos_x - opos_x ry = pos_y - opos_y rz = pos_z - opos_z sin_lat = np.sin(lat) cos_lat = np.cos(lat) sin_theta = np.sin(theta) cos_theta = np.cos(theta) top_s = sin_lat * cos_theta * rx + \ sin_lat * sin_theta * ry - cos_lat * rz top_e = -sin_theta * rx + cos_theta * ry top_z = cos_lat * cos_theta * rx + \ cos_lat * sin_theta * ry + sin_lat * rz az_ = np.arctan(-top_e / top_s) if has_xarray and isinstance(az_, xr.DataArray): az_data = az_.data else: az_data = az_ if has_dask and isinstance(az_data, da.Array): az_data = da.where(top_s > 0, az_data + np.pi, az_data) az_data = da.where(az_data < 0, az_data + 2 * np.pi, az_data) else: az_data[np.where(top_s > 0)] += np.pi az_data[np.where(az_data < 0)] += 2 * np.pi if has_xarray and isinstance(az_, xr.DataArray): az_.data = az_data else: az_ = az_data rg_ = np.sqrt(rx * rx + ry * ry + rz * rz) el_ = np.arcsin(top_z / rg_) return np.rad2deg(az_), np.rad2deg(el_)
python
def get_observer_look(sat_lon, sat_lat, sat_alt, utc_time, lon, lat, alt): """Calculate observers look angle to a satellite. http://celestrak.com/columns/v02n02/ utc_time: Observation time (datetime object) lon: Longitude of observer position on ground in degrees east lat: Latitude of observer position on ground in degrees north alt: Altitude above sea-level (geoid) of observer position on ground in km Return: (Azimuth, Elevation) """ (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = astronomy.observer_position( utc_time, sat_lon, sat_lat, sat_alt) (opos_x, opos_y, opos_z), (ovel_x, ovel_y, ovel_z) = \ astronomy.observer_position(utc_time, lon, lat, alt) lon = np.deg2rad(lon) lat = np.deg2rad(lat) theta = (astronomy.gmst(utc_time) + lon) % (2 * np.pi) rx = pos_x - opos_x ry = pos_y - opos_y rz = pos_z - opos_z sin_lat = np.sin(lat) cos_lat = np.cos(lat) sin_theta = np.sin(theta) cos_theta = np.cos(theta) top_s = sin_lat * cos_theta * rx + \ sin_lat * sin_theta * ry - cos_lat * rz top_e = -sin_theta * rx + cos_theta * ry top_z = cos_lat * cos_theta * rx + \ cos_lat * sin_theta * ry + sin_lat * rz az_ = np.arctan(-top_e / top_s) if has_xarray and isinstance(az_, xr.DataArray): az_data = az_.data else: az_data = az_ if has_dask and isinstance(az_data, da.Array): az_data = da.where(top_s > 0, az_data + np.pi, az_data) az_data = da.where(az_data < 0, az_data + 2 * np.pi, az_data) else: az_data[np.where(top_s > 0)] += np.pi az_data[np.where(az_data < 0)] += 2 * np.pi if has_xarray and isinstance(az_, xr.DataArray): az_.data = az_data else: az_ = az_data rg_ = np.sqrt(rx * rx + ry * ry + rz * rz) el_ = np.arcsin(top_z / rg_) return np.rad2deg(az_), np.rad2deg(el_)
[ "def", "get_observer_look", "(", "sat_lon", ",", "sat_lat", ",", "sat_alt", ",", "utc_time", ",", "lon", ",", "lat", ",", "alt", ")", ":", "(", "pos_x", ",", "pos_y", ",", "pos_z", ")", ",", "(", "vel_x", ",", "vel_y", ",", "vel_z", ")", "=", "astronomy", ".", "observer_position", "(", "utc_time", ",", "sat_lon", ",", "sat_lat", ",", "sat_alt", ")", "(", "opos_x", ",", "opos_y", ",", "opos_z", ")", ",", "(", "ovel_x", ",", "ovel_y", ",", "ovel_z", ")", "=", "astronomy", ".", "observer_position", "(", "utc_time", ",", "lon", ",", "lat", ",", "alt", ")", "lon", "=", "np", ".", "deg2rad", "(", "lon", ")", "lat", "=", "np", ".", "deg2rad", "(", "lat", ")", "theta", "=", "(", "astronomy", ".", "gmst", "(", "utc_time", ")", "+", "lon", ")", "%", "(", "2", "*", "np", ".", "pi", ")", "rx", "=", "pos_x", "-", "opos_x", "ry", "=", "pos_y", "-", "opos_y", "rz", "=", "pos_z", "-", "opos_z", "sin_lat", "=", "np", ".", "sin", "(", "lat", ")", "cos_lat", "=", "np", ".", "cos", "(", "lat", ")", "sin_theta", "=", "np", ".", "sin", "(", "theta", ")", "cos_theta", "=", "np", ".", "cos", "(", "theta", ")", "top_s", "=", "sin_lat", "*", "cos_theta", "*", "rx", "+", "sin_lat", "*", "sin_theta", "*", "ry", "-", "cos_lat", "*", "rz", "top_e", "=", "-", "sin_theta", "*", "rx", "+", "cos_theta", "*", "ry", "top_z", "=", "cos_lat", "*", "cos_theta", "*", "rx", "+", "cos_lat", "*", "sin_theta", "*", "ry", "+", "sin_lat", "*", "rz", "az_", "=", "np", ".", "arctan", "(", "-", "top_e", "/", "top_s", ")", "if", "has_xarray", "and", "isinstance", "(", "az_", ",", "xr", ".", "DataArray", ")", ":", "az_data", "=", "az_", ".", "data", "else", ":", "az_data", "=", "az_", "if", "has_dask", "and", "isinstance", "(", "az_data", ",", "da", ".", "Array", ")", ":", "az_data", "=", "da", ".", "where", "(", "top_s", ">", "0", ",", "az_data", "+", "np", ".", "pi", ",", "az_data", ")", "az_data", "=", "da", ".", "where", "(", "az_data", "<", "0", ",", "az_data", "+", "2", "*", "np", ".", "pi", ",", "az_data", ")", "else", ":", "az_data", "[", "np", ".", "where", "(", "top_s", ">", "0", ")", "]", "+=", "np", ".", "pi", "az_data", "[", "np", ".", "where", "(", "az_data", "<", "0", ")", "]", "+=", "2", "*", "np", ".", "pi", "if", "has_xarray", "and", "isinstance", "(", "az_", ",", "xr", ".", "DataArray", ")", ":", "az_", ".", "data", "=", "az_data", "else", ":", "az_", "=", "az_data", "rg_", "=", "np", ".", "sqrt", "(", "rx", "*", "rx", "+", "ry", "*", "ry", "+", "rz", "*", "rz", ")", "el_", "=", "np", ".", "arcsin", "(", "top_z", "/", "rg_", ")", "return", "np", ".", "rad2deg", "(", "az_", ")", ",", "np", ".", "rad2deg", "(", "el_", ")" ]
Calculate observers look angle to a satellite. http://celestrak.com/columns/v02n02/ utc_time: Observation time (datetime object) lon: Longitude of observer position on ground in degrees east lat: Latitude of observer position on ground in degrees north alt: Altitude above sea-level (geoid) of observer position on ground in km Return: (Azimuth, Elevation)
[ "Calculate", "observers", "look", "angle", "to", "a", "satellite", ".", "http", ":", "//", "celestrak", ".", "com", "/", "columns", "/", "v02n02", "/" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L90-L149
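A usage sketch for the module-level get_observer_look above. The sub-satellite point is a hypothetical geostationary position, and the observer coordinates are passed as one-element arrays because the azimuth correction in this variant indexes its intermediate result; assumes pyorbital is installed.

import datetime as dt
import numpy as np
from pyorbital.orbital import get_observer_look   # assumes pyorbital is installed

utc_time = dt.datetime(2024, 6, 21, 12, 0)

sat_lon, sat_lat, sat_alt = 0.0, 0.0, 35786.0      # hypothetical geostationary point, altitude in km
obs_lon = np.array([16.0])                         # degrees east
obs_lat = np.array([58.0])                         # degrees north
obs_alt = np.array([0.05])                         # km above the geoid

az, el = get_observer_look(sat_lon, sat_lat, sat_alt, utc_time, obs_lon, obs_lat, obs_alt)
print(az, el)                                      # azimuth / elevation in degrees, per the docstring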
pytroll/pyorbital
pyorbital/orbital.py
Orbital.get_last_an_time
def get_last_an_time(self, utc_time): """Calculate time of last ascending node relative to the specified time """ # Propagate backwards to ascending node dt = np.timedelta64(10, 'm') t_old = utc_time t_new = t_old - dt pos0, vel0 = self.get_position(t_old, normalize=False) pos1, vel1 = self.get_position(t_new, normalize=False) while not (pos0[2] > 0 and pos1[2] < 0): pos0 = pos1 t_old = t_new t_new = t_old - dt pos1, vel1 = self.get_position(t_new, normalize=False) # Return if z within 1 km of an if np.abs(pos0[2]) < 1: return t_old elif np.abs(pos1[2]) < 1: return t_new # Bisect to z within 1 km while np.abs(pos1[2]) > 1: # pos0, vel0 = pos1, vel1 dt = (t_old - t_new) / 2 t_mid = t_old - dt pos1, vel1 = self.get_position(t_mid, normalize=False) if pos1[2] > 0: t_old = t_mid else: t_new = t_mid return t_mid
python
def get_last_an_time(self, utc_time): """Calculate time of last ascending node relative to the specified time """ # Propagate backwards to ascending node dt = np.timedelta64(10, 'm') t_old = utc_time t_new = t_old - dt pos0, vel0 = self.get_position(t_old, normalize=False) pos1, vel1 = self.get_position(t_new, normalize=False) while not (pos0[2] > 0 and pos1[2] < 0): pos0 = pos1 t_old = t_new t_new = t_old - dt pos1, vel1 = self.get_position(t_new, normalize=False) # Return if z within 1 km of an if np.abs(pos0[2]) < 1: return t_old elif np.abs(pos1[2]) < 1: return t_new # Bisect to z within 1 km while np.abs(pos1[2]) > 1: # pos0, vel0 = pos1, vel1 dt = (t_old - t_new) / 2 t_mid = t_old - dt pos1, vel1 = self.get_position(t_mid, normalize=False) if pos1[2] > 0: t_old = t_mid else: t_new = t_mid return t_mid
[ "def", "get_last_an_time", "(", "self", ",", "utc_time", ")", ":", "# Propagate backwards to ascending node", "dt", "=", "np", ".", "timedelta64", "(", "10", ",", "'m'", ")", "t_old", "=", "utc_time", "t_new", "=", "t_old", "-", "dt", "pos0", ",", "vel0", "=", "self", ".", "get_position", "(", "t_old", ",", "normalize", "=", "False", ")", "pos1", ",", "vel1", "=", "self", ".", "get_position", "(", "t_new", ",", "normalize", "=", "False", ")", "while", "not", "(", "pos0", "[", "2", "]", ">", "0", "and", "pos1", "[", "2", "]", "<", "0", ")", ":", "pos0", "=", "pos1", "t_old", "=", "t_new", "t_new", "=", "t_old", "-", "dt", "pos1", ",", "vel1", "=", "self", ".", "get_position", "(", "t_new", ",", "normalize", "=", "False", ")", "# Return if z within 1 km of an", "if", "np", ".", "abs", "(", "pos0", "[", "2", "]", ")", "<", "1", ":", "return", "t_old", "elif", "np", ".", "abs", "(", "pos1", "[", "2", "]", ")", "<", "1", ":", "return", "t_new", "# Bisect to z within 1 km", "while", "np", ".", "abs", "(", "pos1", "[", "2", "]", ")", ">", "1", ":", "# pos0, vel0 = pos1, vel1", "dt", "=", "(", "t_old", "-", "t_new", ")", "/", "2", "t_mid", "=", "t_old", "-", "dt", "pos1", ",", "vel1", "=", "self", ".", "get_position", "(", "t_mid", ",", "normalize", "=", "False", ")", "if", "pos1", "[", "2", "]", ">", "0", ":", "t_old", "=", "t_mid", "else", ":", "t_new", "=", "t_mid", "return", "t_mid" ]
Calculate time of last ascending node relative to the specified time
[ "Calculate", "time", "of", "last", "ascending", "node", "relative", "to", "the", "specified", "time" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L172-L206
pytroll/pyorbital
pyorbital/orbital.py
Orbital.get_position
def get_position(self, utc_time, normalize=True): """Get the cartesian position and velocity from the satellite. """ kep = self._sgdp4.propagate(utc_time) pos, vel = kep2xyz(kep) if normalize: pos /= XKMPER vel /= XKMPER * XMNPDA / SECDAY return pos, vel
python
def get_position(self, utc_time, normalize=True): """Get the cartesian position and velocity from the satellite. """ kep = self._sgdp4.propagate(utc_time) pos, vel = kep2xyz(kep) if normalize: pos /= XKMPER vel /= XKMPER * XMNPDA / SECDAY return pos, vel
[ "def", "get_position", "(", "self", ",", "utc_time", ",", "normalize", "=", "True", ")", ":", "kep", "=", "self", ".", "_sgdp4", ".", "propagate", "(", "utc_time", ")", "pos", ",", "vel", "=", "kep2xyz", "(", "kep", ")", "if", "normalize", ":", "pos", "/=", "XKMPER", "vel", "/=", "XKMPER", "*", "XMNPDA", "/", "SECDAY", "return", "pos", ",", "vel" ]
Get the cartesian position and velocity from the satellite.
[ "Get", "the", "cartesian", "position", "and", "velocity", "from", "the", "satellite", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L208-L219
pytroll/pyorbital
pyorbital/orbital.py
Orbital.get_lonlatalt
def get_lonlatalt(self, utc_time): """Calculate sublon, sublat and altitude of satellite. http://celestrak.com/columns/v02n03/ """ (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = self.get_position( utc_time, normalize=True) lon = ((np.arctan2(pos_y * XKMPER, pos_x * XKMPER) - astronomy.gmst(utc_time)) % (2 * np.pi)) lon = np.where(lon > np.pi, lon - np.pi * 2, lon) lon = np.where(lon <= -np.pi, lon + np.pi * 2, lon) r = np.sqrt(pos_x ** 2 + pos_y ** 2) lat = np.arctan2(pos_z, r) e2 = F * (2 - F) while True: lat2 = lat c = 1 / (np.sqrt(1 - e2 * (np.sin(lat2) ** 2))) lat = np.arctan2(pos_z + c * e2 * np.sin(lat2), r) if np.all(abs(lat - lat2) < 1e-10): break alt = r / np.cos(lat) - c alt *= A return np.rad2deg(lon), np.rad2deg(lat), alt
python
def get_lonlatalt(self, utc_time): """Calculate sublon, sublat and altitude of satellite. http://celestrak.com/columns/v02n03/ """ (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = self.get_position( utc_time, normalize=True) lon = ((np.arctan2(pos_y * XKMPER, pos_x * XKMPER) - astronomy.gmst(utc_time)) % (2 * np.pi)) lon = np.where(lon > np.pi, lon - np.pi * 2, lon) lon = np.where(lon <= -np.pi, lon + np.pi * 2, lon) r = np.sqrt(pos_x ** 2 + pos_y ** 2) lat = np.arctan2(pos_z, r) e2 = F * (2 - F) while True: lat2 = lat c = 1 / (np.sqrt(1 - e2 * (np.sin(lat2) ** 2))) lat = np.arctan2(pos_z + c * e2 * np.sin(lat2), r) if np.all(abs(lat - lat2) < 1e-10): break alt = r / np.cos(lat) - c alt *= A return np.rad2deg(lon), np.rad2deg(lat), alt
[ "def", "get_lonlatalt", "(", "self", ",", "utc_time", ")", ":", "(", "pos_x", ",", "pos_y", ",", "pos_z", ")", ",", "(", "vel_x", ",", "vel_y", ",", "vel_z", ")", "=", "self", ".", "get_position", "(", "utc_time", ",", "normalize", "=", "True", ")", "lon", "=", "(", "(", "np", ".", "arctan2", "(", "pos_y", "*", "XKMPER", ",", "pos_x", "*", "XKMPER", ")", "-", "astronomy", ".", "gmst", "(", "utc_time", ")", ")", "%", "(", "2", "*", "np", ".", "pi", ")", ")", "lon", "=", "np", ".", "where", "(", "lon", ">", "np", ".", "pi", ",", "lon", "-", "np", ".", "pi", "*", "2", ",", "lon", ")", "lon", "=", "np", ".", "where", "(", "lon", "<=", "-", "np", ".", "pi", ",", "lon", "+", "np", ".", "pi", "*", "2", ",", "lon", ")", "r", "=", "np", ".", "sqrt", "(", "pos_x", "**", "2", "+", "pos_y", "**", "2", ")", "lat", "=", "np", ".", "arctan2", "(", "pos_z", ",", "r", ")", "e2", "=", "F", "*", "(", "2", "-", "F", ")", "while", "True", ":", "lat2", "=", "lat", "c", "=", "1", "/", "(", "np", ".", "sqrt", "(", "1", "-", "e2", "*", "(", "np", ".", "sin", "(", "lat2", ")", "**", "2", ")", ")", ")", "lat", "=", "np", ".", "arctan2", "(", "pos_z", "+", "c", "*", "e2", "*", "np", ".", "sin", "(", "lat2", ")", ",", "r", ")", "if", "np", ".", "all", "(", "abs", "(", "lat", "-", "lat2", ")", "<", "1e-10", ")", ":", "break", "alt", "=", "r", "/", "np", ".", "cos", "(", "lat", ")", "-", "c", "alt", "*=", "A", "return", "np", ".", "rad2deg", "(", "lon", ")", ",", "np", ".", "rad2deg", "(", "lat", ")", ",", "alt" ]
Calculate sublon, sublat and altitude of satellite. http://celestrak.com/columns/v02n03/
[ "Calculate", "sublon", "sublat", "and", "altitude", "of", "satellite", ".", "http", ":", "//", "celestrak", ".", "com", "/", "columns", "/", "v02n03", "/" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L221-L245
pytroll/pyorbital
pyorbital/orbital.py
Orbital.get_observer_look
def get_observer_look(self, utc_time, lon, lat, alt): """Calculate observers look angle to a satellite. http://celestrak.com/columns/v02n02/ utc_time: Observation time (datetime object) lon: Longitude of observer position on ground in degrees east lat: Latitude of observer position on ground in degrees north alt: Altitude above sea-level (geoid) of observer position on ground in km Return: (Azimuth, Elevation) """ utc_time = dt2np(utc_time) (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = self.get_position( utc_time, normalize=False) (opos_x, opos_y, opos_z), (ovel_x, ovel_y, ovel_z) = \ astronomy.observer_position(utc_time, lon, lat, alt) lon = np.deg2rad(lon) lat = np.deg2rad(lat) theta = (astronomy.gmst(utc_time) + lon) % (2 * np.pi) rx = pos_x - opos_x ry = pos_y - opos_y rz = pos_z - opos_z sin_lat = np.sin(lat) cos_lat = np.cos(lat) sin_theta = np.sin(theta) cos_theta = np.cos(theta) top_s = sin_lat * cos_theta * rx + \ sin_lat * sin_theta * ry - cos_lat * rz top_e = -sin_theta * rx + cos_theta * ry top_z = cos_lat * cos_theta * rx + \ cos_lat * sin_theta * ry + sin_lat * rz az_ = np.arctan(-top_e / top_s) az_ = np.where(top_s > 0, az_ + np.pi, az_) az_ = np.where(az_ < 0, az_ + 2 * np.pi, az_) rg_ = np.sqrt(rx * rx + ry * ry + rz * rz) el_ = np.arcsin(top_z / rg_) return np.rad2deg(az_), np.rad2deg(el_)
python
def get_observer_look(self, utc_time, lon, lat, alt): """Calculate observers look angle to a satellite. http://celestrak.com/columns/v02n02/ utc_time: Observation time (datetime object) lon: Longitude of observer position on ground in degrees east lat: Latitude of observer position on ground in degrees north alt: Altitude above sea-level (geoid) of observer position on ground in km Return: (Azimuth, Elevation) """ utc_time = dt2np(utc_time) (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = self.get_position( utc_time, normalize=False) (opos_x, opos_y, opos_z), (ovel_x, ovel_y, ovel_z) = \ astronomy.observer_position(utc_time, lon, lat, alt) lon = np.deg2rad(lon) lat = np.deg2rad(lat) theta = (astronomy.gmst(utc_time) + lon) % (2 * np.pi) rx = pos_x - opos_x ry = pos_y - opos_y rz = pos_z - opos_z sin_lat = np.sin(lat) cos_lat = np.cos(lat) sin_theta = np.sin(theta) cos_theta = np.cos(theta) top_s = sin_lat * cos_theta * rx + \ sin_lat * sin_theta * ry - cos_lat * rz top_e = -sin_theta * rx + cos_theta * ry top_z = cos_lat * cos_theta * rx + \ cos_lat * sin_theta * ry + sin_lat * rz az_ = np.arctan(-top_e / top_s) az_ = np.where(top_s > 0, az_ + np.pi, az_) az_ = np.where(az_ < 0, az_ + 2 * np.pi, az_) rg_ = np.sqrt(rx * rx + ry * ry + rz * rz) el_ = np.arcsin(top_z / rg_) return np.rad2deg(az_), np.rad2deg(el_)
[ "def", "get_observer_look", "(", "self", ",", "utc_time", ",", "lon", ",", "lat", ",", "alt", ")", ":", "utc_time", "=", "dt2np", "(", "utc_time", ")", "(", "pos_x", ",", "pos_y", ",", "pos_z", ")", ",", "(", "vel_x", ",", "vel_y", ",", "vel_z", ")", "=", "self", ".", "get_position", "(", "utc_time", ",", "normalize", "=", "False", ")", "(", "opos_x", ",", "opos_y", ",", "opos_z", ")", ",", "(", "ovel_x", ",", "ovel_y", ",", "ovel_z", ")", "=", "astronomy", ".", "observer_position", "(", "utc_time", ",", "lon", ",", "lat", ",", "alt", ")", "lon", "=", "np", ".", "deg2rad", "(", "lon", ")", "lat", "=", "np", ".", "deg2rad", "(", "lat", ")", "theta", "=", "(", "astronomy", ".", "gmst", "(", "utc_time", ")", "+", "lon", ")", "%", "(", "2", "*", "np", ".", "pi", ")", "rx", "=", "pos_x", "-", "opos_x", "ry", "=", "pos_y", "-", "opos_y", "rz", "=", "pos_z", "-", "opos_z", "sin_lat", "=", "np", ".", "sin", "(", "lat", ")", "cos_lat", "=", "np", ".", "cos", "(", "lat", ")", "sin_theta", "=", "np", ".", "sin", "(", "theta", ")", "cos_theta", "=", "np", ".", "cos", "(", "theta", ")", "top_s", "=", "sin_lat", "*", "cos_theta", "*", "rx", "+", "sin_lat", "*", "sin_theta", "*", "ry", "-", "cos_lat", "*", "rz", "top_e", "=", "-", "sin_theta", "*", "rx", "+", "cos_theta", "*", "ry", "top_z", "=", "cos_lat", "*", "cos_theta", "*", "rx", "+", "cos_lat", "*", "sin_theta", "*", "ry", "+", "sin_lat", "*", "rz", "az_", "=", "np", ".", "arctan", "(", "-", "top_e", "/", "top_s", ")", "az_", "=", "np", ".", "where", "(", "top_s", ">", "0", ",", "az_", "+", "np", ".", "pi", ",", "az_", ")", "az_", "=", "np", ".", "where", "(", "az_", "<", "0", ",", "az_", "+", "2", "*", "np", ".", "pi", ",", "az_", ")", "rg_", "=", "np", ".", "sqrt", "(", "rx", "*", "rx", "+", "ry", "*", "ry", "+", "rz", "*", "rz", ")", "el_", "=", "np", ".", "arcsin", "(", "top_z", "/", "rg_", ")", "return", "np", ".", "rad2deg", "(", "az_", ")", ",", "np", ".", "rad2deg", "(", "el_", ")" ]
Calculate observers look angle to a satellite. http://celestrak.com/columns/v02n02/ utc_time: Observation time (datetime object) lon: Longitude of observer position on ground in degrees east lat: Latitude of observer position on ground in degrees north alt: Altitude above sea-level (geoid) of observer position on ground in km Return: (Azimuth, Elevation)
[ "Calculate", "observers", "look", "angle", "to", "a", "satellite", ".", "http", ":", "//", "celestrak", ".", "com", "/", "columns", "/", "v02n02", "/" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L253-L299
pytroll/pyorbital
pyorbital/orbital.py
Orbital.get_orbit_number
def get_orbit_number(self, utc_time, tbus_style=False): """Calculate orbit number at specified time. Optionally use TBUS-style orbit numbering (TLE orbit number + 1) """ utc_time = np.datetime64(utc_time) try: dt = astronomy._days(utc_time - self.orbit_elements.an_time) orbit_period = astronomy._days(self.orbit_elements.an_period) except AttributeError: pos_epoch, vel_epoch = self.get_position(self.tle.epoch, normalize=False) if np.abs(pos_epoch[2]) > 1 or not vel_epoch[2] > 0: # Epoch not at ascending node self.orbit_elements.an_time = self.get_last_an_time( self.tle.epoch) else: # Epoch at ascending node (z < 1 km) and positive v_z self.orbit_elements.an_time = self.tle.epoch self.orbit_elements.an_period = self.orbit_elements.an_time - \ self.get_last_an_time(self.orbit_elements.an_time - np.timedelta64(10, 'm')) dt = astronomy._days(utc_time - self.orbit_elements.an_time) orbit_period = astronomy._days(self.orbit_elements.an_period) orbit = int(self.tle.orbit + dt / orbit_period + self.tle.mean_motion_derivative * dt**2 + self.tle.mean_motion_sec_derivative * dt**3) if tbus_style: orbit += 1 return orbit
python
def get_orbit_number(self, utc_time, tbus_style=False): """Calculate orbit number at specified time. Optionally use TBUS-style orbit numbering (TLE orbit number + 1) """ utc_time = np.datetime64(utc_time) try: dt = astronomy._days(utc_time - self.orbit_elements.an_time) orbit_period = astronomy._days(self.orbit_elements.an_period) except AttributeError: pos_epoch, vel_epoch = self.get_position(self.tle.epoch, normalize=False) if np.abs(pos_epoch[2]) > 1 or not vel_epoch[2] > 0: # Epoch not at ascending node self.orbit_elements.an_time = self.get_last_an_time( self.tle.epoch) else: # Epoch at ascending node (z < 1 km) and positive v_z self.orbit_elements.an_time = self.tle.epoch self.orbit_elements.an_period = self.orbit_elements.an_time - \ self.get_last_an_time(self.orbit_elements.an_time - np.timedelta64(10, 'm')) dt = astronomy._days(utc_time - self.orbit_elements.an_time) orbit_period = astronomy._days(self.orbit_elements.an_period) orbit = int(self.tle.orbit + dt / orbit_period + self.tle.mean_motion_derivative * dt**2 + self.tle.mean_motion_sec_derivative * dt**3) if tbus_style: orbit += 1 return orbit
[ "def", "get_orbit_number", "(", "self", ",", "utc_time", ",", "tbus_style", "=", "False", ")", ":", "utc_time", "=", "np", ".", "datetime64", "(", "utc_time", ")", "try", ":", "dt", "=", "astronomy", ".", "_days", "(", "utc_time", "-", "self", ".", "orbit_elements", ".", "an_time", ")", "orbit_period", "=", "astronomy", ".", "_days", "(", "self", ".", "orbit_elements", ".", "an_period", ")", "except", "AttributeError", ":", "pos_epoch", ",", "vel_epoch", "=", "self", ".", "get_position", "(", "self", ".", "tle", ".", "epoch", ",", "normalize", "=", "False", ")", "if", "np", ".", "abs", "(", "pos_epoch", "[", "2", "]", ")", ">", "1", "or", "not", "vel_epoch", "[", "2", "]", ">", "0", ":", "# Epoch not at ascending node", "self", ".", "orbit_elements", ".", "an_time", "=", "self", ".", "get_last_an_time", "(", "self", ".", "tle", ".", "epoch", ")", "else", ":", "# Epoch at ascending node (z < 1 km) and positive v_z", "self", ".", "orbit_elements", ".", "an_time", "=", "self", ".", "tle", ".", "epoch", "self", ".", "orbit_elements", ".", "an_period", "=", "self", ".", "orbit_elements", ".", "an_time", "-", "self", ".", "get_last_an_time", "(", "self", ".", "orbit_elements", ".", "an_time", "-", "np", ".", "timedelta64", "(", "10", ",", "'m'", ")", ")", "dt", "=", "astronomy", ".", "_days", "(", "utc_time", "-", "self", ".", "orbit_elements", ".", "an_time", ")", "orbit_period", "=", "astronomy", ".", "_days", "(", "self", ".", "orbit_elements", ".", "an_period", ")", "orbit", "=", "int", "(", "self", ".", "tle", ".", "orbit", "+", "dt", "/", "orbit_period", "+", "self", ".", "tle", ".", "mean_motion_derivative", "*", "dt", "**", "2", "+", "self", ".", "tle", ".", "mean_motion_sec_derivative", "*", "dt", "**", "3", ")", "if", "tbus_style", ":", "orbit", "+=", "1", "return", "orbit" ]
Calculate orbit number at specified time. Optionally use TBUS-style orbit numbering (TLE orbit number + 1)
[ "Calculate", "orbit", "number", "at", "specified", "time", ".", "Optionally", "use", "TBUS", "-", "style", "orbit", "numbering", "(", "TLE", "orbit", "number", "+", "1", ")" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L301-L333
pytroll/pyorbital
pyorbital/orbital.py
Orbital.get_next_passes
def get_next_passes(self, utc_time, length, lon, lat, alt, tol=0.001, horizon=0): """Calculate passes for the next hours for a given start time and a given observer. Original by Martin. utc_time: Observation time (datetime object) length: Number of hours to find passes (int) lon: Longitude of observer position on ground (float) lat: Latitude of observer position on ground (float) alt: Altitude above sea-level (geoid) of observer position on ground (float) tol: precision of the result in seconds horizon: the elevation of horizon to compute risetime and falltime. Return: [(rise-time, fall-time, max-elevation-time), ...] """ def elevation(minutes): """Compute the elevation.""" return self.get_observer_look(utc_time + timedelta( minutes=np.float64(minutes)), lon, lat, alt)[1] - horizon def elevation_inv(minutes): """Compute the inverse of elevation.""" return -elevation(minutes) def get_root(fun, start, end, tol=0.01): """Root finding scheme""" x_0 = end x_1 = start fx_0 = fun(end) fx_1 = fun(start) if abs(fx_0) < abs(fx_1): fx_0, fx_1 = fx_1, fx_0 x_0, x_1 = x_1, x_0 x_n = optimize.brentq(fun, x_0, x_1) return x_n def get_max_parab(fun, start, end, tol=0.01): """Successive parabolic interpolation.""" a = float(start) c = float(end) b = (a + c) / 2.0 f_a = fun(a) f_b = fun(b) f_c = fun(c) x = b while True: x = x - 0.5 * (((b - a) ** 2 * (f_b - f_c) - (b - c) ** 2 * (f_b - f_a)) / ((b - a) * (f_b - f_c) - (b - c) * (f_b - f_a))) if np.isnan(x): return b if abs(b - x) <= tol: return x a, b, c = (a + x) / 2.0, x, (x + c) / 2.0 f_a, f_b, f_c = fun(a), fun(b), fun(c) # every minute times = utc_time + np.array([timedelta(minutes=minutes) for minutes in range(length * 60)]) elev = self.get_observer_look(times, lon, lat, alt)[1] - horizon zcs = np.where(np.diff(np.sign(elev)))[0] res = [] risetime = None falltime = None for guess in zcs: horizon_mins = get_root( elevation, guess, guess + 1.0, tol=tol / 60.0) horizon_time = utc_time + timedelta(minutes=horizon_mins) if elev[guess] < 0: risetime = horizon_time risemins = horizon_mins falltime = None else: falltime = horizon_time fallmins = horizon_mins if risetime: int_start = max(0, int(np.floor(risemins))) int_end = min(len(elev), int(np.ceil(fallmins) + 1)) middle = int_start + np.argmax(elev[int_start:int_end]) highest = utc_time + \ timedelta(minutes=get_max_parab( elevation_inv, max(risemins, middle - 1), min(fallmins, middle + 1), tol=tol / 60.0 )) res += [(risetime, falltime, highest)] risetime = None return res
python
def get_next_passes(self, utc_time, length, lon, lat, alt, tol=0.001, horizon=0): """Calculate passes for the next hours for a given start time and a given observer. Original by Martin. utc_time: Observation time (datetime object) length: Number of hours to find passes (int) lon: Longitude of observer position on ground (float) lat: Latitude of observer position on ground (float) alt: Altitude above sea-level (geoid) of observer position on ground (float) tol: precision of the result in seconds horizon: the elevation of horizon to compute risetime and falltime. Return: [(rise-time, fall-time, max-elevation-time), ...] """ def elevation(minutes): """Compute the elevation.""" return self.get_observer_look(utc_time + timedelta( minutes=np.float64(minutes)), lon, lat, alt)[1] - horizon def elevation_inv(minutes): """Compute the inverse of elevation.""" return -elevation(minutes) def get_root(fun, start, end, tol=0.01): """Root finding scheme""" x_0 = end x_1 = start fx_0 = fun(end) fx_1 = fun(start) if abs(fx_0) < abs(fx_1): fx_0, fx_1 = fx_1, fx_0 x_0, x_1 = x_1, x_0 x_n = optimize.brentq(fun, x_0, x_1) return x_n def get_max_parab(fun, start, end, tol=0.01): """Successive parabolic interpolation.""" a = float(start) c = float(end) b = (a + c) / 2.0 f_a = fun(a) f_b = fun(b) f_c = fun(c) x = b while True: x = x - 0.5 * (((b - a) ** 2 * (f_b - f_c) - (b - c) ** 2 * (f_b - f_a)) / ((b - a) * (f_b - f_c) - (b - c) * (f_b - f_a))) if np.isnan(x): return b if abs(b - x) <= tol: return x a, b, c = (a + x) / 2.0, x, (x + c) / 2.0 f_a, f_b, f_c = fun(a), fun(b), fun(c) # every minute times = utc_time + np.array([timedelta(minutes=minutes) for minutes in range(length * 60)]) elev = self.get_observer_look(times, lon, lat, alt)[1] - horizon zcs = np.where(np.diff(np.sign(elev)))[0] res = [] risetime = None falltime = None for guess in zcs: horizon_mins = get_root( elevation, guess, guess + 1.0, tol=tol / 60.0) horizon_time = utc_time + timedelta(minutes=horizon_mins) if elev[guess] < 0: risetime = horizon_time risemins = horizon_mins falltime = None else: falltime = horizon_time fallmins = horizon_mins if risetime: int_start = max(0, int(np.floor(risemins))) int_end = min(len(elev), int(np.ceil(fallmins) + 1)) middle = int_start + np.argmax(elev[int_start:int_end]) highest = utc_time + \ timedelta(minutes=get_max_parab( elevation_inv, max(risemins, middle - 1), min(fallmins, middle + 1), tol=tol / 60.0 )) res += [(risetime, falltime, highest)] risetime = None return res
[ "def", "get_next_passes", "(", "self", ",", "utc_time", ",", "length", ",", "lon", ",", "lat", ",", "alt", ",", "tol", "=", "0.001", ",", "horizon", "=", "0", ")", ":", "def", "elevation", "(", "minutes", ")", ":", "\"\"\"Compute the elevation.\"\"\"", "return", "self", ".", "get_observer_look", "(", "utc_time", "+", "timedelta", "(", "minutes", "=", "np", ".", "float64", "(", "minutes", ")", ")", ",", "lon", ",", "lat", ",", "alt", ")", "[", "1", "]", "-", "horizon", "def", "elevation_inv", "(", "minutes", ")", ":", "\"\"\"Compute the inverse of elevation.\"\"\"", "return", "-", "elevation", "(", "minutes", ")", "def", "get_root", "(", "fun", ",", "start", ",", "end", ",", "tol", "=", "0.01", ")", ":", "\"\"\"Root finding scheme\"\"\"", "x_0", "=", "end", "x_1", "=", "start", "fx_0", "=", "fun", "(", "end", ")", "fx_1", "=", "fun", "(", "start", ")", "if", "abs", "(", "fx_0", ")", "<", "abs", "(", "fx_1", ")", ":", "fx_0", ",", "fx_1", "=", "fx_1", ",", "fx_0", "x_0", ",", "x_1", "=", "x_1", ",", "x_0", "x_n", "=", "optimize", ".", "brentq", "(", "fun", ",", "x_0", ",", "x_1", ")", "return", "x_n", "def", "get_max_parab", "(", "fun", ",", "start", ",", "end", ",", "tol", "=", "0.01", ")", ":", "\"\"\"Successive parabolic interpolation.\"\"\"", "a", "=", "float", "(", "start", ")", "c", "=", "float", "(", "end", ")", "b", "=", "(", "a", "+", "c", ")", "/", "2.0", "f_a", "=", "fun", "(", "a", ")", "f_b", "=", "fun", "(", "b", ")", "f_c", "=", "fun", "(", "c", ")", "x", "=", "b", "while", "True", ":", "x", "=", "x", "-", "0.5", "*", "(", "(", "(", "b", "-", "a", ")", "**", "2", "*", "(", "f_b", "-", "f_c", ")", "-", "(", "b", "-", "c", ")", "**", "2", "*", "(", "f_b", "-", "f_a", ")", ")", "/", "(", "(", "b", "-", "a", ")", "*", "(", "f_b", "-", "f_c", ")", "-", "(", "b", "-", "c", ")", "*", "(", "f_b", "-", "f_a", ")", ")", ")", "if", "np", ".", "isnan", "(", "x", ")", ":", "return", "b", "if", "abs", "(", "b", "-", "x", ")", "<=", "tol", ":", "return", "x", "a", ",", "b", ",", "c", "=", "(", "a", "+", "x", ")", "/", "2.0", ",", "x", ",", "(", "x", "+", "c", ")", "/", "2.0", "f_a", ",", "f_b", ",", "f_c", "=", "fun", "(", "a", ")", ",", "fun", "(", "b", ")", ",", "fun", "(", "c", ")", "# every minute", "times", "=", "utc_time", "+", "np", ".", "array", "(", "[", "timedelta", "(", "minutes", "=", "minutes", ")", "for", "minutes", "in", "range", "(", "length", "*", "60", ")", "]", ")", "elev", "=", "self", ".", "get_observer_look", "(", "times", ",", "lon", ",", "lat", ",", "alt", ")", "[", "1", "]", "-", "horizon", "zcs", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "np", ".", "sign", "(", "elev", ")", ")", ")", "[", "0", "]", "res", "=", "[", "]", "risetime", "=", "None", "falltime", "=", "None", "for", "guess", "in", "zcs", ":", "horizon_mins", "=", "get_root", "(", "elevation", ",", "guess", ",", "guess", "+", "1.0", ",", "tol", "=", "tol", "/", "60.0", ")", "horizon_time", "=", "utc_time", "+", "timedelta", "(", "minutes", "=", "horizon_mins", ")", "if", "elev", "[", "guess", "]", "<", "0", ":", "risetime", "=", "horizon_time", "risemins", "=", "horizon_mins", "falltime", "=", "None", "else", ":", "falltime", "=", "horizon_time", "fallmins", "=", "horizon_mins", "if", "risetime", ":", "int_start", "=", "max", "(", "0", ",", "int", "(", "np", ".", "floor", "(", "risemins", ")", ")", ")", "int_end", "=", "min", "(", "len", "(", "elev", ")", ",", "int", "(", "np", ".", "ceil", "(", "fallmins", ")", "+", "1", ")", ")", "middle", "=", "int_start", "+", "np", ".", "argmax", "(", 
"elev", "[", "int_start", ":", "int_end", "]", ")", "highest", "=", "utc_time", "+", "timedelta", "(", "minutes", "=", "get_max_parab", "(", "elevation_inv", ",", "max", "(", "risemins", ",", "middle", "-", "1", ")", ",", "min", "(", "fallmins", ",", "middle", "+", "1", ")", ",", "tol", "=", "tol", "/", "60.0", ")", ")", "res", "+=", "[", "(", "risetime", ",", "falltime", ",", "highest", ")", "]", "risetime", "=", "None", "return", "res" ]
Calculate passes for the next hours for a given start time and a given observer.

Original by Martin.

utc_time: Observation time (datetime object)
length: Number of hours to find passes (int)
lon: Longitude of observer position on ground (float)
lat: Latitude of observer position on ground (float)
alt: Altitude above sea-level (geoid) of observer position on ground (float)
tol: precision of the result in seconds
horizon: the elevation of the horizon used to compute rise and fall times

Return: [(rise-time, fall-time, max-elevation-time), ...]
[ "Calculate", "passes", "for", "the", "next", "hours", "for", "a", "given", "start", "time", "and", "a", "given", "observer", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L335-L431
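A minimal usage sketch for get_next_passes (not part of the record above). It assumes network access so that Orbital can fetch a current TLE; the platform name and observer coordinates are illustrative only.

from datetime import datetime
from pyorbital.orbital import Orbital

# Orbital fetches a TLE from the internet when no file or lines are given;
# the platform name must resolve to an entry in the TLE source.
orb = Orbital("Suomi NPP")
start = datetime.utcnow()
# Passes during the next 24 hours for an observer near Stockholm, with a
# 10-degree horizon so rise/fall mark the crossings of that elevation.
for rise, fall, max_elev_time in orb.get_next_passes(start, 24, 18.07, 59.33, 0.0, horizon=10):
    print(rise, fall, max_elev_time)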
pytroll/pyorbital
pyorbital/orbital.py
Orbital._get_time_at_horizon
def _get_time_at_horizon(self, utc_time, obslon, obslat, **kwargs): """Get the time closest in time to *utc_time* when the satellite is at the horizon relative to the position of an observer on ground (altitude = 0) Note: This is considered deprecated and it's functionality is currently replaced by 'get_next_passes'. """ warnings.warn("_get_time_at_horizon is replaced with get_next_passes", DeprecationWarning) if "precision" in kwargs: precision = kwargs['precision'] else: precision = timedelta(seconds=0.001) if "max_iterations" in kwargs: nmax_iter = kwargs["max_iterations"] else: nmax_iter = 100 sec_step = 0.5 t_step = timedelta(seconds=sec_step / 2.0) # Local derivative: def fprime(timex): el0 = self.get_observer_look(timex - t_step, obslon, obslat, 0.0)[1] el1 = self.get_observer_look(timex + t_step, obslon, obslat, 0.0)[1] return el0, (abs(el1) - abs(el0)) / sec_step tx0 = utc_time - timedelta(seconds=1.0) tx1 = utc_time idx = 0 # eps = 500. eps = 100. while abs(tx1 - tx0) > precision and idx < nmax_iter: tx0 = tx1 fpr = fprime(tx0) # When the elevation is high the scale is high, and when # the elevation is low the scale is low # var_scale = np.abs(np.sin(fpr[0] * np.pi/180.)) # var_scale = np.sqrt(var_scale) var_scale = np.abs(fpr[0]) tx1 = tx0 - timedelta(seconds=(eps * var_scale * fpr[1])) idx = idx + 1 # print idx, tx0, tx1, var_scale, fpr if abs(tx1 - utc_time) < precision and idx < 2: tx1 = tx1 + timedelta(seconds=1.0) if abs(tx1 - tx0) <= precision and idx < nmax_iter: return tx1 else: return None
python
def _get_time_at_horizon(self, utc_time, obslon, obslat, **kwargs): """Get the time closest in time to *utc_time* when the satellite is at the horizon relative to the position of an observer on ground (altitude = 0) Note: This is considered deprecated and it's functionality is currently replaced by 'get_next_passes'. """ warnings.warn("_get_time_at_horizon is replaced with get_next_passes", DeprecationWarning) if "precision" in kwargs: precision = kwargs['precision'] else: precision = timedelta(seconds=0.001) if "max_iterations" in kwargs: nmax_iter = kwargs["max_iterations"] else: nmax_iter = 100 sec_step = 0.5 t_step = timedelta(seconds=sec_step / 2.0) # Local derivative: def fprime(timex): el0 = self.get_observer_look(timex - t_step, obslon, obslat, 0.0)[1] el1 = self.get_observer_look(timex + t_step, obslon, obslat, 0.0)[1] return el0, (abs(el1) - abs(el0)) / sec_step tx0 = utc_time - timedelta(seconds=1.0) tx1 = utc_time idx = 0 # eps = 500. eps = 100. while abs(tx1 - tx0) > precision and idx < nmax_iter: tx0 = tx1 fpr = fprime(tx0) # When the elevation is high the scale is high, and when # the elevation is low the scale is low # var_scale = np.abs(np.sin(fpr[0] * np.pi/180.)) # var_scale = np.sqrt(var_scale) var_scale = np.abs(fpr[0]) tx1 = tx0 - timedelta(seconds=(eps * var_scale * fpr[1])) idx = idx + 1 # print idx, tx0, tx1, var_scale, fpr if abs(tx1 - utc_time) < precision and idx < 2: tx1 = tx1 + timedelta(seconds=1.0) if abs(tx1 - tx0) <= precision and idx < nmax_iter: return tx1 else: return None
[ "def", "_get_time_at_horizon", "(", "self", ",", "utc_time", ",", "obslon", ",", "obslat", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"_get_time_at_horizon is replaced with get_next_passes\"", ",", "DeprecationWarning", ")", "if", "\"precision\"", "in", "kwargs", ":", "precision", "=", "kwargs", "[", "'precision'", "]", "else", ":", "precision", "=", "timedelta", "(", "seconds", "=", "0.001", ")", "if", "\"max_iterations\"", "in", "kwargs", ":", "nmax_iter", "=", "kwargs", "[", "\"max_iterations\"", "]", "else", ":", "nmax_iter", "=", "100", "sec_step", "=", "0.5", "t_step", "=", "timedelta", "(", "seconds", "=", "sec_step", "/", "2.0", ")", "# Local derivative:", "def", "fprime", "(", "timex", ")", ":", "el0", "=", "self", ".", "get_observer_look", "(", "timex", "-", "t_step", ",", "obslon", ",", "obslat", ",", "0.0", ")", "[", "1", "]", "el1", "=", "self", ".", "get_observer_look", "(", "timex", "+", "t_step", ",", "obslon", ",", "obslat", ",", "0.0", ")", "[", "1", "]", "return", "el0", ",", "(", "abs", "(", "el1", ")", "-", "abs", "(", "el0", ")", ")", "/", "sec_step", "tx0", "=", "utc_time", "-", "timedelta", "(", "seconds", "=", "1.0", ")", "tx1", "=", "utc_time", "idx", "=", "0", "# eps = 500.", "eps", "=", "100.", "while", "abs", "(", "tx1", "-", "tx0", ")", ">", "precision", "and", "idx", "<", "nmax_iter", ":", "tx0", "=", "tx1", "fpr", "=", "fprime", "(", "tx0", ")", "# When the elevation is high the scale is high, and when", "# the elevation is low the scale is low", "# var_scale = np.abs(np.sin(fpr[0] * np.pi/180.))", "# var_scale = np.sqrt(var_scale)", "var_scale", "=", "np", ".", "abs", "(", "fpr", "[", "0", "]", ")", "tx1", "=", "tx0", "-", "timedelta", "(", "seconds", "=", "(", "eps", "*", "var_scale", "*", "fpr", "[", "1", "]", ")", ")", "idx", "=", "idx", "+", "1", "# print idx, tx0, tx1, var_scale, fpr", "if", "abs", "(", "tx1", "-", "utc_time", ")", "<", "precision", "and", "idx", "<", "2", ":", "tx1", "=", "tx1", "+", "timedelta", "(", "seconds", "=", "1.0", ")", "if", "abs", "(", "tx1", "-", "tx0", ")", "<=", "precision", "and", "idx", "<", "nmax_iter", ":", "return", "tx1", "else", ":", "return", "None" ]
Get the time closest in time to *utc_time* when the satellite is at the
horizon relative to the position of an observer on ground (altitude = 0).

Note: This is considered deprecated and its functionality is currently
replaced by 'get_next_passes'.
[ "Get", "the", "time", "closest", "in", "time", "to", "*", "utc_time", "*", "when", "the", "satellite", "is", "at", "the", "horizon", "relative", "to", "the", "position", "of", "an", "observer", "on", "ground", "(", "altitude", "=", "0", ")" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L433-L485
pytroll/pyorbital
pyorbital/geoloc.py
subpoint
def subpoint(query_point, a=A, b=B): """Get the point on the ellipsoid under the *query_point*.""" x, y, z = query_point lat = geodetic_lat(query_point) lon = np.arctan2(y, x) e2_ = (a * a - b * b) / (a * a) n__ = a / np.sqrt(1 - e2_ * np.sin(lat)**2) nx_ = n__ * np.cos(lat) * np.cos(lon) ny_ = n__ * np.cos(lat) * np.sin(lon) nz_ = (1 - e2_) * n__ * np.sin(lat) return np.stack([nx_, ny_, nz_], axis=0)
python
def subpoint(query_point, a=A, b=B): """Get the point on the ellipsoid under the *query_point*.""" x, y, z = query_point lat = geodetic_lat(query_point) lon = np.arctan2(y, x) e2_ = (a * a - b * b) / (a * a) n__ = a / np.sqrt(1 - e2_ * np.sin(lat)**2) nx_ = n__ * np.cos(lat) * np.cos(lon) ny_ = n__ * np.cos(lat) * np.sin(lon) nz_ = (1 - e2_) * n__ * np.sin(lat) return np.stack([nx_, ny_, nz_], axis=0)
[ "def", "subpoint", "(", "query_point", ",", "a", "=", "A", ",", "b", "=", "B", ")", ":", "x", ",", "y", ",", "z", "=", "query_point", "lat", "=", "geodetic_lat", "(", "query_point", ")", "lon", "=", "np", ".", "arctan2", "(", "y", ",", "x", ")", "e2_", "=", "(", "a", "*", "a", "-", "b", "*", "b", ")", "/", "(", "a", "*", "a", ")", "n__", "=", "a", "/", "np", ".", "sqrt", "(", "1", "-", "e2_", "*", "np", ".", "sin", "(", "lat", ")", "**", "2", ")", "nx_", "=", "n__", "*", "np", ".", "cos", "(", "lat", ")", "*", "np", ".", "cos", "(", "lon", ")", "ny_", "=", "n__", "*", "np", ".", "cos", "(", "lat", ")", "*", "np", ".", "sin", "(", "lon", ")", "nz_", "=", "(", "1", "-", "e2_", ")", "*", "n__", "*", "np", ".", "sin", "(", "lat", ")", "return", "np", ".", "stack", "(", "[", "nx_", ",", "ny_", ",", "nz_", "]", ",", "axis", "=", "0", ")" ]
Get the point on the ellipsoid under the *query_point*.
[ "Get", "the", "point", "on", "the", "ellipsoid", "under", "the", "*", "query_point", "*", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc.py#L61-L73
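An illustrative call, assuming the kilometre-based cartesian convention used by the A and B defaults in geoloc.py.

import numpy as np
from pyorbital.geoloc import subpoint

# A point 800 km above the equator projects straight down onto the ellipsoid,
# so the result should be approximately (6378.137, 0, 0).
query_point = np.array([6378.137 + 800.0, 0.0, 0.0])
print(subpoint(query_point))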
pytroll/pyorbital
pyorbital/geoloc.py
qrotate
def qrotate(vector, axis, angle): """Rotate *vector* around *axis* by *angle* (in radians). *vector* is a matrix of column vectors, as is *axis*. This function uses quaternion rotation. """ n_axis = axis / vnorm(axis) sin_angle = np.expand_dims(np.sin(angle / 2), 0) if np.ndim(n_axis) == 1: n_axis = np.expand_dims(n_axis, 1) p__ = np.dot(n_axis, sin_angle)[:, np.newaxis] else: p__ = n_axis * sin_angle q__ = Quaternion(np.cos(angle / 2), p__) shape = vector.shape return np.einsum("kj, ikj->ij", vector.reshape((3, -1)), q__.rotation_matrix()[:3, :3]).reshape(shape)
python
def qrotate(vector, axis, angle): """Rotate *vector* around *axis* by *angle* (in radians). *vector* is a matrix of column vectors, as is *axis*. This function uses quaternion rotation. """ n_axis = axis / vnorm(axis) sin_angle = np.expand_dims(np.sin(angle / 2), 0) if np.ndim(n_axis) == 1: n_axis = np.expand_dims(n_axis, 1) p__ = np.dot(n_axis, sin_angle)[:, np.newaxis] else: p__ = n_axis * sin_angle q__ = Quaternion(np.cos(angle / 2), p__) shape = vector.shape return np.einsum("kj, ikj->ij", vector.reshape((3, -1)), q__.rotation_matrix()[:3, :3]).reshape(shape)
[ "def", "qrotate", "(", "vector", ",", "axis", ",", "angle", ")", ":", "n_axis", "=", "axis", "/", "vnorm", "(", "axis", ")", "sin_angle", "=", "np", ".", "expand_dims", "(", "np", ".", "sin", "(", "angle", "/", "2", ")", ",", "0", ")", "if", "np", ".", "ndim", "(", "n_axis", ")", "==", "1", ":", "n_axis", "=", "np", ".", "expand_dims", "(", "n_axis", ",", "1", ")", "p__", "=", "np", ".", "dot", "(", "n_axis", ",", "sin_angle", ")", "[", ":", ",", "np", ".", "newaxis", "]", "else", ":", "p__", "=", "n_axis", "*", "sin_angle", "q__", "=", "Quaternion", "(", "np", ".", "cos", "(", "angle", "/", "2", ")", ",", "p__", ")", "shape", "=", "vector", ".", "shape", "return", "np", ".", "einsum", "(", "\"kj, ikj->ij\"", ",", "vector", ".", "reshape", "(", "(", "3", ",", "-", "1", ")", ")", ",", "q__", ".", "rotation_matrix", "(", ")", "[", ":", "3", ",", ":", "3", "]", ")", ".", "reshape", "(", "shape", ")" ]
Rotate *vector* around *axis* by *angle* (in radians). *vector* is a matrix of column vectors, as is *axis*. This function uses quaternion rotation.
[ "Rotate", "*", "vector", "*", "around", "*", "axis", "*", "by", "*", "angle", "*", "(", "in", "radians", ")", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc.py#L155-L173
pytroll/pyorbital
pyorbital/geoloc.py
compute_pixels
def compute_pixels(orb, sgeom, times, rpy=(0.0, 0.0, 0.0)): """Compute cartesian coordinates of the pixels in instrument scan.""" if isinstance(orb, (list, tuple)): tle1, tle2 = orb orb = Orbital("mysatellite", line1=tle1, line2=tle2) # get position and velocity for each time of each pixel pos, vel = orb.get_position(times, normalize=False) # now, get the vectors pointing to each pixel vectors = sgeom.vectors(pos, vel, *rpy) # compute intersection of lines (directed by vectors and passing through # (0, 0, 0)) and ellipsoid. Derived from: # http://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection # do the computation between line and ellipsoid (WGS 84) # NB: AAPP uses GRS 80... centre = -pos a__ = 6378.137 # km # b__ = 6356.75231414 # km, GRS80 b__ = 6356.752314245 # km, WGS84 radius = np.array([[1 / a__, 1 / a__, 1 / b__]]).T shape = vectors.shape xr_ = vectors.reshape([3, -1]) * radius cr_ = centre.reshape([3, -1]) * radius ldotc = np.einsum("ij,ij->j", xr_, cr_) lsq = np.einsum("ij,ij->j", xr_, xr_) csq = np.einsum("ij,ij->j", cr_, cr_) d1_ = (ldotc - np.sqrt(ldotc ** 2 - csq * lsq + lsq)) / lsq # return the actual pixel positions return vectors * d1_.reshape(shape[1:]) - centre
python
def compute_pixels(orb, sgeom, times, rpy=(0.0, 0.0, 0.0)): """Compute cartesian coordinates of the pixels in instrument scan.""" if isinstance(orb, (list, tuple)): tle1, tle2 = orb orb = Orbital("mysatellite", line1=tle1, line2=tle2) # get position and velocity for each time of each pixel pos, vel = orb.get_position(times, normalize=False) # now, get the vectors pointing to each pixel vectors = sgeom.vectors(pos, vel, *rpy) # compute intersection of lines (directed by vectors and passing through # (0, 0, 0)) and ellipsoid. Derived from: # http://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection # do the computation between line and ellipsoid (WGS 84) # NB: AAPP uses GRS 80... centre = -pos a__ = 6378.137 # km # b__ = 6356.75231414 # km, GRS80 b__ = 6356.752314245 # km, WGS84 radius = np.array([[1 / a__, 1 / a__, 1 / b__]]).T shape = vectors.shape xr_ = vectors.reshape([3, -1]) * radius cr_ = centre.reshape([3, -1]) * radius ldotc = np.einsum("ij,ij->j", xr_, cr_) lsq = np.einsum("ij,ij->j", xr_, xr_) csq = np.einsum("ij,ij->j", cr_, cr_) d1_ = (ldotc - np.sqrt(ldotc ** 2 - csq * lsq + lsq)) / lsq # return the actual pixel positions return vectors * d1_.reshape(shape[1:]) - centre
[ "def", "compute_pixels", "(", "orb", ",", "sgeom", ",", "times", ",", "rpy", "=", "(", "0.0", ",", "0.0", ",", "0.0", ")", ")", ":", "if", "isinstance", "(", "orb", ",", "(", "list", ",", "tuple", ")", ")", ":", "tle1", ",", "tle2", "=", "orb", "orb", "=", "Orbital", "(", "\"mysatellite\"", ",", "line1", "=", "tle1", ",", "line2", "=", "tle2", ")", "# get position and velocity for each time of each pixel", "pos", ",", "vel", "=", "orb", ".", "get_position", "(", "times", ",", "normalize", "=", "False", ")", "# now, get the vectors pointing to each pixel", "vectors", "=", "sgeom", ".", "vectors", "(", "pos", ",", "vel", ",", "*", "rpy", ")", "# compute intersection of lines (directed by vectors and passing through", "# (0, 0, 0)) and ellipsoid. Derived from:", "# http://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection", "# do the computation between line and ellipsoid (WGS 84)", "# NB: AAPP uses GRS 80...", "centre", "=", "-", "pos", "a__", "=", "6378.137", "# km", "# b__ = 6356.75231414 # km, GRS80", "b__", "=", "6356.752314245", "# km, WGS84", "radius", "=", "np", ".", "array", "(", "[", "[", "1", "/", "a__", ",", "1", "/", "a__", ",", "1", "/", "b__", "]", "]", ")", ".", "T", "shape", "=", "vectors", ".", "shape", "xr_", "=", "vectors", ".", "reshape", "(", "[", "3", ",", "-", "1", "]", ")", "*", "radius", "cr_", "=", "centre", ".", "reshape", "(", "[", "3", ",", "-", "1", "]", ")", "*", "radius", "ldotc", "=", "np", ".", "einsum", "(", "\"ij,ij->j\"", ",", "xr_", ",", "cr_", ")", "lsq", "=", "np", ".", "einsum", "(", "\"ij,ij->j\"", ",", "xr_", ",", "xr_", ")", "csq", "=", "np", ".", "einsum", "(", "\"ij,ij->j\"", ",", "cr_", ",", "cr_", ")", "d1_", "=", "(", "ldotc", "-", "np", ".", "sqrt", "(", "ldotc", "**", "2", "-", "csq", "*", "lsq", "+", "lsq", ")", ")", "/", "lsq", "# return the actual pixel positions", "return", "vectors", "*", "d1_", ".", "reshape", "(", "shape", "[", "1", ":", "]", ")", "-", "centre" ]
Compute cartesian coordinates of the pixels in instrument scan.
[ "Compute", "cartesian", "coordinates", "of", "the", "pixels", "in", "instrument", "scan", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc.py#L204-L238
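A sketch of the full geolocation chain, following the usage pattern in the pyorbital documentation. It assumes network access for the TLE fetch, a platform name that resolves to a valid TLE, a NumPy version that still provides np.int (which the instrument definitions further down rely on), and that ScanGeometry.times and get_lonlatalt behave as in the project's documented geoloc example.

from datetime import datetime
import numpy as np
from pyorbital.orbital import Orbital
from pyorbital.geoloc import compute_pixels, get_lonlatalt
from pyorbital.geoloc_instrument_definitions import avhrr

scan_points = np.arange(0, 2048, 8)           # every 8th AVHRR pixel
sgeom = avhrr(10, scan_points)                # 10 scanlines
start_of_scan = datetime(2019, 1, 1, 12, 0)   # arbitrary example time
times = sgeom.times(start_of_scan)

orb = Orbital("Metop-B")                      # TLE fetched from the internet
pixels_pos = compute_pixels(orb, sgeom, times, rpy=(0.0, 0.0, 0.0))
lons, lats, alts = get_lonlatalt(pixels_pos, times)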
pytroll/pyorbital
pyorbital/geoloc.py
mnorm
def mnorm(m, axis=None): """norm of a matrix of vectors stacked along the *axis* dimension.""" if axis is None: axis = np.ndim(m) - 1 return np.sqrt((m**2).sum(axis))
python
def mnorm(m, axis=None): """norm of a matrix of vectors stacked along the *axis* dimension.""" if axis is None: axis = np.ndim(m) - 1 return np.sqrt((m**2).sum(axis))
[ "def", "mnorm", "(", "m", ",", "axis", "=", "None", ")", ":", "if", "axis", "is", "None", ":", "axis", "=", "np", ".", "ndim", "(", "m", ")", "-", "1", "return", "np", ".", "sqrt", "(", "(", "m", "**", "2", ")", ".", "sum", "(", "axis", ")", ")" ]
norm of a matrix of vectors stacked along the *axis* dimension.
[ "norm", "of", "a", "matrix", "of", "vectors", "stacked", "along", "the", "*", "axis", "*", "dimension", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc.py#L245-L249
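A quick, illustrative check of the default axis behaviour.

import numpy as np
from pyorbital.geoloc import mnorm

m = np.array([[3.0, 4.0],
              [5.0, 12.0]])
print(mnorm(m))           # norms along the last axis: [ 5. 13.]
print(mnorm(m, axis=0))   # norms along the columns instead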
pytroll/pyorbital
pyorbital/geoloc.py
ScanGeometry.vectors
def vectors(self, pos, vel, roll=0.0, pitch=0.0, yaw=0.0): """Get unit vectors pointing to the different pixels. *pos* and *vel* are column vectors, or matrices of column vectors. Returns vectors as stacked rows. """ # TODO: yaw steering mode ! # Fake nadir: This is the intersection point between the satellite # looking down at the centre of the ellipsoid and the surface of the # ellipsoid. Nadir on the other hand is the point which vertical goes # through the satellite... # nadir = -pos / vnorm(pos) nadir = subpoint(-pos) nadir /= vnorm(nadir) # x is along track (roll) x = vel / vnorm(vel) # y is cross track (pitch) y = np.cross(nadir, vel, 0, 0, 0) y /= vnorm(y) # rotate first around x x_rotated = qrotate(nadir, x, self.fovs[0] + roll) # then around y xy_rotated = qrotate(x_rotated, y, self.fovs[1] + pitch) # then around z return qrotate(xy_rotated, nadir, yaw)
python
def vectors(self, pos, vel, roll=0.0, pitch=0.0, yaw=0.0): """Get unit vectors pointing to the different pixels. *pos* and *vel* are column vectors, or matrices of column vectors. Returns vectors as stacked rows. """ # TODO: yaw steering mode ! # Fake nadir: This is the intersection point between the satellite # looking down at the centre of the ellipsoid and the surface of the # ellipsoid. Nadir on the other hand is the point which vertical goes # through the satellite... # nadir = -pos / vnorm(pos) nadir = subpoint(-pos) nadir /= vnorm(nadir) # x is along track (roll) x = vel / vnorm(vel) # y is cross track (pitch) y = np.cross(nadir, vel, 0, 0, 0) y /= vnorm(y) # rotate first around x x_rotated = qrotate(nadir, x, self.fovs[0] + roll) # then around y xy_rotated = qrotate(x_rotated, y, self.fovs[1] + pitch) # then around z return qrotate(xy_rotated, nadir, yaw)
[ "def", "vectors", "(", "self", ",", "pos", ",", "vel", ",", "roll", "=", "0.0", ",", "pitch", "=", "0.0", ",", "yaw", "=", "0.0", ")", ":", "# TODO: yaw steering mode !", "# Fake nadir: This is the intersection point between the satellite", "# looking down at the centre of the ellipsoid and the surface of the", "# ellipsoid. Nadir on the other hand is the point which vertical goes", "# through the satellite...", "# nadir = -pos / vnorm(pos)", "nadir", "=", "subpoint", "(", "-", "pos", ")", "nadir", "/=", "vnorm", "(", "nadir", ")", "# x is along track (roll)", "x", "=", "vel", "/", "vnorm", "(", "vel", ")", "# y is cross track (pitch)", "y", "=", "np", ".", "cross", "(", "nadir", ",", "vel", ",", "0", ",", "0", ",", "0", ")", "y", "/=", "vnorm", "(", "y", ")", "# rotate first around x", "x_rotated", "=", "qrotate", "(", "nadir", ",", "x", ",", "self", ".", "fovs", "[", "0", "]", "+", "roll", ")", "# then around y", "xy_rotated", "=", "qrotate", "(", "x_rotated", ",", "y", ",", "self", ".", "fovs", "[", "1", "]", "+", "pitch", ")", "# then around z", "return", "qrotate", "(", "xy_rotated", ",", "nadir", ",", "yaw", ")" ]
Get unit vectors pointing to the different pixels. *pos* and *vel* are column vectors, or matrices of column vectors. Returns vectors as stacked rows.
[ "Get", "unit", "vectors", "pointing", "to", "the", "different", "pixels", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc.py#L90-L119
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
avhrr
def avhrr(scans_nb, scan_points, scan_angle=55.37, frequency=1 / 6.0, apply_offset=True): """Definition of the avhrr instrument. Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm """ # build the avhrr instrument (scan angles) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))) avhrr_inst = np.tile( avhrr_inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1]) # + np.expand_dims(offset, 1)) times = np.tile(scan_points * 0.000025, [np.int(scans_nb), 1]) if apply_offset: offset = np.arange(np.int(scans_nb)) * frequency times += np.expand_dims(offset, 1) return ScanGeometry(avhrr_inst, times)
python
def avhrr(scans_nb, scan_points, scan_angle=55.37, frequency=1 / 6.0, apply_offset=True): """Definition of the avhrr instrument. Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm """ # build the avhrr instrument (scan angles) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))) avhrr_inst = np.tile( avhrr_inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1]) # + np.expand_dims(offset, 1)) times = np.tile(scan_points * 0.000025, [np.int(scans_nb), 1]) if apply_offset: offset = np.arange(np.int(scans_nb)) * frequency times += np.expand_dims(offset, 1) return ScanGeometry(avhrr_inst, times)
[ "def", "avhrr", "(", "scans_nb", ",", "scan_points", ",", "scan_angle", "=", "55.37", ",", "frequency", "=", "1", "/", "6.0", ",", "apply_offset", "=", "True", ")", ":", "# build the avhrr instrument (scan angles)", "avhrr_inst", "=", "np", ".", "vstack", "(", "(", "(", "scan_points", "/", "1023.5", "-", "1", ")", "*", "np", ".", "deg2rad", "(", "-", "scan_angle", ")", ",", "np", ".", "zeros", "(", "(", "len", "(", "scan_points", ")", ",", ")", ")", ")", ")", "avhrr_inst", "=", "np", ".", "tile", "(", "avhrr_inst", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", ",", "[", "1", ",", "np", ".", "int", "(", "scans_nb", ")", ",", "1", "]", ")", "# building the corresponding times array", "# times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1])", "# + np.expand_dims(offset, 1))", "times", "=", "np", ".", "tile", "(", "scan_points", "*", "0.000025", ",", "[", "np", ".", "int", "(", "scans_nb", ")", ",", "1", "]", ")", "if", "apply_offset", ":", "offset", "=", "np", ".", "arange", "(", "np", ".", "int", "(", "scans_nb", ")", ")", "*", "frequency", "times", "+=", "np", ".", "expand_dims", "(", "offset", ",", "1", ")", "return", "ScanGeometry", "(", "avhrr_inst", ",", "times", ")" ]
Definition of the avhrr instrument. Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm
[ "Definition", "of", "the", "avhrr", "instrument", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L52-L76
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
avhrr_gac
def avhrr_gac(scan_times, scan_points, scan_angle=55.37, frequency=0.5): """Definition of the avhrr instrument, gac version Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm """ try: offset = np.array([(t - scan_times[0]).seconds + (t - scan_times[0]).microseconds / 1000000.0 for t in scan_times]) except TypeError: offset = np.arange(scan_times) * frequency scans_nb = len(offset) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))) avhrr_inst = np.tile( avhrr_inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array times = (np.tile(scan_points * 0.000025, [scans_nb, 1]) + np.expand_dims(offset, 1)) return ScanGeometry(avhrr_inst, times)
python
def avhrr_gac(scan_times, scan_points, scan_angle=55.37, frequency=0.5): """Definition of the avhrr instrument, gac version Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm """ try: offset = np.array([(t - scan_times[0]).seconds + (t - scan_times[0]).microseconds / 1000000.0 for t in scan_times]) except TypeError: offset = np.arange(scan_times) * frequency scans_nb = len(offset) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))) avhrr_inst = np.tile( avhrr_inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array times = (np.tile(scan_points * 0.000025, [scans_nb, 1]) + np.expand_dims(offset, 1)) return ScanGeometry(avhrr_inst, times)
[ "def", "avhrr_gac", "(", "scan_times", ",", "scan_points", ",", "scan_angle", "=", "55.37", ",", "frequency", "=", "0.5", ")", ":", "try", ":", "offset", "=", "np", ".", "array", "(", "[", "(", "t", "-", "scan_times", "[", "0", "]", ")", ".", "seconds", "+", "(", "t", "-", "scan_times", "[", "0", "]", ")", ".", "microseconds", "/", "1000000.0", "for", "t", "in", "scan_times", "]", ")", "except", "TypeError", ":", "offset", "=", "np", ".", "arange", "(", "scan_times", ")", "*", "frequency", "scans_nb", "=", "len", "(", "offset", ")", "avhrr_inst", "=", "np", ".", "vstack", "(", "(", "(", "scan_points", "/", "1023.5", "-", "1", ")", "*", "np", ".", "deg2rad", "(", "-", "scan_angle", ")", ",", "np", ".", "zeros", "(", "(", "len", "(", "scan_points", ")", ",", ")", ")", ")", ")", "avhrr_inst", "=", "np", ".", "tile", "(", "avhrr_inst", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", ",", "[", "1", ",", "np", ".", "int", "(", "scans_nb", ")", ",", "1", "]", ")", "# building the corresponding times array", "times", "=", "(", "np", ".", "tile", "(", "scan_points", "*", "0.000025", ",", "[", "scans_nb", ",", "1", "]", ")", "+", "np", ".", "expand_dims", "(", "offset", ",", "1", ")", ")", "return", "ScanGeometry", "(", "avhrr_inst", ",", "times", ")" ]
Definition of the avhrr instrument, gac version Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm
[ "Definition", "of", "the", "avhrr", "instrument", "gac", "version" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L79-L102
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
viirs
def viirs(scans_nb, scan_indices=slice(0, None), chn_pixels=6400, scan_lines=32, scan_step=1): """Describe VIIRS instrument geometry, I-band by default. VIIRS scans several lines simultaneously (there are 16 detectors for each M-band, 32 detectors for each I-band) so the scan angles (and times) are two-dimensional arrays, contrary to AVHRR for example. scan_step: The increment in number of scans. E.g. if scan_step is 100 and the number of scans (scans_nb) is 10 then these 10 scans are distributed over the swath so that between each scan there are 99 emtpy (excluded) scans """ entire_width = np.arange(chn_pixels) scan_points = entire_width[scan_indices.astype('int')] scan_pixels = len(scan_points) ''' initial angle 55.84 deg replaced with 56.28 deg found in VIIRS User's Guide from NESDIS, version 1.2 (09/10/2013). Ref : NOAA Technical Report NESDIS 142. Seems to be better (not quantified)''' across_track = \ (scan_points / (chn_pixels / 2. - 0.5) - 1) * np.deg2rad(-56.28) y_max_angle = np.arctan2(11.87 / 2, 824.0) along_track = \ -(np.arange(scan_lines) / (scan_lines / 2. - 0.5) - 1) * \ y_max_angle scan = np.dstack((np.tile(across_track, (scan_lines, 1)).T, np.tile(along_track, (scan_pixels, 1)))) npp = np.tile(scan, [scans_nb, 1]).T # from the timestamp in the filenames, a granule takes 1:25.400 to record # (85.4 seconds) so 1.779166667 would be the duration of 1 scanline (48 # scans per granule) dividing the duration of a single scan by a width of # 6400 pixels results in 0.0002779947917 seconds for each column of 32 # pixels in the scanline # the individual times per pixel are probably wrong, unless the scanning # behaves the same as for AVHRR, The VIIRS sensor rotates to allow internal # calibration before each scanline. This would imply that the scanline # always moves in the same direction. more info @ # http://www.eoportal.org/directory/pres_NPOESSNationalPolarorbitingOperationalEnvironmentalSatelliteSystem.html SEC_EACH_SCANCOLUMN = 0.0002779947917 sec_scan_duration = 1.779166667 times = np.tile(scan_points * SEC_EACH_SCANCOLUMN, [np.int(scans_nb*scan_lines), 1]) offset = np.repeat(np.arange(scans_nb) * sec_scan_duration*scan_step, scan_lines) times += np.expand_dims(offset, 1) # build the scan geometry object return ScanGeometry(npp, times)
python
def viirs(scans_nb, scan_indices=slice(0, None), chn_pixels=6400, scan_lines=32, scan_step=1): """Describe VIIRS instrument geometry, I-band by default. VIIRS scans several lines simultaneously (there are 16 detectors for each M-band, 32 detectors for each I-band) so the scan angles (and times) are two-dimensional arrays, contrary to AVHRR for example. scan_step: The increment in number of scans. E.g. if scan_step is 100 and the number of scans (scans_nb) is 10 then these 10 scans are distributed over the swath so that between each scan there are 99 emtpy (excluded) scans """ entire_width = np.arange(chn_pixels) scan_points = entire_width[scan_indices.astype('int')] scan_pixels = len(scan_points) ''' initial angle 55.84 deg replaced with 56.28 deg found in VIIRS User's Guide from NESDIS, version 1.2 (09/10/2013). Ref : NOAA Technical Report NESDIS 142. Seems to be better (not quantified)''' across_track = \ (scan_points / (chn_pixels / 2. - 0.5) - 1) * np.deg2rad(-56.28) y_max_angle = np.arctan2(11.87 / 2, 824.0) along_track = \ -(np.arange(scan_lines) / (scan_lines / 2. - 0.5) - 1) * \ y_max_angle scan = np.dstack((np.tile(across_track, (scan_lines, 1)).T, np.tile(along_track, (scan_pixels, 1)))) npp = np.tile(scan, [scans_nb, 1]).T # from the timestamp in the filenames, a granule takes 1:25.400 to record # (85.4 seconds) so 1.779166667 would be the duration of 1 scanline (48 # scans per granule) dividing the duration of a single scan by a width of # 6400 pixels results in 0.0002779947917 seconds for each column of 32 # pixels in the scanline # the individual times per pixel are probably wrong, unless the scanning # behaves the same as for AVHRR, The VIIRS sensor rotates to allow internal # calibration before each scanline. This would imply that the scanline # always moves in the same direction. more info @ # http://www.eoportal.org/directory/pres_NPOESSNationalPolarorbitingOperationalEnvironmentalSatelliteSystem.html SEC_EACH_SCANCOLUMN = 0.0002779947917 sec_scan_duration = 1.779166667 times = np.tile(scan_points * SEC_EACH_SCANCOLUMN, [np.int(scans_nb*scan_lines), 1]) offset = np.repeat(np.arange(scans_nb) * sec_scan_duration*scan_step, scan_lines) times += np.expand_dims(offset, 1) # build the scan geometry object return ScanGeometry(npp, times)
[ "def", "viirs", "(", "scans_nb", ",", "scan_indices", "=", "slice", "(", "0", ",", "None", ")", ",", "chn_pixels", "=", "6400", ",", "scan_lines", "=", "32", ",", "scan_step", "=", "1", ")", ":", "entire_width", "=", "np", ".", "arange", "(", "chn_pixels", ")", "scan_points", "=", "entire_width", "[", "scan_indices", ".", "astype", "(", "'int'", ")", "]", "scan_pixels", "=", "len", "(", "scan_points", ")", "''' initial angle 55.84 deg replaced with 56.28 deg found in\n VIIRS User's Guide from NESDIS, version 1.2 (09/10/2013).\n Ref : NOAA Technical Report NESDIS 142.\n Seems to be better (not quantified)'''", "across_track", "=", "(", "scan_points", "/", "(", "chn_pixels", "/", "2.", "-", "0.5", ")", "-", "1", ")", "*", "np", ".", "deg2rad", "(", "-", "56.28", ")", "y_max_angle", "=", "np", ".", "arctan2", "(", "11.87", "/", "2", ",", "824.0", ")", "along_track", "=", "-", "(", "np", ".", "arange", "(", "scan_lines", ")", "/", "(", "scan_lines", "/", "2.", "-", "0.5", ")", "-", "1", ")", "*", "y_max_angle", "scan", "=", "np", ".", "dstack", "(", "(", "np", ".", "tile", "(", "across_track", ",", "(", "scan_lines", ",", "1", ")", ")", ".", "T", ",", "np", ".", "tile", "(", "along_track", ",", "(", "scan_pixels", ",", "1", ")", ")", ")", ")", "npp", "=", "np", ".", "tile", "(", "scan", ",", "[", "scans_nb", ",", "1", "]", ")", ".", "T", "# from the timestamp in the filenames, a granule takes 1:25.400 to record", "# (85.4 seconds) so 1.779166667 would be the duration of 1 scanline (48", "# scans per granule) dividing the duration of a single scan by a width of", "# 6400 pixels results in 0.0002779947917 seconds for each column of 32", "# pixels in the scanline", "# the individual times per pixel are probably wrong, unless the scanning", "# behaves the same as for AVHRR, The VIIRS sensor rotates to allow internal", "# calibration before each scanline. This would imply that the scanline", "# always moves in the same direction. more info @", "# http://www.eoportal.org/directory/pres_NPOESSNationalPolarorbitingOperationalEnvironmentalSatelliteSystem.html", "SEC_EACH_SCANCOLUMN", "=", "0.0002779947917", "sec_scan_duration", "=", "1.779166667", "times", "=", "np", ".", "tile", "(", "scan_points", "*", "SEC_EACH_SCANCOLUMN", ",", "[", "np", ".", "int", "(", "scans_nb", "*", "scan_lines", ")", ",", "1", "]", ")", "offset", "=", "np", ".", "repeat", "(", "np", ".", "arange", "(", "scans_nb", ")", "*", "sec_scan_duration", "*", "scan_step", ",", "scan_lines", ")", "times", "+=", "np", ".", "expand_dims", "(", "offset", ",", "1", ")", "# build the scan geometry object", "return", "ScanGeometry", "(", "npp", ",", "times", ")" ]
Describe VIIRS instrument geometry, I-band by default.
VIIRS scans several lines simultaneously (there are 16 detectors for each
M-band, 32 detectors for each I-band) so the scan angles (and times) are
two-dimensional arrays, contrary to AVHRR for example.

scan_step: The increment in number of scans. E.g. if scan_step is 100 and the
number of scans (scans_nb) is 10, then these 10 scans are distributed over the
swath so that between each scan there are 99 empty (excluded) scans.
[ "Describe", "VIIRS", "instrument", "geometry", "I", "-", "band", "by", "default", ".", "VIIRS", "scans", "several", "lines", "simultaneously", "(", "there", "are", "16", "detectors", "for", "each", "M", "-", "band", "32", "detectors", "for", "each", "I", "-", "band", ")", "so", "the", "scan", "angles", "(", "and", "times", ")", "are", "two", "-", "dimensional", "arrays", "contrary", "to", "AVHRR", "for", "example", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L144-L197
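An illustrative call. Note that the code above applies .astype('int') to scan_indices, so in practice an integer index array is what it expects (the slice default would not survive that call as written); the np.int calls also assume a NumPy version that still provides np.int.

import numpy as np
from pyorbital.geoloc_instrument_definitions import viirs

# I-band geometry for 3 scans, sampling only the first, centre and last pixel
# of each 6400-pixel scanline.
sgeom = viirs(3, np.array([0, 3199, 6399]))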
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
hirs4
def hirs4(scans_nb, edges_only=False): """Describe HIRS/4 instrument geometry. See: - https://www.eumetsat.int/website/home/Satellites/CurrentSatellites/Metop/MetopDesign/HIRS/index.html - https://www1.ncdc.noaa.gov/pub/data/satellite/publications/podguides/ N-15%20thru%20N-19/pdf/0.0%20NOAA%20KLM%20Users%20Guide.pdf (NOAA KLM Users Guide –August 2014 Revision) Parameters: scans_nb | int - number of scan lines Keywords: * edges_only - use only edge pixels Returns: pyorbital.geoloc.ScanGeometry object """ scan_len = 56 # 56 samples per scan scan_rate = 6.4 # single scan, seconds scan_angle = -49.5 # swath, degrees sampling_interval = abs(scan_rate) / scan_len # single view, seconds if edges_only: scan_points = np.array([0, scan_len - 1]) else: scan_points = np.arange(0, scan_len) # build the instrument (scan angles) samples = np.vstack(((scan_points / (scan_len * 0.5 - 0.5) - 1) * np.deg2rad(scan_angle), np.zeros((len(scan_points),)))) samples = np.tile(samples[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array offset = np.arange(scans_nb) * scan_rate times = (np.tile(scan_points * sampling_interval, [np.int(scans_nb), 1]) + np.expand_dims(offset, 1)) # build the scan geometry object return ScanGeometry(samples, times)
python
def hirs4(scans_nb, edges_only=False): """Describe HIRS/4 instrument geometry. See: - https://www.eumetsat.int/website/home/Satellites/CurrentSatellites/Metop/MetopDesign/HIRS/index.html - https://www1.ncdc.noaa.gov/pub/data/satellite/publications/podguides/ N-15%20thru%20N-19/pdf/0.0%20NOAA%20KLM%20Users%20Guide.pdf (NOAA KLM Users Guide –August 2014 Revision) Parameters: scans_nb | int - number of scan lines Keywords: * edges_only - use only edge pixels Returns: pyorbital.geoloc.ScanGeometry object """ scan_len = 56 # 56 samples per scan scan_rate = 6.4 # single scan, seconds scan_angle = -49.5 # swath, degrees sampling_interval = abs(scan_rate) / scan_len # single view, seconds if edges_only: scan_points = np.array([0, scan_len - 1]) else: scan_points = np.arange(0, scan_len) # build the instrument (scan angles) samples = np.vstack(((scan_points / (scan_len * 0.5 - 0.5) - 1) * np.deg2rad(scan_angle), np.zeros((len(scan_points),)))) samples = np.tile(samples[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array offset = np.arange(scans_nb) * scan_rate times = (np.tile(scan_points * sampling_interval, [np.int(scans_nb), 1]) + np.expand_dims(offset, 1)) # build the scan geometry object return ScanGeometry(samples, times)
[ "def", "hirs4", "(", "scans_nb", ",", "edges_only", "=", "False", ")", ":", "scan_len", "=", "56", "# 56 samples per scan", "scan_rate", "=", "6.4", "# single scan, seconds", "scan_angle", "=", "-", "49.5", "# swath, degrees", "sampling_interval", "=", "abs", "(", "scan_rate", ")", "/", "scan_len", "# single view, seconds", "if", "edges_only", ":", "scan_points", "=", "np", ".", "array", "(", "[", "0", ",", "scan_len", "-", "1", "]", ")", "else", ":", "scan_points", "=", "np", ".", "arange", "(", "0", ",", "scan_len", ")", "# build the instrument (scan angles)", "samples", "=", "np", ".", "vstack", "(", "(", "(", "scan_points", "/", "(", "scan_len", "*", "0.5", "-", "0.5", ")", "-", "1", ")", "*", "np", ".", "deg2rad", "(", "scan_angle", ")", ",", "np", ".", "zeros", "(", "(", "len", "(", "scan_points", ")", ",", ")", ")", ")", ")", "samples", "=", "np", ".", "tile", "(", "samples", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", ",", "[", "1", ",", "np", ".", "int", "(", "scans_nb", ")", ",", "1", "]", ")", "# building the corresponding times array", "offset", "=", "np", ".", "arange", "(", "scans_nb", ")", "*", "scan_rate", "times", "=", "(", "np", ".", "tile", "(", "scan_points", "*", "sampling_interval", ",", "[", "np", ".", "int", "(", "scans_nb", ")", ",", "1", "]", ")", "+", "np", ".", "expand_dims", "(", "offset", ",", "1", ")", ")", "# build the scan geometry object", "return", "ScanGeometry", "(", "samples", ",", "times", ")" ]
Describe HIRS/4 instrument geometry. See: - https://www.eumetsat.int/website/home/Satellites/CurrentSatellites/Metop/MetopDesign/HIRS/index.html - https://www1.ncdc.noaa.gov/pub/data/satellite/publications/podguides/ N-15%20thru%20N-19/pdf/0.0%20NOAA%20KLM%20Users%20Guide.pdf (NOAA KLM Users Guide –August 2014 Revision) Parameters: scans_nb | int - number of scan lines Keywords: * edges_only - use only edge pixels Returns: pyorbital.geoloc.ScanGeometry object
[ "Describe", "HIRS", "/", "4", "instrument", "geometry", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L321-L363
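Illustrative call (same np.int caveat as above).

from pyorbital.geoloc_instrument_definitions import hirs4

# Edge pixels only, which is enough for a quick swath outline of 100 scanlines.
sgeom = hirs4(100, edges_only=True)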
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
olci
def olci(scans_nb, scan_points=None): """Definition of the OLCI instrument. Source: Sentinel-3 OLCI Coverage https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/coverage """ if scan_points is None: scan_len = 4000 # samples per scan scan_points = np.arange(4000) else: scan_len = len(scan_points) # scan_rate = 0.044 # single scan, seconds scan_angle_west = 46.5 # swath, degrees scan_angle_east = -22.1 # swath, degrees # sampling_interval = 18e-3 # single view, seconds # build the olci instrument scan line angles scanline_angles = np.linspace(np.deg2rad(scan_angle_west), np.deg2rad(scan_angle_east), scan_len) inst = np.vstack((scanline_angles, np.zeros(scan_len,))) inst = np.tile(inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1]) # + np.expand_dims(offset, 1)) times = np.tile(np.zeros_like(scanline_angles), [np.int(scans_nb), 1]) # if apply_offset: # offset = np.arange(np.int(scans_nb)) * frequency # times += np.expand_dims(offset, 1) return ScanGeometry(inst, times)
python
def olci(scans_nb, scan_points=None): """Definition of the OLCI instrument. Source: Sentinel-3 OLCI Coverage https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/coverage """ if scan_points is None: scan_len = 4000 # samples per scan scan_points = np.arange(4000) else: scan_len = len(scan_points) # scan_rate = 0.044 # single scan, seconds scan_angle_west = 46.5 # swath, degrees scan_angle_east = -22.1 # swath, degrees # sampling_interval = 18e-3 # single view, seconds # build the olci instrument scan line angles scanline_angles = np.linspace(np.deg2rad(scan_angle_west), np.deg2rad(scan_angle_east), scan_len) inst = np.vstack((scanline_angles, np.zeros(scan_len,))) inst = np.tile(inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1]) # + np.expand_dims(offset, 1)) times = np.tile(np.zeros_like(scanline_angles), [np.int(scans_nb), 1]) # if apply_offset: # offset = np.arange(np.int(scans_nb)) * frequency # times += np.expand_dims(offset, 1) return ScanGeometry(inst, times)
[ "def", "olci", "(", "scans_nb", ",", "scan_points", "=", "None", ")", ":", "if", "scan_points", "is", "None", ":", "scan_len", "=", "4000", "# samples per scan", "scan_points", "=", "np", ".", "arange", "(", "4000", ")", "else", ":", "scan_len", "=", "len", "(", "scan_points", ")", "# scan_rate = 0.044 # single scan, seconds", "scan_angle_west", "=", "46.5", "# swath, degrees", "scan_angle_east", "=", "-", "22.1", "# swath, degrees", "# sampling_interval = 18e-3 # single view, seconds", "# build the olci instrument scan line angles", "scanline_angles", "=", "np", ".", "linspace", "(", "np", ".", "deg2rad", "(", "scan_angle_west", ")", ",", "np", ".", "deg2rad", "(", "scan_angle_east", ")", ",", "scan_len", ")", "inst", "=", "np", ".", "vstack", "(", "(", "scanline_angles", ",", "np", ".", "zeros", "(", "scan_len", ",", ")", ")", ")", "inst", "=", "np", ".", "tile", "(", "inst", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", ",", "[", "1", ",", "np", ".", "int", "(", "scans_nb", ")", ",", "1", "]", ")", "# building the corresponding times array", "# times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1])", "# + np.expand_dims(offset, 1))", "times", "=", "np", ".", "tile", "(", "np", ".", "zeros_like", "(", "scanline_angles", ")", ",", "[", "np", ".", "int", "(", "scans_nb", ")", ",", "1", "]", ")", "# if apply_offset:", "# offset = np.arange(np.int(scans_nb)) * frequency", "# times += np.expand_dims(offset, 1)", "return", "ScanGeometry", "(", "inst", ",", "times", ")" ]
Definition of the OLCI instrument. Source: Sentinel-3 OLCI Coverage https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/coverage
[ "Definition", "of", "the", "OLCI", "instrument", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L434-L466
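Illustrative call with the default 4000 samples per scan (same np.int caveat as above).

from pyorbital.geoloc_instrument_definitions import olci

sgeom = olci(5)   # 5 scanlines, default 4000 samples per scan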
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
ascat
def ascat(scan_nb, scan_points=None): """ASCAT make two scans one to the left and one to the right of the sub-satellite track. """ if scan_points is None: scan_len = 42 # samples per scan scan_points = np.arange(42) else: scan_len = len(scan_points) scan_angle_inner = -25.0 # swath, degrees scan_angle_outer = -53.0 # swath, degrees scan_rate = 3.74747474747 # single scan, seconds if scan_len < 2: raise ValueError("Need at least two scan points!") sampling_interval = scan_rate / float(np.max(scan_points) + 1) # build the Metop/ascat instrument scan line angles scanline_angles_one = np.linspace(-np.deg2rad(scan_angle_outer), -np.deg2rad(scan_angle_inner), 21) scanline_angles_two = np.linspace(np.deg2rad(scan_angle_inner), np.deg2rad(scan_angle_outer), 21) scan_angles = np.concatenate( [scanline_angles_one, scanline_angles_two])[scan_points] inst = np.vstack((scan_angles, np.zeros(scan_len * 1,))) inst = np.tile(inst[:, np.newaxis, :], [1, np.int(scan_nb), 1]) # building the corresponding times array offset = np.arange(scan_nb) * scan_rate times = (np.tile(scan_points * sampling_interval, [np.int(scan_nb), 1]) + np.expand_dims(offset, 1)) return ScanGeometry(inst, times)
python
def ascat(scan_nb, scan_points=None): """ASCAT make two scans one to the left and one to the right of the sub-satellite track. """ if scan_points is None: scan_len = 42 # samples per scan scan_points = np.arange(42) else: scan_len = len(scan_points) scan_angle_inner = -25.0 # swath, degrees scan_angle_outer = -53.0 # swath, degrees scan_rate = 3.74747474747 # single scan, seconds if scan_len < 2: raise ValueError("Need at least two scan points!") sampling_interval = scan_rate / float(np.max(scan_points) + 1) # build the Metop/ascat instrument scan line angles scanline_angles_one = np.linspace(-np.deg2rad(scan_angle_outer), -np.deg2rad(scan_angle_inner), 21) scanline_angles_two = np.linspace(np.deg2rad(scan_angle_inner), np.deg2rad(scan_angle_outer), 21) scan_angles = np.concatenate( [scanline_angles_one, scanline_angles_two])[scan_points] inst = np.vstack((scan_angles, np.zeros(scan_len * 1,))) inst = np.tile(inst[:, np.newaxis, :], [1, np.int(scan_nb), 1]) # building the corresponding times array offset = np.arange(scan_nb) * scan_rate times = (np.tile(scan_points * sampling_interval, [np.int(scan_nb), 1]) + np.expand_dims(offset, 1)) return ScanGeometry(inst, times)
[ "def", "ascat", "(", "scan_nb", ",", "scan_points", "=", "None", ")", ":", "if", "scan_points", "is", "None", ":", "scan_len", "=", "42", "# samples per scan", "scan_points", "=", "np", ".", "arange", "(", "42", ")", "else", ":", "scan_len", "=", "len", "(", "scan_points", ")", "scan_angle_inner", "=", "-", "25.0", "# swath, degrees", "scan_angle_outer", "=", "-", "53.0", "# swath, degrees", "scan_rate", "=", "3.74747474747", "# single scan, seconds", "if", "scan_len", "<", "2", ":", "raise", "ValueError", "(", "\"Need at least two scan points!\"", ")", "sampling_interval", "=", "scan_rate", "/", "float", "(", "np", ".", "max", "(", "scan_points", ")", "+", "1", ")", "# build the Metop/ascat instrument scan line angles", "scanline_angles_one", "=", "np", ".", "linspace", "(", "-", "np", ".", "deg2rad", "(", "scan_angle_outer", ")", ",", "-", "np", ".", "deg2rad", "(", "scan_angle_inner", ")", ",", "21", ")", "scanline_angles_two", "=", "np", ".", "linspace", "(", "np", ".", "deg2rad", "(", "scan_angle_inner", ")", ",", "np", ".", "deg2rad", "(", "scan_angle_outer", ")", ",", "21", ")", "scan_angles", "=", "np", ".", "concatenate", "(", "[", "scanline_angles_one", ",", "scanline_angles_two", "]", ")", "[", "scan_points", "]", "inst", "=", "np", ".", "vstack", "(", "(", "scan_angles", ",", "np", ".", "zeros", "(", "scan_len", "*", "1", ",", ")", ")", ")", "inst", "=", "np", ".", "tile", "(", "inst", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", ",", "[", "1", ",", "np", ".", "int", "(", "scan_nb", ")", ",", "1", "]", ")", "# building the corresponding times array", "offset", "=", "np", ".", "arange", "(", "scan_nb", ")", "*", "scan_rate", "times", "=", "(", "np", ".", "tile", "(", "scan_points", "*", "sampling_interval", ",", "[", "np", ".", "int", "(", "scan_nb", ")", ",", "1", "]", ")", "+", "np", ".", "expand_dims", "(", "offset", ",", "1", ")", ")", "return", "ScanGeometry", "(", "inst", ",", "times", ")" ]
ASCAT makes two scans, one to the left and one to the right of the
sub-satellite track.
[ "ASCAT", "make", "two", "scans", "one", "to", "the", "left", "and", "one", "to", "the", "right", "of", "the", "sub", "-", "satellite", "track", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L469-L507
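Illustrative call with the default 42 nodes, 21 on each side of the sub-satellite track (same np.int caveat as above).

from pyorbital.geoloc_instrument_definitions import ascat

sgeom = ascat(10)   # 10 scans, default scan_points covering both swaths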
pytroll/pyorbital
pyorbital/tlefile.py
read_platform_numbers
def read_platform_numbers(in_upper=False, num_as_int=False): """Read platform numbers from $PPP_CONFIG_DIR/platforms.txt if available.""" out_dict = {} os.getenv('PPP_CONFIG_DIR', PKG_CONFIG_DIR) platform_file = None if 'PPP_CONFIG_DIR' in os.environ: platform_file = os.path.join(os.environ['PPP_CONFIG_DIR'], 'platforms.txt') if not platform_file or not os.path.isfile(platform_file): platform_file = os.path.join(PKG_CONFIG_DIR, 'platforms.txt') try: fid = open(platform_file, 'r') except IOError: LOGGER.error("Platform file %s not found.", platform_file) return out_dict for row in fid: # skip comment lines if not row.startswith('#'): parts = row.split() if len(parts) < 2: continue if in_upper: parts[0] = parts[0].upper() if num_as_int: parts[1] = int(parts[1]) out_dict[parts[0]] = parts[1] fid.close() return out_dict
python
def read_platform_numbers(in_upper=False, num_as_int=False): """Read platform numbers from $PPP_CONFIG_DIR/platforms.txt if available.""" out_dict = {} os.getenv('PPP_CONFIG_DIR', PKG_CONFIG_DIR) platform_file = None if 'PPP_CONFIG_DIR' in os.environ: platform_file = os.path.join(os.environ['PPP_CONFIG_DIR'], 'platforms.txt') if not platform_file or not os.path.isfile(platform_file): platform_file = os.path.join(PKG_CONFIG_DIR, 'platforms.txt') try: fid = open(platform_file, 'r') except IOError: LOGGER.error("Platform file %s not found.", platform_file) return out_dict for row in fid: # skip comment lines if not row.startswith('#'): parts = row.split() if len(parts) < 2: continue if in_upper: parts[0] = parts[0].upper() if num_as_int: parts[1] = int(parts[1]) out_dict[parts[0]] = parts[1] fid.close() return out_dict
[ "def", "read_platform_numbers", "(", "in_upper", "=", "False", ",", "num_as_int", "=", "False", ")", ":", "out_dict", "=", "{", "}", "os", ".", "getenv", "(", "'PPP_CONFIG_DIR'", ",", "PKG_CONFIG_DIR", ")", "platform_file", "=", "None", "if", "'PPP_CONFIG_DIR'", "in", "os", ".", "environ", ":", "platform_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'PPP_CONFIG_DIR'", "]", ",", "'platforms.txt'", ")", "if", "not", "platform_file", "or", "not", "os", ".", "path", ".", "isfile", "(", "platform_file", ")", ":", "platform_file", "=", "os", ".", "path", ".", "join", "(", "PKG_CONFIG_DIR", ",", "'platforms.txt'", ")", "try", ":", "fid", "=", "open", "(", "platform_file", ",", "'r'", ")", "except", "IOError", ":", "LOGGER", ".", "error", "(", "\"Platform file %s not found.\"", ",", "platform_file", ")", "return", "out_dict", "for", "row", "in", "fid", ":", "# skip comment lines", "if", "not", "row", ".", "startswith", "(", "'#'", ")", ":", "parts", "=", "row", ".", "split", "(", ")", "if", "len", "(", "parts", ")", "<", "2", ":", "continue", "if", "in_upper", ":", "parts", "[", "0", "]", "=", "parts", "[", "0", "]", ".", "upper", "(", ")", "if", "num_as_int", ":", "parts", "[", "1", "]", "=", "int", "(", "parts", "[", "1", "]", ")", "out_dict", "[", "parts", "[", "0", "]", "]", "=", "parts", "[", "1", "]", "fid", ".", "close", "(", ")", "return", "out_dict" ]
Read platform numbers from $PPP_CONFIG_DIR/platforms.txt if available.
[ "Read", "platform", "numbers", "from", "$PPP_CONFIG_DIR", "/", "platforms", ".", "txt", "if", "available", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L49-L77
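An illustrative call; the key spelling has to match the entries in platforms.txt, and PPP_CONFIG_DIR is only consulted if it is set.

from pyorbital.tlefile import read_platform_numbers

# Map platform names to NORAD catalogue numbers, uppercased and as integers.
platforms = read_platform_numbers(in_upper=True, num_as_int=True)
print(platforms.get('NOAA-19'))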
pytroll/pyorbital
pyorbital/tlefile.py
read
def read(platform, tle_file=None, line1=None, line2=None): """Read TLE for `platform` from `tle_file` File is read from `line1` to `line2`, from the newest file provided in the TLES pattern, or from internet if none is provided. """ return Tle(platform, tle_file=tle_file, line1=line1, line2=line2)
python
def read(platform, tle_file=None, line1=None, line2=None): """Read TLE for `platform` from `tle_file` File is read from `line1` to `line2`, from the newest file provided in the TLES pattern, or from internet if none is provided. """ return Tle(platform, tle_file=tle_file, line1=line1, line2=line2)
[ "def", "read", "(", "platform", ",", "tle_file", "=", "None", ",", "line1", "=", "None", ",", "line2", "=", "None", ")", ":", "return", "Tle", "(", "platform", ",", "tle_file", "=", "tle_file", ",", "line1", "=", "line1", ",", "line2", "=", "line2", ")" ]
Read TLE for `platform` from `tle_file`.

The TLE is built directly from `line1` and `line2` if both are given,
otherwise read from `tle_file`, from the newest file matching the TLES
pattern, or fetched from the internet if none of these is provided.
[ "Read", "TLE", "for", "platform", "from", "tle_file" ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L91-L97
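A usage sketch following the pattern in the pyorbital README; with no file given the TLE is fetched from the internet, and the platform spelling must match either the TLE source or an entry in platforms.txt.

from pyorbital import tlefile

tle = tlefile.read('NOAA-19')   # no tle_file given: fetched from the internet
print(tle.inclination)          # parsed orbital elements are exposed as attributes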
pytroll/pyorbital
pyorbital/tlefile.py
fetch
def fetch(destination): """Fetch TLE from internet and save it to `destination`.""" with io.open(destination, mode="w", encoding="utf-8") as dest: for url in TLE_URLS: response = urlopen(url) dest.write(response.read().decode("utf-8"))
python
def fetch(destination): """Fetch TLE from internet and save it to `destination`.""" with io.open(destination, mode="w", encoding="utf-8") as dest: for url in TLE_URLS: response = urlopen(url) dest.write(response.read().decode("utf-8"))
[ "def", "fetch", "(", "destination", ")", ":", "with", "io", ".", "open", "(", "destination", ",", "mode", "=", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "dest", ":", "for", "url", "in", "TLE_URLS", ":", "response", "=", "urlopen", "(", "url", ")", "dest", ".", "write", "(", "response", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")" ]
Fetch TLE from internet and save it to `destination`.
[ "Fetch", "TLE", "from", "internet", "and", "save", "it", "to", "destination", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L100-L105
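An illustrative way to cache the TLEs locally and let Tle pick them up through the TLES glob pattern used below (the path is arbitrary).

import os
from pyorbital.tlefile import fetch

fetch('/tmp/tles_today.txt')             # write the contents of all TLE_URLS to one file
os.environ['TLES'] = '/tmp/tles_*.txt'   # glob pattern consulted when reading TLEs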
pytroll/pyorbital
pyorbital/tlefile.py
Tle._checksum
def _checksum(self): """Performs the checksum for the current TLE.""" for line in [self._line1, self._line2]: check = 0 for char in line[:-1]: if char.isdigit(): check += int(char) if char == "-": check += 1 if (check % 10) != int(line[-1]): raise ChecksumError(self._platform + " " + line)
python
def _checksum(self): """Performs the checksum for the current TLE.""" for line in [self._line1, self._line2]: check = 0 for char in line[:-1]: if char.isdigit(): check += int(char) if char == "-": check += 1 if (check % 10) != int(line[-1]): raise ChecksumError(self._platform + " " + line)
[ "def", "_checksum", "(", "self", ")", ":", "for", "line", "in", "[", "self", ".", "_line1", ",", "self", ".", "_line2", "]", ":", "check", "=", "0", "for", "char", "in", "line", "[", ":", "-", "1", "]", ":", "if", "char", ".", "isdigit", "(", ")", ":", "check", "+=", "int", "(", "char", ")", "if", "char", "==", "\"-\"", ":", "check", "+=", "1", "if", "(", "check", "%", "10", ")", "!=", "int", "(", "line", "[", "-", "1", "]", ")", ":", "raise", "ChecksumError", "(", "self", ".", "_platform", "+", "\" \"", "+", "line", ")" ]
Performs the checksum for the current TLE.
[ "Performs", "the", "checksum", "for", "the", "current", "TLE", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L162-L173
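The modulo-10 rule implemented above can be restated as a standalone helper (not a pyorbital API, just an illustration of the rule): digits are summed, each '-' counts as 1, and the total modulo 10 must equal the final character of the line.

def tle_checksum_ok(line):
    """Check a single TLE line against the modulo-10 checksum rule."""
    check = sum(int(c) if c.isdigit() else (1 if c == '-' else 0)
                for c in line[:-1])
    return check % 10 == int(line[-1])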
pytroll/pyorbital
pyorbital/tlefile.py
Tle._read_tle
def _read_tle(self): """Read TLE data.""" if self._line1 is not None and self._line2 is not None: tle = self._line1.strip() + "\n" + self._line2.strip() else: def _open(filename): return io.open(filename, 'rb') if self._tle_file: urls = (self._tle_file,) open_func = _open elif "TLES" in os.environ: # TODO: get the TLE file closest in time to the actual satellite # overpass, NOT the latest! urls = (max(glob.glob(os.environ["TLES"]), key=os.path.getctime), ) LOGGER.debug("Reading TLE from %s", urls[0]) open_func = _open else: LOGGER.debug("Fetch TLE from the internet.") urls = TLE_URLS open_func = urlopen tle = "" designator = "1 " + SATELLITES.get(self._platform, '') for url in urls: fid = open_func(url) for l_0 in fid: l_0 = l_0.decode('utf-8') if l_0.strip() == self._platform: l_1 = next(fid).decode('utf-8') l_2 = next(fid).decode('utf-8') tle = l_1.strip() + "\n" + l_2.strip() break if(self._platform in SATELLITES and l_0.strip().startswith(designator)): l_1 = l_0 l_2 = next(fid).decode('utf-8') tle = l_1.strip() + "\n" + l_2.strip() LOGGER.debug("Found platform %s, ID: %s", self._platform, SATELLITES[self._platform]) break fid.close() if tle: break if not tle: raise KeyError("Found no TLE entry for '%s'" % self._platform) self._line1, self._line2 = tle.split('\n')
python
def _read_tle(self):
    """Read TLE data."""
    if self._line1 is not None and self._line2 is not None:
        tle = self._line1.strip() + "\n" + self._line2.strip()
    else:
        def _open(filename):
            return io.open(filename, 'rb')

        if self._tle_file:
            urls = (self._tle_file,)
            open_func = _open
        elif "TLES" in os.environ:
            # TODO: get the TLE file closest in time to the actual satellite
            # overpass, NOT the latest!
            urls = (max(glob.glob(os.environ["TLES"]),
                        key=os.path.getctime), )
            LOGGER.debug("Reading TLE from %s", urls[0])
            open_func = _open
        else:
            LOGGER.debug("Fetch TLE from the internet.")
            urls = TLE_URLS
            open_func = urlopen

        tle = ""
        designator = "1 " + SATELLITES.get(self._platform, '')
        for url in urls:
            fid = open_func(url)
            for l_0 in fid:
                l_0 = l_0.decode('utf-8')
                if l_0.strip() == self._platform:
                    l_1 = next(fid).decode('utf-8')
                    l_2 = next(fid).decode('utf-8')
                    tle = l_1.strip() + "\n" + l_2.strip()
                    break
                if(self._platform in SATELLITES and
                   l_0.strip().startswith(designator)):
                    l_1 = l_0
                    l_2 = next(fid).decode('utf-8')
                    tle = l_1.strip() + "\n" + l_2.strip()
                    LOGGER.debug("Found platform %s, ID: %s",
                                 self._platform,
                                 SATELLITES[self._platform])
                    break
            fid.close()
            if tle:
                break

        if not tle:
            raise KeyError("Found no TLE entry for '%s'" % self._platform)

    self._line1, self._line2 = tle.split('\n')
[ "def", "_read_tle", "(", "self", ")", ":", "if", "self", ".", "_line1", "is", "not", "None", "and", "self", ".", "_line2", "is", "not", "None", ":", "tle", "=", "self", ".", "_line1", ".", "strip", "(", ")", "+", "\"\\n\"", "+", "self", ".", "_line2", ".", "strip", "(", ")", "else", ":", "def", "_open", "(", "filename", ")", ":", "return", "io", ".", "open", "(", "filename", ",", "'rb'", ")", "if", "self", ".", "_tle_file", ":", "urls", "=", "(", "self", ".", "_tle_file", ",", ")", "open_func", "=", "_open", "elif", "\"TLES\"", "in", "os", ".", "environ", ":", "# TODO: get the TLE file closest in time to the actual satellite", "# overpass, NOT the latest!", "urls", "=", "(", "max", "(", "glob", ".", "glob", "(", "os", ".", "environ", "[", "\"TLES\"", "]", ")", ",", "key", "=", "os", ".", "path", ".", "getctime", ")", ",", ")", "LOGGER", ".", "debug", "(", "\"Reading TLE from %s\"", ",", "urls", "[", "0", "]", ")", "open_func", "=", "_open", "else", ":", "LOGGER", ".", "debug", "(", "\"Fetch TLE from the internet.\"", ")", "urls", "=", "TLE_URLS", "open_func", "=", "urlopen", "tle", "=", "\"\"", "designator", "=", "\"1 \"", "+", "SATELLITES", ".", "get", "(", "self", ".", "_platform", ",", "''", ")", "for", "url", "in", "urls", ":", "fid", "=", "open_func", "(", "url", ")", "for", "l_0", "in", "fid", ":", "l_0", "=", "l_0", ".", "decode", "(", "'utf-8'", ")", "if", "l_0", ".", "strip", "(", ")", "==", "self", ".", "_platform", ":", "l_1", "=", "next", "(", "fid", ")", ".", "decode", "(", "'utf-8'", ")", "l_2", "=", "next", "(", "fid", ")", ".", "decode", "(", "'utf-8'", ")", "tle", "=", "l_1", ".", "strip", "(", ")", "+", "\"\\n\"", "+", "l_2", ".", "strip", "(", ")", "break", "if", "(", "self", ".", "_platform", "in", "SATELLITES", "and", "l_0", ".", "strip", "(", ")", ".", "startswith", "(", "designator", ")", ")", ":", "l_1", "=", "l_0", "l_2", "=", "next", "(", "fid", ")", ".", "decode", "(", "'utf-8'", ")", "tle", "=", "l_1", ".", "strip", "(", ")", "+", "\"\\n\"", "+", "l_2", ".", "strip", "(", ")", "LOGGER", ".", "debug", "(", "\"Found platform %s, ID: %s\"", ",", "self", ".", "_platform", ",", "SATELLITES", "[", "self", ".", "_platform", "]", ")", "break", "fid", ".", "close", "(", ")", "if", "tle", ":", "break", "if", "not", "tle", ":", "raise", "KeyError", "(", "\"Found no TLE entry for '%s'\"", "%", "self", ".", "_platform", ")", "self", ".", "_line1", ",", "self", ".", "_line2", "=", "tle", ".", "split", "(", "'\\n'", ")" ]
Read TLE data.
[ "Read", "TLE", "data", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L175-L225
pytroll/pyorbital
pyorbital/tlefile.py
Tle._parse_tle
def _parse_tle(self):
    """Parse values from TLE data."""
    def _read_tle_decimal(rep):
        """Convert *rep* to decimal value."""
        if rep[0] in ["-", " ", "+"]:
            digits = rep[1:-2].strip()
            val = rep[0] + "." + digits + "e" + rep[-2:]
        else:
            digits = rep[:-2].strip()
            val = "." + digits + "e" + rep[-2:]
        return float(val)

    self.satnumber = self._line1[2:7]
    self.classification = self._line1[7]
    self.id_launch_year = self._line1[9:11]
    self.id_launch_number = self._line1[11:14]
    self.id_launch_piece = self._line1[14:17]
    self.epoch_year = self._line1[18:20]
    self.epoch_day = float(self._line1[20:32])
    self.epoch = \
        np.datetime64(datetime.datetime.strptime(self.epoch_year, "%y") +
                      datetime.timedelta(days=self.epoch_day - 1), 'us')
    self.mean_motion_derivative = float(self._line1[33:43])
    self.mean_motion_sec_derivative = _read_tle_decimal(self._line1[44:52])
    self.bstar = _read_tle_decimal(self._line1[53:61])
    try:
        self.ephemeris_type = int(self._line1[62])
    except ValueError:
        self.ephemeris_type = 0
    self.element_number = int(self._line1[64:68])

    self.inclination = float(self._line2[8:16])
    self.right_ascension = float(self._line2[17:25])
    self.excentricity = int(self._line2[26:33]) * 10 ** -7
    self.arg_perigee = float(self._line2[34:42])
    self.mean_anomaly = float(self._line2[43:51])
    self.mean_motion = float(self._line2[52:63])
    self.orbit = int(self._line2[63:68])
python
def _parse_tle(self):
    """Parse values from TLE data."""
    def _read_tle_decimal(rep):
        """Convert *rep* to decimal value."""
        if rep[0] in ["-", " ", "+"]:
            digits = rep[1:-2].strip()
            val = rep[0] + "." + digits + "e" + rep[-2:]
        else:
            digits = rep[:-2].strip()
            val = "." + digits + "e" + rep[-2:]
        return float(val)

    self.satnumber = self._line1[2:7]
    self.classification = self._line1[7]
    self.id_launch_year = self._line1[9:11]
    self.id_launch_number = self._line1[11:14]
    self.id_launch_piece = self._line1[14:17]
    self.epoch_year = self._line1[18:20]
    self.epoch_day = float(self._line1[20:32])
    self.epoch = \
        np.datetime64(datetime.datetime.strptime(self.epoch_year, "%y") +
                      datetime.timedelta(days=self.epoch_day - 1), 'us')
    self.mean_motion_derivative = float(self._line1[33:43])
    self.mean_motion_sec_derivative = _read_tle_decimal(self._line1[44:52])
    self.bstar = _read_tle_decimal(self._line1[53:61])
    try:
        self.ephemeris_type = int(self._line1[62])
    except ValueError:
        self.ephemeris_type = 0
    self.element_number = int(self._line1[64:68])

    self.inclination = float(self._line2[8:16])
    self.right_ascension = float(self._line2[17:25])
    self.excentricity = int(self._line2[26:33]) * 10 ** -7
    self.arg_perigee = float(self._line2[34:42])
    self.mean_anomaly = float(self._line2[43:51])
    self.mean_motion = float(self._line2[52:63])
    self.orbit = int(self._line2[63:68])
[ "def", "_parse_tle", "(", "self", ")", ":", "def", "_read_tle_decimal", "(", "rep", ")", ":", "\"\"\"Convert *rep* to decimal value.\"\"\"", "if", "rep", "[", "0", "]", "in", "[", "\"-\"", ",", "\" \"", ",", "\"+\"", "]", ":", "digits", "=", "rep", "[", "1", ":", "-", "2", "]", ".", "strip", "(", ")", "val", "=", "rep", "[", "0", "]", "+", "\".\"", "+", "digits", "+", "\"e\"", "+", "rep", "[", "-", "2", ":", "]", "else", ":", "digits", "=", "rep", "[", ":", "-", "2", "]", ".", "strip", "(", ")", "val", "=", "\".\"", "+", "digits", "+", "\"e\"", "+", "rep", "[", "-", "2", ":", "]", "return", "float", "(", "val", ")", "self", ".", "satnumber", "=", "self", ".", "_line1", "[", "2", ":", "7", "]", "self", ".", "classification", "=", "self", ".", "_line1", "[", "7", "]", "self", ".", "id_launch_year", "=", "self", ".", "_line1", "[", "9", ":", "11", "]", "self", ".", "id_launch_number", "=", "self", ".", "_line1", "[", "11", ":", "14", "]", "self", ".", "id_launch_piece", "=", "self", ".", "_line1", "[", "14", ":", "17", "]", "self", ".", "epoch_year", "=", "self", ".", "_line1", "[", "18", ":", "20", "]", "self", ".", "epoch_day", "=", "float", "(", "self", ".", "_line1", "[", "20", ":", "32", "]", ")", "self", ".", "epoch", "=", "np", ".", "datetime64", "(", "datetime", ".", "datetime", ".", "strptime", "(", "self", ".", "epoch_year", ",", "\"%y\"", ")", "+", "datetime", ".", "timedelta", "(", "days", "=", "self", ".", "epoch_day", "-", "1", ")", ",", "'us'", ")", "self", ".", "mean_motion_derivative", "=", "float", "(", "self", ".", "_line1", "[", "33", ":", "43", "]", ")", "self", ".", "mean_motion_sec_derivative", "=", "_read_tle_decimal", "(", "self", ".", "_line1", "[", "44", ":", "52", "]", ")", "self", ".", "bstar", "=", "_read_tle_decimal", "(", "self", ".", "_line1", "[", "53", ":", "61", "]", ")", "try", ":", "self", ".", "ephemeris_type", "=", "int", "(", "self", ".", "_line1", "[", "62", "]", ")", "except", "ValueError", ":", "self", ".", "ephemeris_type", "=", "0", "self", ".", "element_number", "=", "int", "(", "self", ".", "_line1", "[", "64", ":", "68", "]", ")", "self", ".", "inclination", "=", "float", "(", "self", ".", "_line2", "[", "8", ":", "16", "]", ")", "self", ".", "right_ascension", "=", "float", "(", "self", ".", "_line2", "[", "17", ":", "25", "]", ")", "self", ".", "excentricity", "=", "int", "(", "self", ".", "_line2", "[", "26", ":", "33", "]", ")", "*", "10", "**", "-", "7", "self", ".", "arg_perigee", "=", "float", "(", "self", ".", "_line2", "[", "34", ":", "42", "]", ")", "self", ".", "mean_anomaly", "=", "float", "(", "self", ".", "_line2", "[", "43", ":", "51", "]", ")", "self", ".", "mean_motion", "=", "float", "(", "self", ".", "_line2", "[", "52", ":", "63", "]", ")", "self", ".", "orbit", "=", "int", "(", "self", ".", "_line2", "[", "63", ":", "68", "]", ")" ]
Parse values from TLE data.
[ "Parse", "values", "from", "TLE", "data", "." ]
train
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L227-L266
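The inner _read_tle_decimal above handles the TLE "implied decimal point" notation, in which a field such as the B* drag term packs a mantissa and a signed exponent with no dot. A standalone sketch with worked conversions (the field values are examples only):

def read_tle_decimal(rep):
    # ' 36258-4' -> '.36258e-4'  -> 3.6258e-05
    # '-11606-4' -> '-.11606e-4' -> -1.1606e-05
    sign, digits = '', rep[:-2].strip()
    if rep[0] in ('-', ' ', '+'):
        sign, digits = rep[0].strip(), rep[1:-2].strip()
    return float(sign + '.' + digits + 'e' + rep[-2:])

assert abs(read_tle_decimal(' 36258-4') - 3.6258e-05) < 1e-15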
lorien/user_agent
user_agent/base.py
fix_chrome_mac_platform
def fix_chrome_mac_platform(platform):
    """
    Chrome on Mac OS adds minor version number and uses underscores instead
    of dots. E.g. platform for Firefox will be: 'Intel Mac OS X 10.11'
    but for Chrome it will be 'Intel Mac OS X 10_11_6'.

    :param platform: - string like "Macintosh; Intel Mac OS X 10.8"
    :return: platform with version number including minor number and formatted
        with underscores, e.g. "Macintosh; Intel Mac OS X 10_8_2"
    """
    ver = platform.split('OS X ')[1]
    build_range = range(*MACOSX_CHROME_BUILD_RANGE[ver])
    build = choice(build_range)
    mac_ver = ver.replace('.', '_') + '_' + str(build)
    return 'Macintosh; Intel Mac OS X %s' % mac_ver
python
def fix_chrome_mac_platform(platform):
    """
    Chrome on Mac OS adds minor version number and uses underscores instead
    of dots. E.g. platform for Firefox will be: 'Intel Mac OS X 10.11'
    but for Chrome it will be 'Intel Mac OS X 10_11_6'.

    :param platform: - string like "Macintosh; Intel Mac OS X 10.8"
    :return: platform with version number including minor number and formatted
        with underscores, e.g. "Macintosh; Intel Mac OS X 10_8_2"
    """
    ver = platform.split('OS X ')[1]
    build_range = range(*MACOSX_CHROME_BUILD_RANGE[ver])
    build = choice(build_range)
    mac_ver = ver.replace('.', '_') + '_' + str(build)
    return 'Macintosh; Intel Mac OS X %s' % mac_ver
[ "def", "fix_chrome_mac_platform", "(", "platform", ")", ":", "ver", "=", "platform", ".", "split", "(", "'OS X '", ")", "[", "1", "]", "build_range", "=", "range", "(", "*", "MACOSX_CHROME_BUILD_RANGE", "[", "ver", "]", ")", "build", "=", "choice", "(", "build_range", ")", "mac_ver", "=", "ver", ".", "replace", "(", "'.'", ",", "'_'", ")", "+", "'_'", "+", "str", "(", "build", ")", "return", "'Macintosh; Intel Mac OS X %s'", "%", "mac_ver" ]
Chrome on Mac OS adds minor version number and uses underscores instead
of dots. E.g. platform for Firefox will be: 'Intel Mac OS X 10.11'
but for Chrome it will be 'Intel Mac OS X 10_11_6'.

:param platform: - string like "Macintosh; Intel Mac OS X 10.8"
:return: platform with version number including minor number and formatted
    with underscores, e.g. "Macintosh; Intel Mac OS X 10_8_2"
[ "Chrome", "on", "Mac", "OS", "adds", "minor", "version", "number", "and", "uses", "underscores", "instead", "of", "dots", ".", "E", ".", "g", ".", "platform", "for", "Firefox", "will", "be", ":", "Intel", "Mac", "OS", "X", "10", ".", "11", "but", "for", "Chrome", "it", "will", "be", "Intel", "Mac", "OS", "X", "10_11_6", "." ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L249-L263
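A toy illustration of the transformation performed above; the build-number range here is invented for the example, whereas the real function draws it from MACOSX_CHROME_BUILD_RANGE:

from random import choice

def chromify_mac_platform(platform, builds=range(0, 7)):
    ver = platform.split('OS X ')[1]
    return 'Macintosh; Intel Mac OS X %s_%s' % (ver.replace('.', '_'),
                                                choice(builds))

print(chromify_mac_platform('Macintosh; Intel Mac OS X 10.8'))
# e.g. 'Macintosh; Intel Mac OS X 10_8_2'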
lorien/user_agent
user_agent/base.py
build_system_components
def build_system_components(device_type, os_id, navigator_id):
    """
    For given os_id build random platform and oscpu components

    Returns dict {platform_version, platform, ua_platform, oscpu}

    platform_version is OS name used in different places
    ua_platform goes to navigator.platform
    platform is used in building navigator.userAgent
    oscpu goes to navigator.oscpu
    """

    if os_id == 'win':
        platform_version = choice(OS_PLATFORM['win'])
        cpu = choice(OS_CPU['win'])
        if cpu:
            platform = '%s; %s' % (platform_version, cpu)
        else:
            platform = platform_version
        res = {
            'platform_version': platform_version,
            'platform': platform,
            'ua_platform': platform,
            'oscpu': platform,
        }
    elif os_id == 'linux':
        cpu = choice(OS_CPU['linux'])
        platform_version = choice(OS_PLATFORM['linux'])
        platform = '%s %s' % (platform_version, cpu)
        res = {
            'platform_version': platform_version,
            'platform': platform,
            'ua_platform': platform,
            'oscpu': 'Linux %s' % cpu,
        }
    elif os_id == 'mac':
        cpu = choice(OS_CPU['mac'])
        platform_version = choice(OS_PLATFORM['mac'])
        platform = platform_version
        if navigator_id == 'chrome':
            platform = fix_chrome_mac_platform(platform)
        res = {
            'platform_version': platform_version,
            'platform': 'MacIntel',
            'ua_platform': platform,
            'oscpu': 'Intel Mac OS X %s' % platform.split(' ')[-1],
        }
    elif os_id == 'android':
        assert navigator_id in ('firefox', 'chrome')
        assert device_type in ('smartphone', 'tablet')
        platform_version = choice(OS_PLATFORM['android'])
        if navigator_id == 'firefox':
            if device_type == 'smartphone':
                ua_platform = '%s; Mobile' % platform_version
            elif device_type == 'tablet':
                ua_platform = '%s; Tablet' % platform_version
        elif navigator_id == 'chrome':
            device_id = choice(SMARTPHONE_DEV_IDS)
            ua_platform = 'Linux; %s; %s' % (platform_version, device_id)
        oscpu = 'Linux %s' % choice(OS_CPU['android'])
        res = {
            'platform_version': platform_version,
            'ua_platform': ua_platform,
            'platform': oscpu,
            'oscpu': oscpu,
        }
    return res
python
def build_system_components(device_type, os_id, navigator_id):
    """
    For given os_id build random platform and oscpu components

    Returns dict {platform_version, platform, ua_platform, oscpu}

    platform_version is OS name used in different places
    ua_platform goes to navigator.platform
    platform is used in building navigator.userAgent
    oscpu goes to navigator.oscpu
    """

    if os_id == 'win':
        platform_version = choice(OS_PLATFORM['win'])
        cpu = choice(OS_CPU['win'])
        if cpu:
            platform = '%s; %s' % (platform_version, cpu)
        else:
            platform = platform_version
        res = {
            'platform_version': platform_version,
            'platform': platform,
            'ua_platform': platform,
            'oscpu': platform,
        }
    elif os_id == 'linux':
        cpu = choice(OS_CPU['linux'])
        platform_version = choice(OS_PLATFORM['linux'])
        platform = '%s %s' % (platform_version, cpu)
        res = {
            'platform_version': platform_version,
            'platform': platform,
            'ua_platform': platform,
            'oscpu': 'Linux %s' % cpu,
        }
    elif os_id == 'mac':
        cpu = choice(OS_CPU['mac'])
        platform_version = choice(OS_PLATFORM['mac'])
        platform = platform_version
        if navigator_id == 'chrome':
            platform = fix_chrome_mac_platform(platform)
        res = {
            'platform_version': platform_version,
            'platform': 'MacIntel',
            'ua_platform': platform,
            'oscpu': 'Intel Mac OS X %s' % platform.split(' ')[-1],
        }
    elif os_id == 'android':
        assert navigator_id in ('firefox', 'chrome')
        assert device_type in ('smartphone', 'tablet')
        platform_version = choice(OS_PLATFORM['android'])
        if navigator_id == 'firefox':
            if device_type == 'smartphone':
                ua_platform = '%s; Mobile' % platform_version
            elif device_type == 'tablet':
                ua_platform = '%s; Tablet' % platform_version
        elif navigator_id == 'chrome':
            device_id = choice(SMARTPHONE_DEV_IDS)
            ua_platform = 'Linux; %s; %s' % (platform_version, device_id)
        oscpu = 'Linux %s' % choice(OS_CPU['android'])
        res = {
            'platform_version': platform_version,
            'ua_platform': ua_platform,
            'platform': oscpu,
            'oscpu': oscpu,
        }
    return res
[ "def", "build_system_components", "(", "device_type", ",", "os_id", ",", "navigator_id", ")", ":", "if", "os_id", "==", "'win'", ":", "platform_version", "=", "choice", "(", "OS_PLATFORM", "[", "'win'", "]", ")", "cpu", "=", "choice", "(", "OS_CPU", "[", "'win'", "]", ")", "if", "cpu", ":", "platform", "=", "'%s; %s'", "%", "(", "platform_version", ",", "cpu", ")", "else", ":", "platform", "=", "platform_version", "res", "=", "{", "'platform_version'", ":", "platform_version", ",", "'platform'", ":", "platform", ",", "'ua_platform'", ":", "platform", ",", "'oscpu'", ":", "platform", ",", "}", "elif", "os_id", "==", "'linux'", ":", "cpu", "=", "choice", "(", "OS_CPU", "[", "'linux'", "]", ")", "platform_version", "=", "choice", "(", "OS_PLATFORM", "[", "'linux'", "]", ")", "platform", "=", "'%s %s'", "%", "(", "platform_version", ",", "cpu", ")", "res", "=", "{", "'platform_version'", ":", "platform_version", ",", "'platform'", ":", "platform", ",", "'ua_platform'", ":", "platform", ",", "'oscpu'", ":", "'Linux %s'", "%", "cpu", ",", "}", "elif", "os_id", "==", "'mac'", ":", "cpu", "=", "choice", "(", "OS_CPU", "[", "'mac'", "]", ")", "platform_version", "=", "choice", "(", "OS_PLATFORM", "[", "'mac'", "]", ")", "platform", "=", "platform_version", "if", "navigator_id", "==", "'chrome'", ":", "platform", "=", "fix_chrome_mac_platform", "(", "platform", ")", "res", "=", "{", "'platform_version'", ":", "platform_version", ",", "'platform'", ":", "'MacIntel'", ",", "'ua_platform'", ":", "platform", ",", "'oscpu'", ":", "'Intel Mac OS X %s'", "%", "platform", ".", "split", "(", "' '", ")", "[", "-", "1", "]", ",", "}", "elif", "os_id", "==", "'android'", ":", "assert", "navigator_id", "in", "(", "'firefox'", ",", "'chrome'", ")", "assert", "device_type", "in", "(", "'smartphone'", ",", "'tablet'", ")", "platform_version", "=", "choice", "(", "OS_PLATFORM", "[", "'android'", "]", ")", "if", "navigator_id", "==", "'firefox'", ":", "if", "device_type", "==", "'smartphone'", ":", "ua_platform", "=", "'%s; Mobile'", "%", "platform_version", "elif", "device_type", "==", "'tablet'", ":", "ua_platform", "=", "'%s; Tablet'", "%", "platform_version", "elif", "navigator_id", "==", "'chrome'", ":", "device_id", "=", "choice", "(", "SMARTPHONE_DEV_IDS", ")", "ua_platform", "=", "'Linux; %s; %s'", "%", "(", "platform_version", ",", "device_id", ")", "oscpu", "=", "'Linux %s'", "%", "choice", "(", "OS_CPU", "[", "'android'", "]", ")", "res", "=", "{", "'platform_version'", ":", "platform_version", ",", "'ua_platform'", ":", "ua_platform", ",", "'platform'", ":", "oscpu", ",", "'oscpu'", ":", "oscpu", ",", "}", "return", "res" ]
For given os_id build random platform and oscpu components

Returns dict {platform_version, platform, ua_platform, oscpu}

platform_version is OS name used in different places
ua_platform goes to navigator.platform
platform is used in building navigator.userAgent
oscpu goes to navigator.oscpu
[ "For", "given", "os_id", "build", "random", "platform", "and", "oscpu", "components" ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L266-L333
lorien/user_agent
user_agent/base.py
build_app_components
def build_app_components(os_id, navigator_id):
    """
    For given navigator_id build app features

    Returns dict {name, product_sub, vendor, build_version, build_id}
    """

    if navigator_id == 'firefox':
        build_version, build_id = get_firefox_build()
        if os_id in ('win', 'linux', 'mac'):
            geckotrail = '20100101'
        else:
            geckotrail = build_version
        res = {
            'name': 'Netscape',
            'product_sub': '20100101',
            'vendor': '',
            'build_version': build_version,
            'build_id': build_id,
            'geckotrail': geckotrail,
        }
    elif navigator_id == 'chrome':
        res = {
            'name': 'Netscape',
            'product_sub': '20030107',
            'vendor': 'Google Inc.',
            'build_version': get_chrome_build(),
            'build_id': None,
        }
    elif navigator_id == 'ie':
        num_ver, build_version, trident_version = get_ie_build()
        if num_ver >= 11:
            app_name = 'Netscape'
        else:
            app_name = 'Microsoft Internet Explorer'
        res = {
            'name': app_name,
            'product_sub': None,
            'vendor': '',
            'build_version': build_version,
            'build_id': None,
            'trident_version': trident_version,
        }
    return res
python
def build_app_components(os_id, navigator_id):
    """
    For given navigator_id build app features

    Returns dict {name, product_sub, vendor, build_version, build_id}
    """

    if navigator_id == 'firefox':
        build_version, build_id = get_firefox_build()
        if os_id in ('win', 'linux', 'mac'):
            geckotrail = '20100101'
        else:
            geckotrail = build_version
        res = {
            'name': 'Netscape',
            'product_sub': '20100101',
            'vendor': '',
            'build_version': build_version,
            'build_id': build_id,
            'geckotrail': geckotrail,
        }
    elif navigator_id == 'chrome':
        res = {
            'name': 'Netscape',
            'product_sub': '20030107',
            'vendor': 'Google Inc.',
            'build_version': get_chrome_build(),
            'build_id': None,
        }
    elif navigator_id == 'ie':
        num_ver, build_version, trident_version = get_ie_build()
        if num_ver >= 11:
            app_name = 'Netscape'
        else:
            app_name = 'Microsoft Internet Explorer'
        res = {
            'name': app_name,
            'product_sub': None,
            'vendor': '',
            'build_version': build_version,
            'build_id': None,
            'trident_version': trident_version,
        }
    return res
[ "def", "build_app_components", "(", "os_id", ",", "navigator_id", ")", ":", "if", "navigator_id", "==", "'firefox'", ":", "build_version", ",", "build_id", "=", "get_firefox_build", "(", ")", "if", "os_id", "in", "(", "'win'", ",", "'linux'", ",", "'mac'", ")", ":", "geckotrail", "=", "'20100101'", "else", ":", "geckotrail", "=", "build_version", "res", "=", "{", "'name'", ":", "'Netscape'", ",", "'product_sub'", ":", "'20100101'", ",", "'vendor'", ":", "''", ",", "'build_version'", ":", "build_version", ",", "'build_id'", ":", "build_id", ",", "'geckotrail'", ":", "geckotrail", ",", "}", "elif", "navigator_id", "==", "'chrome'", ":", "res", "=", "{", "'name'", ":", "'Netscape'", ",", "'product_sub'", ":", "'20030107'", ",", "'vendor'", ":", "'Google Inc.'", ",", "'build_version'", ":", "get_chrome_build", "(", ")", ",", "'build_id'", ":", "None", ",", "}", "elif", "navigator_id", "==", "'ie'", ":", "num_ver", ",", "build_version", ",", "trident_version", "=", "get_ie_build", "(", ")", "if", "num_ver", ">=", "11", ":", "app_name", "=", "'Netscape'", "else", ":", "app_name", "=", "'Microsoft Internet Explorer'", "res", "=", "{", "'name'", ":", "app_name", ",", "'product_sub'", ":", "None", ",", "'vendor'", ":", "''", ",", "'build_version'", ":", "build_version", ",", "'build_id'", ":", "None", ",", "'trident_version'", ":", "trident_version", ",", "}", "return", "res" ]
For given navigator_id build app features Returns dict {name, product_sub, vendor, build_version, build_id}
[ "For", "given", "navigator_id", "build", "app", "features" ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L336-L379
lorien/user_agent
user_agent/base.py
get_option_choices
def get_option_choices(opt_name, opt_value, default_value, all_choices):
    """
    Generate possible choices for the option `opt_name`
    limited to `opt_value` value with default value
    as `default_value`
    """

    choices = []
    if isinstance(opt_value, six.string_types):
        choices = [opt_value]
    elif isinstance(opt_value, (list, tuple)):
        choices = list(opt_value)
    elif opt_value is None:
        choices = default_value
    else:
        raise InvalidOption('Option %s has invalid'
                            ' value: %s' % (opt_name, opt_value))
    if 'all' in choices:
        choices = all_choices
    for item in choices:
        if item not in all_choices:
            raise InvalidOption('Choices of option %s contains invalid'
                                ' item: %s' % (opt_name, item))
    return choices
python
def get_option_choices(opt_name, opt_value, default_value, all_choices):
    """
    Generate possible choices for the option `opt_name`
    limited to `opt_value` value with default value
    as `default_value`
    """

    choices = []
    if isinstance(opt_value, six.string_types):
        choices = [opt_value]
    elif isinstance(opt_value, (list, tuple)):
        choices = list(opt_value)
    elif opt_value is None:
        choices = default_value
    else:
        raise InvalidOption('Option %s has invalid'
                            ' value: %s' % (opt_name, opt_value))
    if 'all' in choices:
        choices = all_choices
    for item in choices:
        if item not in all_choices:
            raise InvalidOption('Choices of option %s contains invalid'
                                ' item: %s' % (opt_name, item))
    return choices
[ "def", "get_option_choices", "(", "opt_name", ",", "opt_value", ",", "default_value", ",", "all_choices", ")", ":", "choices", "=", "[", "]", "if", "isinstance", "(", "opt_value", ",", "six", ".", "string_types", ")", ":", "choices", "=", "[", "opt_value", "]", "elif", "isinstance", "(", "opt_value", ",", "(", "list", ",", "tuple", ")", ")", ":", "choices", "=", "list", "(", "opt_value", ")", "elif", "opt_value", "is", "None", ":", "choices", "=", "default_value", "else", ":", "raise", "InvalidOption", "(", "'Option %s has invalid'", "' value: %s'", "%", "(", "opt_name", ",", "opt_value", ")", ")", "if", "'all'", "in", "choices", ":", "choices", "=", "all_choices", "for", "item", "in", "choices", ":", "if", "item", "not", "in", "all_choices", ":", "raise", "InvalidOption", "(", "'Choices of option %s contains invalid'", "' item: %s'", "%", "(", "opt_name", ",", "item", ")", ")", "return", "choices" ]
Generate possible choices for the option `opt_name` limited to `opt_value` value with default value as `default_value`
[ "Generate", "possible", "choices", "for", "the", "option", "opt_name", "limited", "to", "opt_value", "value", "with", "default", "value", "as", "default_value" ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L382-L405
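How the option normalisation above behaves for the common input shapes; the calls below are illustrative and assume the function is imported from this module:

get_option_choices('os', 'win', ['linux'], ['win', 'linux', 'mac'])
# -> ['win']                  (a single string is wrapped in a list)
get_option_choices('os', None, ['linux'], ['win', 'linux', 'mac'])
# -> ['linux']                (None falls back to the supplied default)
get_option_choices('os', 'all', ['linux'], ['win', 'linux', 'mac'])
# -> ['win', 'linux', 'mac']  (the literal 'all' expands to every known choice)
# Any value outside all_choices raises InvalidOption.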
lorien/user_agent
user_agent/base.py
pick_config_ids
def pick_config_ids(device_type, os, navigator):
    """
    Select one random pair (device_type, os_id, navigator_id) from
    all possible combinations matching the given os and
    navigator filters.

    :param os: allowed os(es)
    :type os: string or list/tuple or None
    :param navigator: allowed browser engine(s)
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    """

    if os is None:
        default_dev_types = ['desktop']
    else:
        default_dev_types = list(DEVICE_TYPE_OS.keys())
    dev_type_choices = get_option_choices(
        'device_type', device_type, default_dev_types,
        list(DEVICE_TYPE_OS.keys())
    )
    os_choices = get_option_choices('os', os, list(OS_NAVIGATOR.keys()),
                                    list(OS_NAVIGATOR.keys()))
    nav_choices = get_option_choices('navigator', navigator,
                                     list(NAVIGATOR_OS.keys()),
                                     list(NAVIGATOR_OS.keys()))

    variants = []
    for dev, os, nav in product(dev_type_choices, os_choices, nav_choices):
        if (os in DEVICE_TYPE_OS[dev] and
                nav in DEVICE_TYPE_NAVIGATOR[dev] and
                nav in OS_NAVIGATOR[os]):
            variants.append((dev, os, nav))
    if not variants:
        raise InvalidOption('Options device_type, os and navigator'
                            ' conflicts with each other')
    device_type, os_id, navigator_id = choice(variants)

    assert os_id in OS_PLATFORM
    assert navigator_id in NAVIGATOR_OS
    assert device_type in DEVICE_TYPE_OS

    return device_type, os_id, navigator_id
python
def pick_config_ids(device_type, os, navigator):
    """
    Select one random pair (device_type, os_id, navigator_id) from
    all possible combinations matching the given os and
    navigator filters.

    :param os: allowed os(es)
    :type os: string or list/tuple or None
    :param navigator: allowed browser engine(s)
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    """

    if os is None:
        default_dev_types = ['desktop']
    else:
        default_dev_types = list(DEVICE_TYPE_OS.keys())
    dev_type_choices = get_option_choices(
        'device_type', device_type, default_dev_types,
        list(DEVICE_TYPE_OS.keys())
    )
    os_choices = get_option_choices('os', os, list(OS_NAVIGATOR.keys()),
                                    list(OS_NAVIGATOR.keys()))
    nav_choices = get_option_choices('navigator', navigator,
                                     list(NAVIGATOR_OS.keys()),
                                     list(NAVIGATOR_OS.keys()))

    variants = []
    for dev, os, nav in product(dev_type_choices, os_choices, nav_choices):
        if (os in DEVICE_TYPE_OS[dev] and
                nav in DEVICE_TYPE_NAVIGATOR[dev] and
                nav in OS_NAVIGATOR[os]):
            variants.append((dev, os, nav))
    if not variants:
        raise InvalidOption('Options device_type, os and navigator'
                            ' conflicts with each other')
    device_type, os_id, navigator_id = choice(variants)

    assert os_id in OS_PLATFORM
    assert navigator_id in NAVIGATOR_OS
    assert device_type in DEVICE_TYPE_OS

    return device_type, os_id, navigator_id
[ "def", "pick_config_ids", "(", "device_type", ",", "os", ",", "navigator", ")", ":", "if", "os", "is", "None", ":", "default_dev_types", "=", "[", "'desktop'", "]", "else", ":", "default_dev_types", "=", "list", "(", "DEVICE_TYPE_OS", ".", "keys", "(", ")", ")", "dev_type_choices", "=", "get_option_choices", "(", "'device_type'", ",", "device_type", ",", "default_dev_types", ",", "list", "(", "DEVICE_TYPE_OS", ".", "keys", "(", ")", ")", ")", "os_choices", "=", "get_option_choices", "(", "'os'", ",", "os", ",", "list", "(", "OS_NAVIGATOR", ".", "keys", "(", ")", ")", ",", "list", "(", "OS_NAVIGATOR", ".", "keys", "(", ")", ")", ")", "nav_choices", "=", "get_option_choices", "(", "'navigator'", ",", "navigator", ",", "list", "(", "NAVIGATOR_OS", ".", "keys", "(", ")", ")", ",", "list", "(", "NAVIGATOR_OS", ".", "keys", "(", ")", ")", ")", "variants", "=", "[", "]", "for", "dev", ",", "os", ",", "nav", "in", "product", "(", "dev_type_choices", ",", "os_choices", ",", "nav_choices", ")", ":", "if", "(", "os", "in", "DEVICE_TYPE_OS", "[", "dev", "]", "and", "nav", "in", "DEVICE_TYPE_NAVIGATOR", "[", "dev", "]", "and", "nav", "in", "OS_NAVIGATOR", "[", "os", "]", ")", ":", "variants", ".", "append", "(", "(", "dev", ",", "os", ",", "nav", ")", ")", "if", "not", "variants", ":", "raise", "InvalidOption", "(", "'Options device_type, os and navigator'", "' conflicts with each other'", ")", "device_type", ",", "os_id", ",", "navigator_id", "=", "choice", "(", "variants", ")", "assert", "os_id", "in", "OS_PLATFORM", "assert", "navigator_id", "in", "NAVIGATOR_OS", "assert", "device_type", "in", "DEVICE_TYPE_OS", "return", "device_type", ",", "os_id", ",", "navigator_id" ]
Select one random pair (device_type, os_id, navigator_id) from all possible combinations matching the given os and navigator filters. :param os: allowed os(es) :type os: string or list/tuple or None :param navigator: allowed browser engine(s) :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all"
[ "Select", "one", "random", "pair", "(", "device_type", "os_id", "navigator_id", ")", "from", "all", "possible", "combinations", "matching", "the", "given", "os", "and", "navigator", "filters", "." ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L408-L455
lorien/user_agent
user_agent/base.py
generate_navigator
def generate_navigator(os=None, navigator=None,
                       platform=None, device_type=None):
    """
    Generates web navigator's config

    :param os: limit list of oses for generation
    :type os: string or list/tuple or None
    :param navigator: limit list of browser engines for generation
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    :return: User-Agent config
    :rtype: dict with keys (os, name, platform, oscpu, build_version,
        build_id, app_version, app_name, app_code_name, product,
        product_sub, vendor, vendor_sub, user_agent)
    :raises InvalidOption: if could not generate user-agent for
        any combination of allowed platforms and navigators
    :raise InvalidOption: if any of passed options is invalid
    """

    if platform is not None:
        os = platform
        warn('The `platform` option is deprecated.'
             ' Use `os` option instead.', stacklevel=3)
    device_type, os_id, navigator_id = (
        pick_config_ids(device_type, os, navigator)
    )
    system = build_system_components(
        device_type, os_id, navigator_id)
    app = build_app_components(os_id, navigator_id)
    ua_template = choose_ua_template(
        device_type, navigator_id, app)
    user_agent = ua_template.format(system=system, app=app)
    app_version = build_navigator_app_version(
        os_id, navigator_id, system['platform_version'], user_agent)
    return {
        # ids
        'os_id': os_id,
        'navigator_id': navigator_id,
        # system components
        'platform': system['platform'],
        'oscpu': system['oscpu'],
        # app components
        'build_version': app['build_version'],
        'build_id': app['build_id'],
        'app_version': app_version,
        'app_name': app['name'],
        'app_code_name': 'Mozilla',
        'product': 'Gecko',
        'product_sub': app['product_sub'],
        'vendor': app['vendor'],
        'vendor_sub': '',
        # compiled user agent
        'user_agent': user_agent,
    }
python
def generate_navigator(os=None, navigator=None,
                       platform=None, device_type=None):
    """
    Generates web navigator's config

    :param os: limit list of oses for generation
    :type os: string or list/tuple or None
    :param navigator: limit list of browser engines for generation
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    :return: User-Agent config
    :rtype: dict with keys (os, name, platform, oscpu, build_version,
        build_id, app_version, app_name, app_code_name, product,
        product_sub, vendor, vendor_sub, user_agent)
    :raises InvalidOption: if could not generate user-agent for
        any combination of allowed platforms and navigators
    :raise InvalidOption: if any of passed options is invalid
    """

    if platform is not None:
        os = platform
        warn('The `platform` option is deprecated.'
             ' Use `os` option instead.', stacklevel=3)
    device_type, os_id, navigator_id = (
        pick_config_ids(device_type, os, navigator)
    )
    system = build_system_components(
        device_type, os_id, navigator_id)
    app = build_app_components(os_id, navigator_id)
    ua_template = choose_ua_template(
        device_type, navigator_id, app)
    user_agent = ua_template.format(system=system, app=app)
    app_version = build_navigator_app_version(
        os_id, navigator_id, system['platform_version'], user_agent)
    return {
        # ids
        'os_id': os_id,
        'navigator_id': navigator_id,
        # system components
        'platform': system['platform'],
        'oscpu': system['oscpu'],
        # app components
        'build_version': app['build_version'],
        'build_id': app['build_id'],
        'app_version': app_version,
        'app_name': app['name'],
        'app_code_name': 'Mozilla',
        'product': 'Gecko',
        'product_sub': app['product_sub'],
        'vendor': app['vendor'],
        'vendor_sub': '',
        # compiled user agent
        'user_agent': user_agent,
    }
[ "def", "generate_navigator", "(", "os", "=", "None", ",", "navigator", "=", "None", ",", "platform", "=", "None", ",", "device_type", "=", "None", ")", ":", "if", "platform", "is", "not", "None", ":", "os", "=", "platform", "warn", "(", "'The `platform` option is deprecated.'", "' Use `os` option instead.'", ",", "stacklevel", "=", "3", ")", "device_type", ",", "os_id", ",", "navigator_id", "=", "(", "pick_config_ids", "(", "device_type", ",", "os", ",", "navigator", ")", ")", "system", "=", "build_system_components", "(", "device_type", ",", "os_id", ",", "navigator_id", ")", "app", "=", "build_app_components", "(", "os_id", ",", "navigator_id", ")", "ua_template", "=", "choose_ua_template", "(", "device_type", ",", "navigator_id", ",", "app", ")", "user_agent", "=", "ua_template", ".", "format", "(", "system", "=", "system", ",", "app", "=", "app", ")", "app_version", "=", "build_navigator_app_version", "(", "os_id", ",", "navigator_id", ",", "system", "[", "'platform_version'", "]", ",", "user_agent", ")", "return", "{", "# ids", "'os_id'", ":", "os_id", ",", "'navigator_id'", ":", "navigator_id", ",", "# system components", "'platform'", ":", "system", "[", "'platform'", "]", ",", "'oscpu'", ":", "system", "[", "'oscpu'", "]", ",", "# app components", "'build_version'", ":", "app", "[", "'build_version'", "]", ",", "'build_id'", ":", "app", "[", "'build_id'", "]", ",", "'app_version'", ":", "app_version", ",", "'app_name'", ":", "app", "[", "'name'", "]", ",", "'app_code_name'", ":", "'Mozilla'", ",", "'product'", ":", "'Gecko'", ",", "'product_sub'", ":", "app", "[", "'product_sub'", "]", ",", "'vendor'", ":", "app", "[", "'vendor'", "]", ",", "'vendor_sub'", ":", "''", ",", "# compiled user agent", "'user_agent'", ":", "user_agent", ",", "}" ]
Generates web navigator's config

:param os: limit list of oses for generation
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
    "desktop", "smartphone", "tablet", "all"
:return: User-Agent config
:rtype: dict with keys (os, name, platform, oscpu, build_version,
    build_id, app_version, app_name, app_code_name, product,
    product_sub, vendor, vendor_sub, user_agent)
:raises InvalidOption: if could not generate user-agent for
    any combination of allowed platforms and navigators
:raise InvalidOption: if any of passed options is invalid
[ "Generates", "web", "navigator", "s", "config" ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L489-L546
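An illustrative call to generate_navigator, assuming the user_agent package exposes it at the top level; concrete values differ on every call because platform and browser versions are randomised:

from user_agent import generate_navigator

config = generate_navigator(os='linux', navigator='chrome')
print(config['user_agent'])                 # full User-Agent string
print(config['platform'], config['oscpu'])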
lorien/user_agent
user_agent/base.py
generate_user_agent
def generate_user_agent(os=None, navigator=None,
                        platform=None, device_type=None):
    """
    Generates HTTP User-Agent header

    :param os: limit list of os for generation
    :type os: string or list/tuple or None
    :param navigator: limit list of browser engines for generation
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    :return: User-Agent string
    :rtype: string
    :raises InvalidOption: if could not generate user-agent for
        any combination of allowed oses and navigators
    :raise InvalidOption: if any of passed options is invalid
    """
    return generate_navigator(os=os, navigator=navigator,
                              platform=platform,
                              device_type=device_type)['user_agent']
python
def generate_user_agent(os=None, navigator=None,
                        platform=None, device_type=None):
    """
    Generates HTTP User-Agent header

    :param os: limit list of os for generation
    :type os: string or list/tuple or None
    :param navigator: limit list of browser engines for generation
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    :return: User-Agent string
    :rtype: string
    :raises InvalidOption: if could not generate user-agent for
        any combination of allowed oses and navigators
    :raise InvalidOption: if any of passed options is invalid
    """
    return generate_navigator(os=os, navigator=navigator,
                              platform=platform,
                              device_type=device_type)['user_agent']
[ "def", "generate_user_agent", "(", "os", "=", "None", ",", "navigator", "=", "None", ",", "platform", "=", "None", ",", "device_type", "=", "None", ")", ":", "return", "generate_navigator", "(", "os", "=", "os", ",", "navigator", "=", "navigator", ",", "platform", "=", "platform", ",", "device_type", "=", "device_type", ")", "[", "'user_agent'", "]" ]
Generates HTTP User-Agent header

:param os: limit list of os for generation
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
    "desktop", "smartphone", "tablet", "all"
:return: User-Agent string
:rtype: string
:raises InvalidOption: if could not generate user-agent for
    any combination of allowed oses and navigators
:raise InvalidOption: if any of passed options is invalid
[ "Generates", "HTTP", "User", "-", "Agent", "header" ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L549-L569
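A usage sketch for generate_user_agent, with the generated string attached to a plain urllib request; the target URL is a placeholder:

from user_agent import generate_user_agent
import urllib.request

ua = generate_user_agent(os=('win', 'mac'), navigator='firefox')
req = urllib.request.Request('https://example.com',
                             headers={'User-Agent': ua})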
lorien/user_agent
user_agent/base.py
generate_navigator_js
def generate_navigator_js(os=None, navigator=None,
                          platform=None, device_type=None):
    """
    Generates web navigator's config with keys corresponding
    to keys of `windows.navigator` JavaScript object.

    :param os: limit list of oses for generation
    :type os: string or list/tuple or None
    :param navigator: limit list of browser engines for generation
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    :return: User-Agent config
    :rtype: dict with keys (TODO)
    :raises InvalidOption: if could not generate user-agent for
        any combination of allowed oses and navigators
    :raise InvalidOption: if any of passed options is invalid
    """

    config = generate_navigator(os=os, navigator=navigator,
                                platform=platform, device_type=device_type)
    return {
        'appCodeName': config['app_code_name'],
        'appName': config['app_name'],
        'appVersion': config['app_version'],
        'platform': config['platform'],
        'userAgent': config['user_agent'],
        'oscpu': config['oscpu'],
        'product': config['product'],
        'productSub': config['product_sub'],
        'vendor': config['vendor'],
        'vendorSub': config['vendor_sub'],
        'buildID': config['build_id'],
    }
python
def generate_navigator_js(os=None, navigator=None,
                          platform=None, device_type=None):
    """
    Generates web navigator's config with keys corresponding
    to keys of `windows.navigator` JavaScript object.

    :param os: limit list of oses for generation
    :type os: string or list/tuple or None
    :param navigator: limit list of browser engines for generation
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    :return: User-Agent config
    :rtype: dict with keys (TODO)
    :raises InvalidOption: if could not generate user-agent for
        any combination of allowed oses and navigators
    :raise InvalidOption: if any of passed options is invalid
    """

    config = generate_navigator(os=os, navigator=navigator,
                                platform=platform, device_type=device_type)
    return {
        'appCodeName': config['app_code_name'],
        'appName': config['app_name'],
        'appVersion': config['app_version'],
        'platform': config['platform'],
        'userAgent': config['user_agent'],
        'oscpu': config['oscpu'],
        'product': config['product'],
        'productSub': config['product_sub'],
        'vendor': config['vendor'],
        'vendorSub': config['vendor_sub'],
        'buildID': config['build_id'],
    }
[ "def", "generate_navigator_js", "(", "os", "=", "None", ",", "navigator", "=", "None", ",", "platform", "=", "None", ",", "device_type", "=", "None", ")", ":", "config", "=", "generate_navigator", "(", "os", "=", "os", ",", "navigator", "=", "navigator", ",", "platform", "=", "platform", ",", "device_type", "=", "device_type", ")", "return", "{", "'appCodeName'", ":", "config", "[", "'app_code_name'", "]", ",", "'appName'", ":", "config", "[", "'app_name'", "]", ",", "'appVersion'", ":", "config", "[", "'app_version'", "]", ",", "'platform'", ":", "config", "[", "'platform'", "]", ",", "'userAgent'", ":", "config", "[", "'user_agent'", "]", ",", "'oscpu'", ":", "config", "[", "'oscpu'", "]", ",", "'product'", ":", "config", "[", "'product'", "]", ",", "'productSub'", ":", "config", "[", "'product_sub'", "]", ",", "'vendor'", ":", "config", "[", "'vendor'", "]", ",", "'vendorSub'", ":", "config", "[", "'vendor_sub'", "]", ",", "'buildID'", ":", "config", "[", "'build_id'", "]", ",", "}" ]
Generates web navigator's config with keys corresponding
to keys of `windows.navigator` JavaScript object.

:param os: limit list of oses for generation
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
    "desktop", "smartphone", "tablet", "all"
:return: User-Agent config
:rtype: dict with keys (TODO)
:raises InvalidOption: if could not generate user-agent for
    any combination of allowed oses and navigators
:raise InvalidOption: if any of passed options is invalid
[ "Generates", "web", "navigator", "s", "config", "with", "keys", "corresponding", "to", "keys", "of", "windows", ".", "navigator", "JavaScript", "object", "." ]
train
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L572-L607
thiezn/iperf3-python
iperf3/iperf3.py
more_data
def more_data(pipe_out):
    """Check if there is more data left on the pipe

    :param pipe_out: The os pipe_out
    :rtype: bool
    """
    r, _, _ = select.select([pipe_out], [], [], 0)
    return bool(r)
python
def more_data(pipe_out):
    """Check if there is more data left on the pipe

    :param pipe_out: The os pipe_out
    :rtype: bool
    """
    r, _, _ = select.select([pipe_out], [], [], 0)
    return bool(r)
[ "def", "more_data", "(", "pipe_out", ")", ":", "r", ",", "_", ",", "_", "=", "select", ".", "select", "(", "[", "pipe_out", "]", ",", "[", "]", ",", "[", "]", ",", "0", ")", "return", "bool", "(", "r", ")" ]
Check if there is more data left on the pipe :param pipe_out: The os pipe_out :rtype: bool
[ "Check", "if", "there", "is", "more", "data", "left", "on", "the", "pipe" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L40-L47
thiezn/iperf3-python
iperf3/iperf3.py
read_pipe
def read_pipe(pipe_out):
    """Read data on a pipe

    Used to capture stdout data produced by libiperf

    :param pipe_out: The os pipe_out
    :rtype: unicode string
    """
    out = b''
    while more_data(pipe_out):
        out += os.read(pipe_out, 1024)

    return out.decode('utf-8')
python
def read_pipe(pipe_out):
    """Read data on a pipe

    Used to capture stdout data produced by libiperf

    :param pipe_out: The os pipe_out
    :rtype: unicode string
    """
    out = b''
    while more_data(pipe_out):
        out += os.read(pipe_out, 1024)

    return out.decode('utf-8')
[ "def", "read_pipe", "(", "pipe_out", ")", ":", "out", "=", "b''", "while", "more_data", "(", "pipe_out", ")", ":", "out", "+=", "os", ".", "read", "(", "pipe_out", ",", "1024", ")", "return", "out", ".", "decode", "(", "'utf-8'", ")" ]
Read data on a pipe

Used to capture stdout data produced by libiperf

:param pipe_out: The os pipe_out
:rtype: unicode string
[ "Read", "data", "on", "a", "pipe" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L50-L62
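more_data and read_pipe above are built around an ordinary os.pipe plus a zero-timeout select; a self-contained sketch of the same capture pattern on a POSIX system, independent of libiperf:

import os
import select

def drain(pipe_out):
    out = b''
    while select.select([pipe_out], [], [], 0)[0]:
        out += os.read(pipe_out, 1024)
    return out.decode('utf-8')

read_end, write_end = os.pipe()
os.write(write_end, b'hello from the write end')
print(drain(read_end))      # -> hello from the write end
os.close(read_end)
os.close(write_end)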
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.role
def role(self):
    """The iperf3 instance role

    valid roles are 'c'=client and 's'=server

    :rtype: 'c' or 's'
    """
    try:
        self._role = c_char(
            self.lib.iperf_get_test_role(self._test)
        ).value.decode('utf-8')
    except TypeError:
        self._role = c_char(
            chr(self.lib.iperf_get_test_role(self._test))
        ).value.decode('utf-8')
    return self._role
python
def role(self):
    """The iperf3 instance role

    valid roles are 'c'=client and 's'=server

    :rtype: 'c' or 's'
    """
    try:
        self._role = c_char(
            self.lib.iperf_get_test_role(self._test)
        ).value.decode('utf-8')
    except TypeError:
        self._role = c_char(
            chr(self.lib.iperf_get_test_role(self._test))
        ).value.decode('utf-8')
    return self._role
[ "def", "role", "(", "self", ")", ":", "try", ":", "self", ".", "_role", "=", "c_char", "(", "self", ".", "lib", ".", "iperf_get_test_role", "(", "self", ".", "_test", ")", ")", ".", "value", ".", "decode", "(", "'utf-8'", ")", "except", "TypeError", ":", "self", ".", "_role", "=", "c_char", "(", "chr", "(", "self", ".", "lib", ".", "iperf_get_test_role", "(", "self", ".", "_test", ")", ")", ")", ".", "value", ".", "decode", "(", "'utf-8'", ")", "return", "self", ".", "_role" ]
The iperf3 instance role

valid roles are 'c'=client and 's'=server

:rtype: 'c' or 's'
[ "The", "iperf3", "instance", "role" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L246-L261
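A typical client-side use of this wrapper, with the server address, port and result attribute names taken from the project README; treat the exact attributes as illustrative, and note that libiperf must be installed for the bindings to load:

import iperf3

client = iperf3.Client()
client.server_hostname = '127.0.0.1'
client.port = 5201
client.duration = 5
result = client.run()
if result.error:
    print(result.error)
else:
    print(result.sent_Mbps, result.received_Mbps)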
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.bind_address
def bind_address(self):
    """The bind address the iperf3 instance will listen on

    use * to listen on all available IPs

    :rtype: string
    """
    result = c_char_p(
        self.lib.iperf_get_test_bind_address(self._test)
    ).value
    if result:
        self._bind_address = result.decode('utf-8')
    else:
        self._bind_address = '*'

    return self._bind_address
python
def bind_address(self):
    """The bind address the iperf3 instance will listen on

    use * to listen on all available IPs

    :rtype: string
    """
    result = c_char_p(
        self.lib.iperf_get_test_bind_address(self._test)
    ).value
    if result:
        self._bind_address = result.decode('utf-8')
    else:
        self._bind_address = '*'

    return self._bind_address
[ "def", "bind_address", "(", "self", ")", ":", "result", "=", "c_char_p", "(", "self", ".", "lib", ".", "iperf_get_test_bind_address", "(", "self", ".", "_test", ")", ")", ".", "value", "if", "result", ":", "self", ".", "_bind_address", "=", "result", ".", "decode", "(", "'utf-8'", ")", "else", ":", "self", ".", "_bind_address", "=", "'*'", "return", "self", ".", "_bind_address" ]
The bind address the iperf3 instance will listen on

use * to listen on all available IPs

:rtype: string
[ "The", "bind", "address", "the", "iperf3", "instance", "will", "listen", "on" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L275-L289
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.port
def port(self):
    """The port the iperf3 server is listening on"""
    self._port = self.lib.iperf_get_test_server_port(self._test)
    return self._port
python
def port(self):
    """The port the iperf3 server is listening on"""
    self._port = self.lib.iperf_get_test_server_port(self._test)
    return self._port
[ "def", "port", "(", "self", ")", ":", "self", ".", "_port", "=", "self", ".", "lib", ".", "iperf_get_test_server_port", "(", "self", ".", "_test", ")", "return", "self", ".", "_port" ]
The port the iperf3 server is listening on
[ "The", "port", "the", "iperf3", "server", "is", "listening", "on" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L300-L303
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.json_output
def json_output(self):
    """Toggles json output of libiperf

    Turning this off will output the iperf3 instance results to
    stdout/stderr

    :rtype: bool
    """
    enabled = self.lib.iperf_get_test_json_output(self._test)

    if enabled:
        self._json_output = True
    else:
        self._json_output = False

    return self._json_output
python
def json_output(self):
    """Toggles json output of libiperf

    Turning this off will output the iperf3 instance results to
    stdout/stderr

    :rtype: bool
    """
    enabled = self.lib.iperf_get_test_json_output(self._test)

    if enabled:
        self._json_output = True
    else:
        self._json_output = False

    return self._json_output
[ "def", "json_output", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_test_json_output", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_json_output", "=", "True", "else", ":", "self", ".", "_json_output", "=", "False", "return", "self", ".", "_json_output" ]
Toggles json output of libiperf

Turning this off will output the iperf3 instance results to
stdout/stderr

:rtype: bool
[ "Toggles", "json", "output", "of", "libiperf" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L311-L326
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.verbose
def verbose(self):
    """Toggles verbose output for the iperf3 instance

    :rtype: bool
    """
    enabled = self.lib.iperf_get_verbose(self._test)

    if enabled:
        self._verbose = True
    else:
        self._verbose = False

    return self._verbose
python
def verbose(self):
    """Toggles verbose output for the iperf3 instance

    :rtype: bool
    """
    enabled = self.lib.iperf_get_verbose(self._test)

    if enabled:
        self._verbose = True
    else:
        self._verbose = False

    return self._verbose
[ "def", "verbose", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_verbose", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_verbose", "=", "True", "else", ":", "self", ".", "_verbose", "=", "False", "return", "self", ".", "_verbose" ]
Toggles verbose output for the iperf3 instance :rtype: bool
[ "Toggles", "verbose", "output", "for", "the", "iperf3", "instance" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L338-L350
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.iperf_version
def iperf_version(self):
    """Returns the version of the libiperf library

    :rtype: string
    """
    # TODO: Is there a better way to get the const char than allocating 30?
    VersionType = c_char * 30
    return VersionType.in_dll(self.lib, "version").value.decode('utf-8')
python
def iperf_version(self):
    """Returns the version of the libiperf library

    :rtype: string
    """
    # TODO: Is there a better way to get the const char than allocating 30?
    VersionType = c_char * 30
    return VersionType.in_dll(self.lib, "version").value.decode('utf-8')
[ "def", "iperf_version", "(", "self", ")", ":", "# TODO: Is there a better way to get the const char than allocating 30?", "VersionType", "=", "c_char", "*", "30", "return", "VersionType", ".", "in_dll", "(", "self", ".", "lib", ",", "\"version\"", ")", ".", "value", ".", "decode", "(", "'utf-8'", ")" ]
Returns the version of the libiperf library :rtype: string
[ "Returns", "the", "version", "of", "the", "libiperf", "library" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L369-L376
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3._error_to_string
def _error_to_string(self, error_id):
    """Returns an error string from libiperf

    :param error_id: The error_id produced by libiperf
    :rtype: string
    """
    strerror = self.lib.iperf_strerror
    strerror.restype = c_char_p
    return strerror(error_id).decode('utf-8')
python
def _error_to_string(self, error_id):
    """Returns an error string from libiperf

    :param error_id: The error_id produced by libiperf
    :rtype: string
    """
    strerror = self.lib.iperf_strerror
    strerror.restype = c_char_p
    return strerror(error_id).decode('utf-8')
[ "def", "_error_to_string", "(", "self", ",", "error_id", ")", ":", "strerror", "=", "self", ".", "lib", ".", "iperf_strerror", "strerror", ".", "restype", "=", "c_char_p", "return", "strerror", "(", "error_id", ")", ".", "decode", "(", "'utf-8'", ")" ]
Returns an error string from libiperf :param error_id: The error_id produced by libiperf :rtype: string
[ "Returns", "an", "error", "string", "from", "libiperf" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L378-L386
thiezn/iperf3-python
iperf3/iperf3.py
Client.server_hostname
def server_hostname(self):
    """The server hostname to connect to.

    Accepts DNS entries or IP addresses.

    :rtype: string
    """
    result = c_char_p(
        self.lib.iperf_get_test_server_hostname(self._test)
    ).value
    if result:
        self._server_hostname = result.decode('utf-8')
    else:
        self._server_hostname = None

    return self._server_hostname
python
def server_hostname(self):
    """The server hostname to connect to.

    Accepts DNS entries or IP addresses.

    :rtype: string
    """
    result = c_char_p(
        self.lib.iperf_get_test_server_hostname(self._test)
    ).value
    if result:
        self._server_hostname = result.decode('utf-8')
    else:
        self._server_hostname = None

    return self._server_hostname
[ "def", "server_hostname", "(", "self", ")", ":", "result", "=", "c_char_p", "(", "self", ".", "lib", ".", "iperf_get_test_server_hostname", "(", "self", ".", "_test", ")", ")", ".", "value", "if", "result", ":", "self", ".", "_server_hostname", "=", "result", ".", "decode", "(", "'utf-8'", ")", "else", ":", "self", ".", "_server_hostname", "=", "None", "return", "self", ".", "_server_hostname" ]
The server hostname to connect to. Accepts DNS entries or IP addresses. :rtype: string
[ "The", "server", "hostname", "to", "connect", "to", "." ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L432-L446
thiezn/iperf3-python
iperf3/iperf3.py
Client.protocol
def protocol(self):
    """The iperf3 instance protocol

    valid protocols are 'tcp' and 'udp'

    :rtype: str
    """
    proto_id = self.lib.iperf_get_test_protocol_id(self._test)

    if proto_id == SOCK_STREAM:
        self._protocol = 'tcp'
    elif proto_id == SOCK_DGRAM:
        self._protocol = 'udp'

    return self._protocol
python
def protocol(self): """The iperf3 instance protocol valid protocols are 'tcp' and 'udp' :rtype: str """ proto_id = self.lib.iperf_get_test_protocol_id(self._test) if proto_id == SOCK_STREAM: self._protocol = 'tcp' elif proto_id == SOCK_DGRAM: self._protocol = 'udp' return self._protocol
[ "def", "protocol", "(", "self", ")", ":", "proto_id", "=", "self", ".", "lib", ".", "iperf_get_test_protocol_id", "(", "self", ".", "_test", ")", "if", "proto_id", "==", "SOCK_STREAM", ":", "self", ".", "_protocol", "=", "'tcp'", "elif", "proto_id", "==", "SOCK_DGRAM", ":", "self", ".", "_protocol", "=", "'udp'", "return", "self", ".", "_protocol" ]
The iperf3 instance protocol valid protocols are 'tcp' and 'udp' :rtype: str
[ "The", "iperf3", "instance", "protocol" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L457-L471
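The `protocol` property above maps libiperf's protocol id onto `'tcp'`/`'udp'` by comparing against `SOCK_STREAM` and `SOCK_DGRAM`. Assuming those constants are the ones from the standard `socket` module, the same mapping can be written as a small lookup:

```python
from socket import SOCK_STREAM, SOCK_DGRAM

def protocol_name(proto_id):
    """Translate a socket-type constant into the name iperf3 reports."""
    return {SOCK_STREAM: 'tcp', SOCK_DGRAM: 'udp'}.get(proto_id, 'unknown')

print(protocol_name(SOCK_STREAM))  # tcp
```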
thiezn/iperf3-python
iperf3/iperf3.py
Client.omit
def omit(self): """The test startup duration to omit in seconds.""" self._omit = self.lib.iperf_get_test_omit(self._test) return self._omit
python
def omit(self): """The test startup duration to omit in seconds.""" self._omit = self.lib.iperf_get_test_omit(self._test) return self._omit
[ "def", "omit", "(", "self", ")", ":", "self", ".", "_omit", "=", "self", ".", "lib", ".", "iperf_get_test_omit", "(", "self", ".", "_test", ")", "return", "self", ".", "_omit" ]
The test startup duration to omit in seconds.
[ "The", "test", "startup", "duration", "to", "omit", "in", "seconds", "." ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L486-L489
thiezn/iperf3-python
iperf3/iperf3.py
Client.duration
def duration(self): """The test duration in seconds.""" self._duration = self.lib.iperf_get_test_duration(self._test) return self._duration
python
def duration(self): """The test duration in seconds.""" self._duration = self.lib.iperf_get_test_duration(self._test) return self._duration
[ "def", "duration", "(", "self", ")", ":", "self", ".", "_duration", "=", "self", ".", "lib", ".", "iperf_get_test_duration", "(", "self", ".", "_test", ")", "return", "self", ".", "_duration" ]
The test duration in seconds.
[ "The", "test", "duration", "in", "seconds", "." ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L497-L500
thiezn/iperf3-python
iperf3/iperf3.py
Client.bandwidth
def bandwidth(self): """Target bandwidth in bits/sec""" self._bandwidth = self.lib.iperf_get_test_rate(self._test) return self._bandwidth
python
def bandwidth(self): """Target bandwidth in bits/sec""" self._bandwidth = self.lib.iperf_get_test_rate(self._test) return self._bandwidth
[ "def", "bandwidth", "(", "self", ")", ":", "self", ".", "_bandwidth", "=", "self", ".", "lib", ".", "iperf_get_test_rate", "(", "self", ".", "_test", ")", "return", "self", ".", "_bandwidth" ]
Target bandwidth in bits/sec
[ "Target", "bandwidth", "in", "bits", "/", "sec" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L508-L511
thiezn/iperf3-python
iperf3/iperf3.py
Client.blksize
def blksize(self): """The test blksize.""" self._blksize = self.lib.iperf_get_test_blksize(self._test) return self._blksize
python
def blksize(self): """The test blksize.""" self._blksize = self.lib.iperf_get_test_blksize(self._test) return self._blksize
[ "def", "blksize", "(", "self", ")", ":", "self", ".", "_blksize", "=", "self", ".", "lib", ".", "iperf_get_test_blksize", "(", "self", ".", "_test", ")", "return", "self", ".", "_blksize" ]
The test blksize.
[ "The", "test", "blksize", "." ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L519-L522
thiezn/iperf3-python
iperf3/iperf3.py
Client.num_streams
def num_streams(self): """The number of streams to use.""" self._num_streams = self.lib.iperf_get_test_num_streams(self._test) return self._num_streams
python
def num_streams(self): """The number of streams to use.""" self._num_streams = self.lib.iperf_get_test_num_streams(self._test) return self._num_streams
[ "def", "num_streams", "(", "self", ")", ":", "self", ".", "_num_streams", "=", "self", ".", "lib", ".", "iperf_get_test_num_streams", "(", "self", ".", "_test", ")", "return", "self", ".", "_num_streams" ]
The number of streams to use.
[ "The", "number", "of", "streams", "to", "use", "." ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L552-L555
thiezn/iperf3-python
iperf3/iperf3.py
Client.reverse
def reverse(self): """Toggles direction of test :rtype: bool """ enabled = self.lib.iperf_get_test_reverse(self._test) if enabled: self._reverse = True else: self._reverse = False return self._reverse
python
def reverse(self): """Toggles direction of test :rtype: bool """ enabled = self.lib.iperf_get_test_reverse(self._test) if enabled: self._reverse = True else: self._reverse = False return self._reverse
[ "def", "reverse", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_test_reverse", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_reverse", "=", "True", "else", ":", "self", ".", "_reverse", "=", "False", "return", "self", ".", "_reverse" ]
Toggles direction of test :rtype: bool
[ "Toggles", "direction", "of", "test" ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L586-L598
thiezn/iperf3-python
iperf3/iperf3.py
Client.run
def run(self): """Run the current test client. :rtype: instance of :class:`TestResult` """ if self.json_output: output_to_pipe(self._pipe_in) # Disable stdout error = self.lib.iperf_run_client(self._test) if not self.iperf_version.startswith('iperf 3.1'): data = read_pipe(self._pipe_out) if data.startswith('Control connection'): data = '{' + data.split('{', 1)[1] else: data = c_char_p( self.lib.iperf_get_test_json_output_string(self._test) ).value if data: data = data.decode('utf-8') output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) return TestResult(data)
python
def run(self): """Run the current test client. :rtype: instance of :class:`TestResult` """ if self.json_output: output_to_pipe(self._pipe_in) # Disable stdout error = self.lib.iperf_run_client(self._test) if not self.iperf_version.startswith('iperf 3.1'): data = read_pipe(self._pipe_out) if data.startswith('Control connection'): data = '{' + data.split('{', 1)[1] else: data = c_char_p( self.lib.iperf_get_test_json_output_string(self._test) ).value if data: data = data.decode('utf-8') output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) return TestResult(data)
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "json_output", ":", "output_to_pipe", "(", "self", ".", "_pipe_in", ")", "# Disable stdout", "error", "=", "self", ".", "lib", ".", "iperf_run_client", "(", "self", ".", "_test", ")", "if", "not", "self", ".", "iperf_version", ".", "startswith", "(", "'iperf 3.1'", ")", ":", "data", "=", "read_pipe", "(", "self", ".", "_pipe_out", ")", "if", "data", ".", "startswith", "(", "'Control connection'", ")", ":", "data", "=", "'{'", "+", "data", ".", "split", "(", "'{'", ",", "1", ")", "[", "1", "]", "else", ":", "data", "=", "c_char_p", "(", "self", ".", "lib", ".", "iperf_get_test_json_output_string", "(", "self", ".", "_test", ")", ")", ".", "value", "if", "data", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", "output_to_screen", "(", "self", ".", "_stdout_fd", ",", "self", ".", "_stderr_fd", ")", "# enable stdout", "if", "not", "data", "or", "error", ":", "data", "=", "'{\"error\": \"%s\"}'", "%", "self", ".", "_error_to_string", "(", "self", ".", "_errno", ")", "return", "TestResult", "(", "data", ")" ]
Run the current test client. :rtype: instance of :class:`TestResult`
[ "Run", "the", "current", "test", "client", "." ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L609-L634
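A minimal end-to-end sketch of driving the client whose `run()` method is shown above, assuming the `iperf3` package is installed and an iperf3 server is already listening on the target host. The `port` attribute and the `TestResult.json` attribute are assumptions inferred from the rest of the library and should be checked against the installed version.

```python
import iperf3

client = iperf3.Client()
client.server_hostname = '127.0.0.1'   # property shown in the records above
client.port = 5201                     # assumed attribute; 5201 is iperf3's default port
client.duration = 5                    # property shown in the records above

result = client.run()                  # returns a TestResult built from libiperf's JSON
print(result.json)                     # assumed attribute holding the parsed JSON output
```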
thiezn/iperf3-python
iperf3/iperf3.py
Server.run
def run(self): """Run the iperf3 server instance. :rtype: instance of :class:`TestResult` """ def _run_in_thread(self, data_queue): """Runs the iperf_run_server :param data_queue: thread-safe queue """ output_to_pipe(self._pipe_in) # disable stdout error = self.lib.iperf_run_server(self._test) output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout # TODO json_output_string not available on earlier iperf3 builds # have to build in a version check using self.iperf_version # The following line should work on later versions: # data = c_char_p( # self.lib.iperf_get_test_json_output_string(self._test) # ).value data = read_pipe(self._pipe_out) if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) self.lib.iperf_reset_test(self._test) data_queue.put(data) if self.json_output: data_queue = Queue() t = threading.Thread( target=_run_in_thread, args=[self, data_queue] ) t.daemon = True t.start() while t.is_alive(): t.join(.1) return TestResult(data_queue.get()) else: # setting json_output to False will output test to screen only self.lib.iperf_run_server(self._test) self.lib.iperf_reset_test(self._test) return None
python
def run(self): """Run the iperf3 server instance. :rtype: instance of :class:`TestResult` """ def _run_in_thread(self, data_queue): """Runs the iperf_run_server :param data_queue: thread-safe queue """ output_to_pipe(self._pipe_in) # disable stdout error = self.lib.iperf_run_server(self._test) output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout # TODO json_output_string not available on earlier iperf3 builds # have to build in a version check using self.iperf_version # The following line should work on later versions: # data = c_char_p( # self.lib.iperf_get_test_json_output_string(self._test) # ).value data = read_pipe(self._pipe_out) if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) self.lib.iperf_reset_test(self._test) data_queue.put(data) if self.json_output: data_queue = Queue() t = threading.Thread( target=_run_in_thread, args=[self, data_queue] ) t.daemon = True t.start() while t.is_alive(): t.join(.1) return TestResult(data_queue.get()) else: # setting json_output to False will output test to screen only self.lib.iperf_run_server(self._test) self.lib.iperf_reset_test(self._test) return None
[ "def", "run", "(", "self", ")", ":", "def", "_run_in_thread", "(", "self", ",", "data_queue", ")", ":", "\"\"\"Runs the iperf_run_server\n\n :param data_queue: thread-safe queue\n \"\"\"", "output_to_pipe", "(", "self", ".", "_pipe_in", ")", "# disable stdout", "error", "=", "self", ".", "lib", ".", "iperf_run_server", "(", "self", ".", "_test", ")", "output_to_screen", "(", "self", ".", "_stdout_fd", ",", "self", ".", "_stderr_fd", ")", "# enable stdout", "# TODO json_output_string not available on earlier iperf3 builds", "# have to build in a version check using self.iperf_version", "# The following line should work on later versions:", "# data = c_char_p(", "# self.lib.iperf_get_test_json_output_string(self._test)", "# ).value", "data", "=", "read_pipe", "(", "self", ".", "_pipe_out", ")", "if", "not", "data", "or", "error", ":", "data", "=", "'{\"error\": \"%s\"}'", "%", "self", ".", "_error_to_string", "(", "self", ".", "_errno", ")", "self", ".", "lib", ".", "iperf_reset_test", "(", "self", ".", "_test", ")", "data_queue", ".", "put", "(", "data", ")", "if", "self", ".", "json_output", ":", "data_queue", "=", "Queue", "(", ")", "t", "=", "threading", ".", "Thread", "(", "target", "=", "_run_in_thread", ",", "args", "=", "[", "self", ",", "data_queue", "]", ")", "t", ".", "daemon", "=", "True", "t", ".", "start", "(", ")", "while", "t", ".", "is_alive", "(", ")", ":", "t", ".", "join", "(", ".1", ")", "return", "TestResult", "(", "data_queue", ".", "get", "(", ")", ")", "else", ":", "# setting json_output to False will output test to screen only", "self", ".", "lib", ".", "iperf_run_server", "(", "self", ".", "_test", ")", "self", ".", "lib", ".", "iperf_reset_test", "(", "self", ".", "_test", ")", "return", "None" ]
Run the iperf3 server instance. :rtype: instance of :class:`TestResult`
[ "Run", "the", "iperf3", "server", "instance", "." ]
train
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L660-L707
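The server above captures libiperf's output by running the blocking call on a daemon thread and handing the result back through a queue. The same pattern, stripped of the iperf-specific calls, looks like this (`fake_run_server` is a stand-in for `lib.iperf_run_server`):

```python
import threading
from queue import Queue

def fake_run_server(data_queue):
    # Stand-in for the blocking libiperf call; push whatever output it produced.
    data_queue.put('{"result": "ok"}')

data_queue = Queue()
t = threading.Thread(target=fake_run_server, args=[data_queue])
t.daemon = True        # let the interpreter exit even if the worker hangs
t.start()

while t.is_alive():
    t.join(0.1)        # short joins keep the main thread responsive (e.g. to Ctrl-C)

print(data_queue.get())
```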
joferkington/mplstereonet
mplstereonet/convenience_functions.py
subplots
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, subplot_kw=None, hemisphere='lower', projection='equal_area', **fig_kw): """ Identical to matplotlib.pyplot.subplots, except that this will default to producing equal-area stereonet axes. This prevents constantly doing: >>> fig, ax = plt.subplot(subplot_kw=dict(projection='stereonet')) or >>> fig = plt.figure() >>> ax = fig.add_subplot(111, projection='stereonet') Using this function also avoids having ``mplstereonet`` continually appear to be an unused import when one of the above methods are used. Parameters ----------- nrows : int Number of rows of the subplot grid. Defaults to 1. ncols : int Number of columns of the subplot grid. Defaults to 1. hemisphere : string Currently this has no effect. When upper hemisphere and dual hemisphere plots are implemented, this will control which hemisphere is displayed. projection : string The projection for the axes. Defaults to 'equal_area'--an equal-area (a.k.a. "Schmidtt") stereonet. May also be 'equal_angle' for an equal-angle (a.k.a. "Wulff") stereonet or any other valid matplotlib projection (e.g. 'polar' or 'rectilinear' for a "normal" axes). The following parameters are identical to matplotlib.pyplot.subplots: sharex : string or bool If *True*, the X axis will be shared amongst all subplots. If *True* and you have multiple rows, the x tick labels on all but the last row of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a X axis. If "col", each subplot column will share a X axis and the x tick labels on all but the last row will have visible set to *False*. sharey : string or bool If *True*, the Y axis will be shared amongst all subplots. If *True* and you have multiple columns, the y tick labels on all but the first column of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a Y axis. If "col", each subplot column will share a Y axis and the y tick labels on all but the last row will have visible set to *False*. *squeeze* : bool If *True*, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects are returned as numpy 1-d arrays. - for NxM subplots with N>1 and M>1 are returned as a 2d array. If *False*, no squeezing at all is done: the returned axis object is always a 2-d array contaning Axis instances, even if it ends up being 1x1. *subplot_kw* : dict Dict with keywords passed to the :meth:`~matplotlib.figure.Figure.add_subplot` call used to create each subplots. *fig_kw* : dict Dict with keywords passed to the :func:`figure` call. Note that all keywords not recognized above will be automatically included here. Returns -------- fig, ax : tuple - *fig* is the :class:`matplotlib.figure.Figure` object - *ax* can be either a single axis object or an array of axis objects if more than one supblot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. 
""" import matplotlib.pyplot as plt if projection in ['equal_area', 'equal_angle']: projection += '_stereonet' if subplot_kw == None: subplot_kw = {} subplot_kw['projection'] = projection return plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey, squeeze=squeeze, subplot_kw=subplot_kw, **fig_kw)
python
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, subplot_kw=None, hemisphere='lower', projection='equal_area', **fig_kw): """ Identical to matplotlib.pyplot.subplots, except that this will default to producing equal-area stereonet axes. This prevents constantly doing: >>> fig, ax = plt.subplot(subplot_kw=dict(projection='stereonet')) or >>> fig = plt.figure() >>> ax = fig.add_subplot(111, projection='stereonet') Using this function also avoids having ``mplstereonet`` continually appear to be an unused import when one of the above methods are used. Parameters ----------- nrows : int Number of rows of the subplot grid. Defaults to 1. ncols : int Number of columns of the subplot grid. Defaults to 1. hemisphere : string Currently this has no effect. When upper hemisphere and dual hemisphere plots are implemented, this will control which hemisphere is displayed. projection : string The projection for the axes. Defaults to 'equal_area'--an equal-area (a.k.a. "Schmidtt") stereonet. May also be 'equal_angle' for an equal-angle (a.k.a. "Wulff") stereonet or any other valid matplotlib projection (e.g. 'polar' or 'rectilinear' for a "normal" axes). The following parameters are identical to matplotlib.pyplot.subplots: sharex : string or bool If *True*, the X axis will be shared amongst all subplots. If *True* and you have multiple rows, the x tick labels on all but the last row of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a X axis. If "col", each subplot column will share a X axis and the x tick labels on all but the last row will have visible set to *False*. sharey : string or bool If *True*, the Y axis will be shared amongst all subplots. If *True* and you have multiple columns, the y tick labels on all but the first column of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a Y axis. If "col", each subplot column will share a Y axis and the y tick labels on all but the last row will have visible set to *False*. *squeeze* : bool If *True*, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects are returned as numpy 1-d arrays. - for NxM subplots with N>1 and M>1 are returned as a 2d array. If *False*, no squeezing at all is done: the returned axis object is always a 2-d array contaning Axis instances, even if it ends up being 1x1. *subplot_kw* : dict Dict with keywords passed to the :meth:`~matplotlib.figure.Figure.add_subplot` call used to create each subplots. *fig_kw* : dict Dict with keywords passed to the :func:`figure` call. Note that all keywords not recognized above will be automatically included here. Returns -------- fig, ax : tuple - *fig* is the :class:`matplotlib.figure.Figure` object - *ax* can be either a single axis object or an array of axis objects if more than one supblot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. 
""" import matplotlib.pyplot as plt if projection in ['equal_area', 'equal_angle']: projection += '_stereonet' if subplot_kw == None: subplot_kw = {} subplot_kw['projection'] = projection return plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey, squeeze=squeeze, subplot_kw=subplot_kw, **fig_kw)
[ "def", "subplots", "(", "nrows", "=", "1", ",", "ncols", "=", "1", ",", "sharex", "=", "False", ",", "sharey", "=", "False", ",", "squeeze", "=", "True", ",", "subplot_kw", "=", "None", ",", "hemisphere", "=", "'lower'", ",", "projection", "=", "'equal_area'", ",", "*", "*", "fig_kw", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "if", "projection", "in", "[", "'equal_area'", ",", "'equal_angle'", "]", ":", "projection", "+=", "'_stereonet'", "if", "subplot_kw", "==", "None", ":", "subplot_kw", "=", "{", "}", "subplot_kw", "[", "'projection'", "]", "=", "projection", "return", "plt", ".", "subplots", "(", "nrows", ",", "ncols", ",", "sharex", "=", "sharex", ",", "sharey", "=", "sharey", ",", "squeeze", "=", "squeeze", ",", "subplot_kw", "=", "subplot_kw", ",", "*", "*", "fig_kw", ")" ]
Identical to matplotlib.pyplot.subplots, except that this will default to producing equal-area stereonet axes. This prevents constantly doing: >>> fig, ax = plt.subplot(subplot_kw=dict(projection='stereonet')) or >>> fig = plt.figure() >>> ax = fig.add_subplot(111, projection='stereonet') Using this function also avoids having ``mplstereonet`` continually appear to be an unused import when one of the above methods are used. Parameters ----------- nrows : int Number of rows of the subplot grid. Defaults to 1. ncols : int Number of columns of the subplot grid. Defaults to 1. hemisphere : string Currently this has no effect. When upper hemisphere and dual hemisphere plots are implemented, this will control which hemisphere is displayed. projection : string The projection for the axes. Defaults to 'equal_area'--an equal-area (a.k.a. "Schmidtt") stereonet. May also be 'equal_angle' for an equal-angle (a.k.a. "Wulff") stereonet or any other valid matplotlib projection (e.g. 'polar' or 'rectilinear' for a "normal" axes). The following parameters are identical to matplotlib.pyplot.subplots: sharex : string or bool If *True*, the X axis will be shared amongst all subplots. If *True* and you have multiple rows, the x tick labels on all but the last row of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a X axis. If "col", each subplot column will share a X axis and the x tick labels on all but the last row will have visible set to *False*. sharey : string or bool If *True*, the Y axis will be shared amongst all subplots. If *True* and you have multiple columns, the y tick labels on all but the first column of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a Y axis. If "col", each subplot column will share a Y axis and the y tick labels on all but the last row will have visible set to *False*. *squeeze* : bool If *True*, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects are returned as numpy 1-d arrays. - for NxM subplots with N>1 and M>1 are returned as a 2d array. If *False*, no squeezing at all is done: the returned axis object is always a 2-d array contaning Axis instances, even if it ends up being 1x1. *subplot_kw* : dict Dict with keywords passed to the :meth:`~matplotlib.figure.Figure.add_subplot` call used to create each subplots. *fig_kw* : dict Dict with keywords passed to the :func:`figure` call. Note that all keywords not recognized above will be automatically included here. Returns -------- fig, ax : tuple - *fig* is the :class:`matplotlib.figure.Figure` object - *ax* can be either a single axis object or an array of axis objects if more than one supblot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above.
[ "Identical", "to", "matplotlib", ".", "pyplot", ".", "subplots", "except", "that", "this", "will", "default", "to", "producing", "equal", "-", "area", "stereonet", "axes", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/convenience_functions.py#L1-L108
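A short usage sketch for the `subplots` wrapper in the record above. `ax.plane` appears elsewhere in this file's examples; `ax.pole` and `ax.grid` are assumed to be available on the stereonet axes.

```python
import matplotlib.pyplot as plt
import mplstereonet

# One equal-area stereonet axes (the default projection).
fig, ax = mplstereonet.subplots()

ax.plane(315, 30, 'k-')   # great circle for a plane striking 315, dipping 30
ax.pole(315, 30, 'ro')    # pole to the same plane (assumed method)
ax.grid()

plt.show()
```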
joferkington/mplstereonet
examples/fault_slip_plot.py
fault_and_striae_plot
def fault_and_striae_plot(ax, strikes, dips, rakes): """Makes a fault-and-striae plot (a.k.a. "Ball of String") for normal faults with the given strikes, dips, and rakes.""" # Plot the planes lines = ax.plane(strikes, dips, 'k-', lw=0.5) # Calculate the position of the rake of the lineations, but don't plot yet x, y = mplstereonet.rake(strikes, dips, rakes) # Calculate the direction the arrows should point # These are all normal faults, so the arrows point away from the center # For thrusts, it would just be u, v = -x/mag, -y/mag mag = np.hypot(x, y) u, v = x / mag, y / mag # Plot the arrows at the rake locations... arrows = ax.quiver(x, y, u, v, width=1, headwidth=4, units='dots') return lines, arrows
python
def fault_and_striae_plot(ax, strikes, dips, rakes): """Makes a fault-and-striae plot (a.k.a. "Ball of String") for normal faults with the given strikes, dips, and rakes.""" # Plot the planes lines = ax.plane(strikes, dips, 'k-', lw=0.5) # Calculate the position of the rake of the lineations, but don't plot yet x, y = mplstereonet.rake(strikes, dips, rakes) # Calculate the direction the arrows should point # These are all normal faults, so the arrows point away from the center # For thrusts, it would just be u, v = -x/mag, -y/mag mag = np.hypot(x, y) u, v = x / mag, y / mag # Plot the arrows at the rake locations... arrows = ax.quiver(x, y, u, v, width=1, headwidth=4, units='dots') return lines, arrows
[ "def", "fault_and_striae_plot", "(", "ax", ",", "strikes", ",", "dips", ",", "rakes", ")", ":", "# Plot the planes", "lines", "=", "ax", ".", "plane", "(", "strikes", ",", "dips", ",", "'k-'", ",", "lw", "=", "0.5", ")", "# Calculate the position of the rake of the lineations, but don't plot yet", "x", ",", "y", "=", "mplstereonet", ".", "rake", "(", "strikes", ",", "dips", ",", "rakes", ")", "# Calculate the direction the arrows should point", "# These are all normal faults, so the arrows point away from the center", "# For thrusts, it would just be u, v = -x/mag, -y/mag", "mag", "=", "np", ".", "hypot", "(", "x", ",", "y", ")", "u", ",", "v", "=", "x", "/", "mag", ",", "y", "/", "mag", "# Plot the arrows at the rake locations...", "arrows", "=", "ax", ".", "quiver", "(", "x", ",", "y", ",", "u", ",", "v", ",", "width", "=", "1", ",", "headwidth", "=", "4", ",", "units", "=", "'dots'", ")", "return", "lines", ",", "arrows" ]
Makes a fault-and-striae plot (a.k.a. "Ball of String") for normal faults with the given strikes, dips, and rakes.
[ "Makes", "a", "fault", "-", "and", "-", "striae", "plot", "(", "a", ".", "k", ".", "a", ".", "Ball", "of", "String", ")", "for", "normal", "faults", "with", "the", "given", "strikes", "dips", "and", "rakes", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/fault_slip_plot.py#L35-L52
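A sketch of calling the `fault_and_striae_plot` helper from the record above on a few synthetic normal-fault measurements; it assumes that function is already defined in the current namespace, and the strike/dip/rake values are made up for illustration.

```python
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet

# Synthetic normal-fault measurements (degrees).
strikes = np.array([30, 60, 90, 120])
dips = np.array([60, 55, 65, 50])
rakes = np.array([85, 90, 80, 95])

fig, ax = mplstereonet.subplots()
lines, arrows = fault_and_striae_plot(ax, strikes, dips, rakes)
ax.set_title('Fault-and-striae ("ball of string") plot')
plt.show()
```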
joferkington/mplstereonet
examples/fault_slip_plot.py
tangent_lineation_plot
def tangent_lineation_plot(ax, strikes, dips, rakes): """Makes a tangent lineation plot for normal faults with the given strikes, dips, and rakes.""" # Calculate the position of the rake of the lineations, but don't plot yet rake_x, rake_y = mplstereonet.rake(strikes, dips, rakes) # Calculate the direction the arrows should point # These are all normal faults, so the arrows point away from the center # Because we're plotting at the pole location, however, we need to flip this # from what we plotted with the "ball of string" plot. mag = np.hypot(rake_x, rake_y) u, v = -rake_x / mag, -rake_y / mag # Calculate the position of the poles pole_x, pole_y = mplstereonet.pole(strikes, dips) # Plot the arrows centered on the pole locations... arrows = ax.quiver(pole_x, pole_y, u, v, width=1, headwidth=4, units='dots', pivot='middle') return arrows
python
def tangent_lineation_plot(ax, strikes, dips, rakes): """Makes a tangent lineation plot for normal faults with the given strikes, dips, and rakes.""" # Calculate the position of the rake of the lineations, but don't plot yet rake_x, rake_y = mplstereonet.rake(strikes, dips, rakes) # Calculate the direction the arrows should point # These are all normal faults, so the arrows point away from the center # Because we're plotting at the pole location, however, we need to flip this # from what we plotted with the "ball of string" plot. mag = np.hypot(rake_x, rake_y) u, v = -rake_x / mag, -rake_y / mag # Calculate the position of the poles pole_x, pole_y = mplstereonet.pole(strikes, dips) # Plot the arrows centered on the pole locations... arrows = ax.quiver(pole_x, pole_y, u, v, width=1, headwidth=4, units='dots', pivot='middle') return arrows
[ "def", "tangent_lineation_plot", "(", "ax", ",", "strikes", ",", "dips", ",", "rakes", ")", ":", "# Calculate the position of the rake of the lineations, but don't plot yet", "rake_x", ",", "rake_y", "=", "mplstereonet", ".", "rake", "(", "strikes", ",", "dips", ",", "rakes", ")", "# Calculate the direction the arrows should point", "# These are all normal faults, so the arrows point away from the center", "# Because we're plotting at the pole location, however, we need to flip this", "# from what we plotted with the \"ball of string\" plot.", "mag", "=", "np", ".", "hypot", "(", "rake_x", ",", "rake_y", ")", "u", ",", "v", "=", "-", "rake_x", "/", "mag", ",", "-", "rake_y", "/", "mag", "# Calculate the position of the poles", "pole_x", ",", "pole_y", "=", "mplstereonet", ".", "pole", "(", "strikes", ",", "dips", ")", "# Plot the arrows centered on the pole locations...", "arrows", "=", "ax", ".", "quiver", "(", "pole_x", ",", "pole_y", ",", "u", ",", "v", ",", "width", "=", "1", ",", "headwidth", "=", "4", ",", "units", "=", "'dots'", ",", "pivot", "=", "'middle'", ")", "return", "arrows" ]
Makes a tangent lineation plot for normal faults with the given strikes, dips, and rakes.
[ "Makes", "a", "tangent", "lineation", "plot", "for", "normal", "faults", "with", "the", "given", "strikes", "dips", "and", "rakes", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/fault_slip_plot.py#L54-L73
joferkington/mplstereonet
examples/parse_angelier_data.py
load
def load(): """Read data from a text file on disk.""" # Get the data file relative to this file's location... datadir = os.path.dirname(__file__) filename = os.path.join(datadir, 'angelier_data.txt') data = [] with open(filename, 'r') as infile: for line in infile: # Skip comments if line.startswith('#'): continue # First column: strike, second: dip, third: rake. strike, dip, rake = line.strip().split() if rake[-1].isalpha(): # If there's a directional letter on the rake column, parse it # normally. strike, dip, rake = mplstereonet.parse_rake(strike, dip, rake) else: # Otherwise, it's actually an azimuthal measurement of the # slickenslide directions, so we need to convert it to a rake. strike, dip = mplstereonet.parse_strike_dip(strike, dip) azimuth = float(rake) rake = mplstereonet.azimuth2rake(strike, dip, azimuth) data.append([strike, dip, rake]) # Separate the columns back out strike, dip, rake = zip(*data) return strike, dip, rake
python
def load(): """Read data from a text file on disk.""" # Get the data file relative to this file's location... datadir = os.path.dirname(__file__) filename = os.path.join(datadir, 'angelier_data.txt') data = [] with open(filename, 'r') as infile: for line in infile: # Skip comments if line.startswith('#'): continue # First column: strike, second: dip, third: rake. strike, dip, rake = line.strip().split() if rake[-1].isalpha(): # If there's a directional letter on the rake column, parse it # normally. strike, dip, rake = mplstereonet.parse_rake(strike, dip, rake) else: # Otherwise, it's actually an azimuthal measurement of the # slickenslide directions, so we need to convert it to a rake. strike, dip = mplstereonet.parse_strike_dip(strike, dip) azimuth = float(rake) rake = mplstereonet.azimuth2rake(strike, dip, azimuth) data.append([strike, dip, rake]) # Separate the columns back out strike, dip, rake = zip(*data) return strike, dip, rake
[ "def", "load", "(", ")", ":", "# Get the data file relative to this file's location...", "datadir", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "datadir", ",", "'angelier_data.txt'", ")", "data", "=", "[", "]", "with", "open", "(", "filename", ",", "'r'", ")", "as", "infile", ":", "for", "line", "in", "infile", ":", "# Skip comments", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "# First column: strike, second: dip, third: rake.", "strike", ",", "dip", ",", "rake", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "rake", "[", "-", "1", "]", ".", "isalpha", "(", ")", ":", "# If there's a directional letter on the rake column, parse it", "# normally.", "strike", ",", "dip", ",", "rake", "=", "mplstereonet", ".", "parse_rake", "(", "strike", ",", "dip", ",", "rake", ")", "else", ":", "# Otherwise, it's actually an azimuthal measurement of the", "# slickenslide directions, so we need to convert it to a rake.", "strike", ",", "dip", "=", "mplstereonet", ".", "parse_strike_dip", "(", "strike", ",", "dip", ")", "azimuth", "=", "float", "(", "rake", ")", "rake", "=", "mplstereonet", ".", "azimuth2rake", "(", "strike", ",", "dip", ",", "azimuth", ")", "data", ".", "append", "(", "[", "strike", ",", "dip", ",", "rake", "]", ")", "# Separate the columns back out", "strike", ",", "dip", ",", "rake", "=", "zip", "(", "*", "data", ")", "return", "strike", ",", "dip", ",", "rake" ]
Read data from a text file on disk.
[ "Read", "data", "from", "a", "text", "file", "on", "disk", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/parse_angelier_data.py#L32-L63
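The loader above follows a common read-skip-split pattern: iterate the file, skip `#` comments, split whitespace-delimited columns, and branch on whether the third column carries a direction letter. A stripped-down sketch of just that skeleton, using a hypothetical file name:

```python
rows = []
with open('measurements.txt') as infile:   # hypothetical data file
    for line in infile:
        if line.startswith('#'):           # skip comment lines
            continue
        strike, dip, rake = line.strip().split()
        if rake[-1].isalpha():
            kind = 'rake with direction letter'
        else:
            kind = 'slip azimuth'
        rows.append((strike, dip, rake, kind))
```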
joferkington/mplstereonet
mplstereonet/contouring.py
_count_points
def _count_points(lons, lats, func, sigma, gridsize=(100,100), weights=None): """This function actually calculates the point density of the input ("lons" and "lats") points at a series of counter stations. Creates "gridsize" regular grid of counter stations in lat-long space, calculates the distance to all input points at each counter station, and then calculates the density using "func". Each input point is weighted by the corresponding item of "weights". The weights are normalized to 1 before calculation.""" lons = np.atleast_1d(np.squeeze(lons)) lats = np.atleast_1d(np.squeeze(lats)) if not weights: weights = 1 # Normalize the weights weights = np.asarray(weights, dtype=np.float) weights /= weights.mean() # Generate a regular grid of "counters" to measure on... bound = np.pi / 2.0 nrows, ncols = gridsize xmin, xmax, ymin, ymax = -bound, bound, -bound, bound lon, lat = np.mgrid[xmin : xmax : ncols * 1j, ymin : ymax : nrows * 1j] xyz_counters = stereonet_math.sph2cart(lon.ravel(), lat.ravel()) xyz_counters = np.vstack(xyz_counters).T xyz_points = stereonet_math.sph2cart(lons, lats) xyz_points = np.vstack(xyz_points).T # Basically, we can't model this as a convolution as we're not in cartesian # space, so we have to iterate through and call the kernel function at # each "counter". totals = np.zeros(xyz_counters.shape[0], dtype=np.float) for i, xyz in enumerate(xyz_counters): cos_dist = np.abs(np.dot(xyz, xyz_points.T)) density, scale = func(cos_dist, sigma) density *= weights totals[i] = (density.sum() - 0.5) / scale # Traditionally, the negative values (while valid, as they represent areas # with less than expected point-density) are not returned. totals[totals < 0] = 0 counter_lon, counter_lat = stereonet_math.cart2sph(*xyz_counters.T) for item in [counter_lon, counter_lat, totals]: item.shape = gridsize return counter_lon, counter_lat, totals
python
def _count_points(lons, lats, func, sigma, gridsize=(100,100), weights=None): """This function actually calculates the point density of the input ("lons" and "lats") points at a series of counter stations. Creates "gridsize" regular grid of counter stations in lat-long space, calculates the distance to all input points at each counter station, and then calculates the density using "func". Each input point is weighted by the corresponding item of "weights". The weights are normalized to 1 before calculation.""" lons = np.atleast_1d(np.squeeze(lons)) lats = np.atleast_1d(np.squeeze(lats)) if not weights: weights = 1 # Normalize the weights weights = np.asarray(weights, dtype=np.float) weights /= weights.mean() # Generate a regular grid of "counters" to measure on... bound = np.pi / 2.0 nrows, ncols = gridsize xmin, xmax, ymin, ymax = -bound, bound, -bound, bound lon, lat = np.mgrid[xmin : xmax : ncols * 1j, ymin : ymax : nrows * 1j] xyz_counters = stereonet_math.sph2cart(lon.ravel(), lat.ravel()) xyz_counters = np.vstack(xyz_counters).T xyz_points = stereonet_math.sph2cart(lons, lats) xyz_points = np.vstack(xyz_points).T # Basically, we can't model this as a convolution as we're not in cartesian # space, so we have to iterate through and call the kernel function at # each "counter". totals = np.zeros(xyz_counters.shape[0], dtype=np.float) for i, xyz in enumerate(xyz_counters): cos_dist = np.abs(np.dot(xyz, xyz_points.T)) density, scale = func(cos_dist, sigma) density *= weights totals[i] = (density.sum() - 0.5) / scale # Traditionally, the negative values (while valid, as they represent areas # with less than expected point-density) are not returned. totals[totals < 0] = 0 counter_lon, counter_lat = stereonet_math.cart2sph(*xyz_counters.T) for item in [counter_lon, counter_lat, totals]: item.shape = gridsize return counter_lon, counter_lat, totals
[ "def", "_count_points", "(", "lons", ",", "lats", ",", "func", ",", "sigma", ",", "gridsize", "=", "(", "100", ",", "100", ")", ",", "weights", "=", "None", ")", ":", "lons", "=", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "lons", ")", ")", "lats", "=", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "lats", ")", ")", "if", "not", "weights", ":", "weights", "=", "1", "# Normalize the weights", "weights", "=", "np", ".", "asarray", "(", "weights", ",", "dtype", "=", "np", ".", "float", ")", "weights", "/=", "weights", ".", "mean", "(", ")", "# Generate a regular grid of \"counters\" to measure on...", "bound", "=", "np", ".", "pi", "/", "2.0", "nrows", ",", "ncols", "=", "gridsize", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", "=", "-", "bound", ",", "bound", ",", "-", "bound", ",", "bound", "lon", ",", "lat", "=", "np", ".", "mgrid", "[", "xmin", ":", "xmax", ":", "ncols", "*", "1j", ",", "ymin", ":", "ymax", ":", "nrows", "*", "1j", "]", "xyz_counters", "=", "stereonet_math", ".", "sph2cart", "(", "lon", ".", "ravel", "(", ")", ",", "lat", ".", "ravel", "(", ")", ")", "xyz_counters", "=", "np", ".", "vstack", "(", "xyz_counters", ")", ".", "T", "xyz_points", "=", "stereonet_math", ".", "sph2cart", "(", "lons", ",", "lats", ")", "xyz_points", "=", "np", ".", "vstack", "(", "xyz_points", ")", ".", "T", "# Basically, we can't model this as a convolution as we're not in cartesian", "# space, so we have to iterate through and call the kernel function at", "# each \"counter\".", "totals", "=", "np", ".", "zeros", "(", "xyz_counters", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "float", ")", "for", "i", ",", "xyz", "in", "enumerate", "(", "xyz_counters", ")", ":", "cos_dist", "=", "np", ".", "abs", "(", "np", ".", "dot", "(", "xyz", ",", "xyz_points", ".", "T", ")", ")", "density", ",", "scale", "=", "func", "(", "cos_dist", ",", "sigma", ")", "density", "*=", "weights", "totals", "[", "i", "]", "=", "(", "density", ".", "sum", "(", ")", "-", "0.5", ")", "/", "scale", "# Traditionally, the negative values (while valid, as they represent areas", "# with less than expected point-density) are not returned.", "totals", "[", "totals", "<", "0", "]", "=", "0", "counter_lon", ",", "counter_lat", "=", "stereonet_math", ".", "cart2sph", "(", "*", "xyz_counters", ".", "T", ")", "for", "item", "in", "[", "counter_lon", ",", "counter_lat", ",", "totals", "]", ":", "item", ".", "shape", "=", "gridsize", "return", "counter_lon", ",", "counter_lat", ",", "totals" ]
This function actually calculates the point density of the input ("lons" and "lats") points at a series of counter stations. Creates "gridsize" regular grid of counter stations in lat-long space, calculates the distance to all input points at each counter station, and then calculates the density using "func". Each input point is weighted by the corresponding item of "weights". The weights are normalized to 1 before calculation.
[ "This", "function", "actually", "calculates", "the", "point", "density", "of", "the", "input", "(", "lons", "and", "lats", ")", "points", "at", "a", "series", "of", "counter", "stations", ".", "Creates", "gridsize", "regular", "grid", "of", "counter", "stations", "in", "lat", "-", "long", "space", "calculates", "the", "distance", "to", "all", "input", "points", "at", "each", "counter", "station", "and", "then", "calculates", "the", "density", "using", "func", ".", "Each", "input", "point", "is", "weighted", "by", "the", "corresponding", "item", "of", "weights", ".", "The", "weights", "are", "normalized", "to", "1", "before", "calculation", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L4-L47
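The core of the counting loop above is the `cos_dist = |counter . points|` step: because the measurements live on a unit sphere, the absolute dot product is the cosine of the angular distance between a counting station and every point, with the absolute value treating antipodal points as identical (axial data). A small numpy sketch of that step in isolation, with made-up unit vectors:

```python
import numpy as np

# One counting station and three measurements as unit vectors (made-up values).
counter = np.array([0.0, 0.0, 1.0])
points = np.array([[0.0, 0.0, 1.0],
                   [0.0, 1.0, 0.0],
                   [0.0, 0.70710678, 0.70710678]])

# |cos(angular distance)| between the station and every point.
cos_dist = np.abs(points @ counter)
print(cos_dist)   # [1.0, 0.0, ~0.707]
```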
joferkington/mplstereonet
mplstereonet/contouring.py
density_grid
def density_grid(*args, **kwargs): """ Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.). Returns a regular (in lat-long space) grid of density estimates over a hemispherical surface. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. Returns ------- xi, yi, zi : 2D arrays The longitude, latitude and density values of the regularly gridded density estimates. Longitude and latitude are in radians. See Also --------- mplstereonet.StereonetAxes.density_contourf mplstereonet.StereonetAxes.density_contour References ---------- .. [1] Vollmer, 1995. 
C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. """ def do_nothing(x, y): return x, y measurement = kwargs.get('measurement', 'poles') gridsize = kwargs.get('gridsize', 100) weights = kwargs.get('weights', None) try: gridsize = int(gridsize) gridsize = (gridsize, gridsize) except TypeError: pass func = {'poles':stereonet_math.pole, 'lines':stereonet_math.line, 'rakes':stereonet_math.rake, 'radians':do_nothing}[measurement] lon, lat = func(*args) method = kwargs.get('method', 'exponential_kamb') sigma = kwargs.get('sigma', 3) func = {'linear_kamb':_linear_inverse_kamb, 'square_kamb':_square_inverse_kamb, 'schmidt':_schmidt_count, 'kamb':_kamb_count, 'exponential_kamb':_exponential_kamb, }[method] lon, lat, z = _count_points(lon, lat, func, sigma, gridsize, weights) return lon, lat, z
python
def density_grid(*args, **kwargs): """ Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.). Returns a regular (in lat-long space) grid of density estimates over a hemispherical surface. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. Returns ------- xi, yi, zi : 2D arrays The longitude, latitude and density values of the regularly gridded density estimates. Longitude and latitude are in radians. See Also --------- mplstereonet.StereonetAxes.density_contourf mplstereonet.StereonetAxes.density_contour References ---------- .. [1] Vollmer, 1995. 
C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. """ def do_nothing(x, y): return x, y measurement = kwargs.get('measurement', 'poles') gridsize = kwargs.get('gridsize', 100) weights = kwargs.get('weights', None) try: gridsize = int(gridsize) gridsize = (gridsize, gridsize) except TypeError: pass func = {'poles':stereonet_math.pole, 'lines':stereonet_math.line, 'rakes':stereonet_math.rake, 'radians':do_nothing}[measurement] lon, lat = func(*args) method = kwargs.get('method', 'exponential_kamb') sigma = kwargs.get('sigma', 3) func = {'linear_kamb':_linear_inverse_kamb, 'square_kamb':_square_inverse_kamb, 'schmidt':_schmidt_count, 'kamb':_kamb_count, 'exponential_kamb':_exponential_kamb, }[method] lon, lat, z = _count_points(lon, lat, func, sigma, gridsize, weights) return lon, lat, z
[ "def", "density_grid", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "do_nothing", "(", "x", ",", "y", ")", ":", "return", "x", ",", "y", "measurement", "=", "kwargs", ".", "get", "(", "'measurement'", ",", "'poles'", ")", "gridsize", "=", "kwargs", ".", "get", "(", "'gridsize'", ",", "100", ")", "weights", "=", "kwargs", ".", "get", "(", "'weights'", ",", "None", ")", "try", ":", "gridsize", "=", "int", "(", "gridsize", ")", "gridsize", "=", "(", "gridsize", ",", "gridsize", ")", "except", "TypeError", ":", "pass", "func", "=", "{", "'poles'", ":", "stereonet_math", ".", "pole", ",", "'lines'", ":", "stereonet_math", ".", "line", ",", "'rakes'", ":", "stereonet_math", ".", "rake", ",", "'radians'", ":", "do_nothing", "}", "[", "measurement", "]", "lon", ",", "lat", "=", "func", "(", "*", "args", ")", "method", "=", "kwargs", ".", "get", "(", "'method'", ",", "'exponential_kamb'", ")", "sigma", "=", "kwargs", ".", "get", "(", "'sigma'", ",", "3", ")", "func", "=", "{", "'linear_kamb'", ":", "_linear_inverse_kamb", ",", "'square_kamb'", ":", "_square_inverse_kamb", ",", "'schmidt'", ":", "_schmidt_count", ",", "'kamb'", ":", "_kamb_count", ",", "'exponential_kamb'", ":", "_exponential_kamb", ",", "}", "[", "method", "]", "lon", ",", "lat", ",", "z", "=", "_count_points", "(", "lon", ",", "lat", ",", "func", ",", "sigma", ",", "gridsize", ",", "weights", ")", "return", "lon", ",", "lat", ",", "z" ]
Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.). Returns a regular (in lat-long space) grid of density estimates over a hemispherical surface. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. Returns ------- xi, yi, zi : 2D arrays The longitude, latitude and density values of the regularly gridded density estimates. Longitude and latitude are in radians. See Also --------- mplstereonet.StereonetAxes.density_contourf mplstereonet.StereonetAxes.density_contour References ---------- .. [1] Vollmer, 1995. 
C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
[ "Estimates", "point", "density", "of", "the", "given", "linear", "orientation", "measurements", "(", "Interpreted", "as", "poles", "lines", "rakes", "or", "raw", "longitudes", "and", "latitudes", "based", "on", "the", "measurement", "keyword", "argument", ".", ")", ".", "Returns", "a", "regular", "(", "in", "lat", "-", "long", "space", ")", "grid", "of", "density", "estimates", "over", "a", "hemispherical", "surface", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L49-L170
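A minimal usage sketch for the density-estimation routine documented above. It assumes the function is re-exported at package level as ``mplstereonet.density_grid``, that the package provides a ``subplots`` convenience wrapper, and that the companion axes method ``density_contourf`` accepts the same ``measurement``/``method``/``sigma`` keywords; the strike/dip values are made up for illustration.

import matplotlib.pyplot as plt
import numpy as np
import mplstereonet

# Hypothetical strike/dip measurements (degrees, right-hand rule).
strikes = np.array([120, 125, 130, 140, 215, 220])
dips = np.array([30, 40, 35, 45, 60, 55])

# Regular grid of density estimates in stereonet (lon/lat) coordinates.
xi, yi, zi = mplstereonet.density_grid(strikes, dips, measurement='poles',
                                       method='exponential_kamb', sigma=3,
                                       gridsize=100)

# The StereonetAxes methods listed under "See Also" wrap the same machinery.
fig, ax = mplstereonet.subplots()
ax.density_contourf(strikes, dips, measurement='poles', cmap='Reds')
ax.pole(strikes, dips, 'k.')
ax.grid(True)
plt.show()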
joferkington/mplstereonet
mplstereonet/contouring.py
_exponential_kamb
def _exponential_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for exponential smoothing.""" n = float(cos_dist.size) f = 2 * (1.0 + n / sigma**2) count = np.exp(f * (cos_dist - 1)) units = np.sqrt(n * (f/2.0 - 1) / f**2) return count, units
python
def _exponential_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for exponential smoothing.""" n = float(cos_dist.size) f = 2 * (1.0 + n / sigma**2) count = np.exp(f * (cos_dist - 1)) units = np.sqrt(n * (f/2.0 - 1) / f**2) return count, units
[ "def", "_exponential_kamb", "(", "cos_dist", ",", "sigma", "=", "3", ")", ":", "n", "=", "float", "(", "cos_dist", ".", "size", ")", "f", "=", "2", "*", "(", "1.0", "+", "n", "/", "sigma", "**", "2", ")", "count", "=", "np", ".", "exp", "(", "f", "*", "(", "cos_dist", "-", "1", ")", ")", "units", "=", "np", ".", "sqrt", "(", "n", "*", "(", "f", "/", "2.0", "-", "1", ")", "/", "f", "**", "2", ")", "return", "count", ",", "units" ]
Kernel function from Vollmer for exponential smoothing.
[ "Kernel", "function", "from", "Vollmer", "for", "exponential", "smoothing", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L183-L189
joferkington/mplstereonet
mplstereonet/contouring.py
_linear_inverse_kamb
def _linear_inverse_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for linear smoothing.""" n = float(cos_dist.size) radius = _kamb_radius(n, sigma) f = 2 / (1 - radius) cos_dist = cos_dist[cos_dist >= radius] count = (f * (cos_dist - radius)) return count, _kamb_units(n, radius)
python
def _linear_inverse_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for linear smoothing.""" n = float(cos_dist.size) radius = _kamb_radius(n, sigma) f = 2 / (1 - radius) cos_dist = cos_dist[cos_dist >= radius] count = (f * (cos_dist - radius)) return count, _kamb_units(n, radius)
[ "def", "_linear_inverse_kamb", "(", "cos_dist", ",", "sigma", "=", "3", ")", ":", "n", "=", "float", "(", "cos_dist", ".", "size", ")", "radius", "=", "_kamb_radius", "(", "n", ",", "sigma", ")", "f", "=", "2", "/", "(", "1", "-", "radius", ")", "cos_dist", "=", "cos_dist", "[", "cos_dist", ">=", "radius", "]", "count", "=", "(", "f", "*", "(", "cos_dist", "-", "radius", ")", ")", "return", "count", ",", "_kamb_units", "(", "n", ",", "radius", ")" ]
Kernel function from Vollmer for linear smoothing.
[ "Kernel", "function", "from", "Vollmer", "for", "linear", "smoothing", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L191-L198
joferkington/mplstereonet
mplstereonet/contouring.py
_kamb_count
def _kamb_count(cos_dist, sigma=3): """Original Kamb kernel function (raw count within radius).""" n = float(cos_dist.size) dist = _kamb_radius(n, sigma) count = (cos_dist >= dist).astype(float) return count, _kamb_units(n, dist)
python
def _kamb_count(cos_dist, sigma=3): """Original Kamb kernel function (raw count within radius).""" n = float(cos_dist.size) dist = _kamb_radius(n, sigma) count = (cos_dist >= dist).astype(float) return count, _kamb_units(n, dist)
[ "def", "_kamb_count", "(", "cos_dist", ",", "sigma", "=", "3", ")", ":", "n", "=", "float", "(", "cos_dist", ".", "size", ")", "dist", "=", "_kamb_radius", "(", "n", ",", "sigma", ")", "count", "=", "(", "cos_dist", ">=", "dist", ")", ".", "astype", "(", "float", ")", "return", "count", ",", "_kamb_units", "(", "n", ",", "dist", ")" ]
Original Kamb kernel function (raw count within radius).
[ "Original", "Kamb", "kernel", "function", "(", "raw", "count", "within", "radius", ")", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L209-L214
joferkington/mplstereonet
mplstereonet/contouring.py
_schmidt_count
def _schmidt_count(cos_dist, sigma=None): """Schmidt (a.k.a. 1%) counting kernel function.""" radius = 0.01 count = ((1 - cos_dist) <= radius).astype(float) # To offset the count.sum() - 0.5 required for the kamb methods... count = 0.5 / count.size + count return count, (cos_dist.size * radius)
python
def _schmidt_count(cos_dist, sigma=None): """Schmidt (a.k.a. 1%) counting kernel function.""" radius = 0.01 count = ((1 - cos_dist) <= radius).astype(float) # To offset the count.sum() - 0.5 required for the kamb methods... count = 0.5 / count.size + count return count, (cos_dist.size * radius)
[ "def", "_schmidt_count", "(", "cos_dist", ",", "sigma", "=", "None", ")", ":", "radius", "=", "0.01", "count", "=", "(", "(", "1", "-", "cos_dist", ")", "<=", "radius", ")", ".", "astype", "(", "float", ")", "# To offset the count.sum() - 0.5 required for the kamb methods...", "count", "=", "0.5", "/", "count", ".", "size", "+", "count", "return", "count", ",", "(", "cos_dist", ".", "size", "*", "radius", ")" ]
Schmidt (a.k.a. 1%) counting kernel function.
[ "Schmidt", "(", "a", ".", "k", ".", "a", ".", "1%", ")", "counting", "kernel", "function", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L216-L222
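The four private kernels above (``_exponential_kamb``, ``_linear_inverse_kamb``, ``_kamb_count``, ``_schmidt_count``) back the ``method`` options listed in the density-estimation docstring. Below is a hedged sketch of how they might be compared through the public interface; it assumes ``mplstereonet.subplots`` accepts an ``ncols`` argument like ``plt.subplots``, and the measurements are synthetic.

import matplotlib.pyplot as plt
import numpy as np
import mplstereonet

rng = np.random.default_rng(0)
strikes = rng.uniform(0, 360, 50)   # synthetic poles for illustration
dips = rng.uniform(0, 90, 50)

methods = ['exponential_kamb', 'linear_kamb', 'kamb', 'schmidt']
fig, axes = mplstereonet.subplots(ncols=len(methods))
for ax, method in zip(axes, methods):
    # sigma only affects the Kamb-based kernels; 'schmidt' ignores it.
    ax.density_contourf(strikes, dips, measurement='poles',
                        method=method, sigma=3)
    ax.set_title(method)
plt.show()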
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._get_core_transform
def _get_core_transform(self, resolution): """The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.""" return self._base_transform(self._center_longitude, self._center_latitude, resolution)
python
def _get_core_transform(self, resolution): """The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.""" return self._base_transform(self._center_longitude, self._center_latitude, resolution)
[ "def", "_get_core_transform", "(", "self", ",", "resolution", ")", ":", "return", "self", ".", "_base_transform", "(", "self", ".", "_center_longitude", ",", "self", ".", "_center_latitude", ",", "resolution", ")" ]
The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.
[ "The", "projection", "for", "the", "stereonet", "as", "a", "matplotlib", "transform", ".", "This", "is", "primarily", "called", "by", "LambertAxes", ".", "_set_lim_and_transforms", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L55-L60
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._get_affine_transform
def _get_affine_transform(self): """The affine portion of the base transform. This is called by LambertAxes._set_lim_and_transforms.""" # How big is the projected globe? # In the case of a stereonet, it's actually constant. xscale = yscale = self._scale # Create an affine transform to stretch the projection from 0-1 return Affine2D() \ .rotate(np.radians(self.rotation)) \ .scale(0.5 / xscale, 0.5 / yscale) \ .translate(0.5, 0.5)
python
def _get_affine_transform(self): """The affine portion of the base transform. This is called by LambertAxes._set_lim_and_transforms.""" # How big is the projected globe? # In the case of a stereonet, it's actually constant. xscale = yscale = self._scale # Create an affine transform to stretch the projection from 0-1 return Affine2D() \ .rotate(np.radians(self.rotation)) \ .scale(0.5 / xscale, 0.5 / yscale) \ .translate(0.5, 0.5)
[ "def", "_get_affine_transform", "(", "self", ")", ":", "# How big is the projected globe?", "# In the case of a stereonet, it's actually constant.", "xscale", "=", "yscale", "=", "self", ".", "_scale", "# Create an affine transform to stretch the projection from 0-1", "return", "Affine2D", "(", ")", ".", "rotate", "(", "np", ".", "radians", "(", "self", ".", "rotation", ")", ")", ".", "scale", "(", "0.5", "/", "xscale", ",", "0.5", "/", "yscale", ")", ".", "translate", "(", "0.5", ",", "0.5", ")" ]
The affine portion of the base transform. This is called by LambertAxes._set_lim_and_transforms.
[ "The", "affine", "portion", "of", "the", "base", "transform", ".", "This", "is", "called", "by", "LambertAxes", ".", "_set_lim_and_transforms", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L62-L72
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._set_lim_and_transforms
def _set_lim_and_transforms(self): """Setup the key transforms for the axes.""" # Most of the transforms are set up correctly by LambertAxes LambertAxes._set_lim_and_transforms(self) # Transform for latitude ticks. These are typically unused, but just # in case we need them... yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0) yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0) # These are identical to LambertAxes._set_lim_and_transforms, but we # need to update things to reflect the new "yaxis_stretch" yaxis_space = Affine2D().scale(1.0, 1.1) self._yaxis_transform = \ yaxis_stretch + \ self.transData yaxis_text_base = \ yaxis_stretch + \ self.transProjection + \ (yaxis_space + \ self.transAffine + \ self.transAxes) self._yaxis_text1_transform = \ yaxis_text_base + \ Affine2D().translate(-8.0, 0.0) self._yaxis_text2_transform = \ yaxis_text_base + \ Affine2D().translate(8.0, 0.0)
python
def _set_lim_and_transforms(self): """Setup the key transforms for the axes.""" # Most of the transforms are set up correctly by LambertAxes LambertAxes._set_lim_and_transforms(self) # Transform for latitude ticks. These are typically unused, but just # in case we need them... yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0) yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0) # These are identical to LambertAxes._set_lim_and_transforms, but we # need to update things to reflect the new "yaxis_stretch" yaxis_space = Affine2D().scale(1.0, 1.1) self._yaxis_transform = \ yaxis_stretch + \ self.transData yaxis_text_base = \ yaxis_stretch + \ self.transProjection + \ (yaxis_space + \ self.transAffine + \ self.transAxes) self._yaxis_text1_transform = \ yaxis_text_base + \ Affine2D().translate(-8.0, 0.0) self._yaxis_text2_transform = \ yaxis_text_base + \ Affine2D().translate(8.0, 0.0)
[ "def", "_set_lim_and_transforms", "(", "self", ")", ":", "# Most of the transforms are set up correctly by LambertAxes", "LambertAxes", ".", "_set_lim_and_transforms", "(", "self", ")", "# Transform for latitude ticks. These are typically unused, but just", "# in case we need them...", "yaxis_stretch", "=", "Affine2D", "(", ")", ".", "scale", "(", "4", "*", "self", ".", "horizon", ",", "1.0", ")", "yaxis_stretch", "=", "yaxis_stretch", ".", "translate", "(", "-", "self", ".", "horizon", ",", "0.0", ")", "# These are identical to LambertAxes._set_lim_and_transforms, but we", "# need to update things to reflect the new \"yaxis_stretch\"", "yaxis_space", "=", "Affine2D", "(", ")", ".", "scale", "(", "1.0", ",", "1.1", ")", "self", ".", "_yaxis_transform", "=", "yaxis_stretch", "+", "self", ".", "transData", "yaxis_text_base", "=", "yaxis_stretch", "+", "self", ".", "transProjection", "+", "(", "yaxis_space", "+", "self", ".", "transAffine", "+", "self", ".", "transAxes", ")", "self", ".", "_yaxis_text1_transform", "=", "yaxis_text_base", "+", "Affine2D", "(", ")", ".", "translate", "(", "-", "8.0", ",", "0.0", ")", "self", ".", "_yaxis_text2_transform", "=", "yaxis_text_base", "+", "Affine2D", "(", ")", ".", "translate", "(", "8.0", ",", "0.0", ")" ]
Setup the key transforms for the axes.
[ "Setup", "the", "key", "transforms", "for", "the", "axes", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L74-L101
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_longitude_grid
def set_longitude_grid(self, degrees): """ Set the number of degrees between each longitude grid. """ number = (360.0 / degrees) + 1 locs = np.linspace(-np.pi, np.pi, number, True)[1:] locs[-1] -= 0.01 # Workaround for "back" gridlines showing. self.xaxis.set_major_locator(FixedLocator(locs)) self._logitude_degrees = degrees self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
python
def set_longitude_grid(self, degrees): """ Set the number of degrees between each longitude grid. """ number = (360.0 / degrees) + 1 locs = np.linspace(-np.pi, np.pi, number, True)[1:] locs[-1] -= 0.01 # Workaround for "back" gridlines showing. self.xaxis.set_major_locator(FixedLocator(locs)) self._logitude_degrees = degrees self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
[ "def", "set_longitude_grid", "(", "self", ",", "degrees", ")", ":", "number", "=", "(", "360.0", "/", "degrees", ")", "+", "1", "locs", "=", "np", ".", "linspace", "(", "-", "np", ".", "pi", ",", "np", ".", "pi", ",", "number", ",", "True", ")", "[", "1", ":", "]", "locs", "[", "-", "1", "]", "-=", "0.01", "# Workaround for \"back\" gridlines showing.", "self", ".", "xaxis", ".", "set_major_locator", "(", "FixedLocator", "(", "locs", ")", ")", "self", ".", "_logitude_degrees", "=", "degrees", "self", ".", "xaxis", ".", "set_major_formatter", "(", "self", ".", "ThetaFormatter", "(", "degrees", ")", ")" ]
Set the number of degrees between each longitude grid.
[ "Set", "the", "number", "of", "degrees", "between", "each", "longitude", "grid", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L103-L112
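Since ``set_longitude_grid`` only controls the spacing of the longitude gridlines, a one-line illustration is probably enough; this sketch assumes a stereonet axes created through ``mplstereonet.subplots`` and uses an arbitrary 30-degree spacing.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()
ax.grid(True)               # turn the lat/long graticule on
ax.set_longitude_grid(30)   # longitude gridlines every 30 degrees
plt.show()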
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_position
def set_position(self, pos, which='both'): """Identical to Axes.set_position (This docstring is overwritten).""" self._polar.set_position(pos, which) if self._overlay_axes is not None: self._overlay_axes.set_position(pos, which) LambertAxes.set_position(self, pos, which)
python
def set_position(self, pos, which='both'): """Identical to Axes.set_position (This docstring is overwritten).""" self._polar.set_position(pos, which) if self._overlay_axes is not None: self._overlay_axes.set_position(pos, which) LambertAxes.set_position(self, pos, which)
[ "def", "set_position", "(", "self", ",", "pos", ",", "which", "=", "'both'", ")", ":", "self", ".", "_polar", ".", "set_position", "(", "pos", ",", "which", ")", "if", "self", ".", "_overlay_axes", "is", "not", "None", ":", "self", ".", "_overlay_axes", ".", "set_position", "(", "pos", ",", "which", ")", "LambertAxes", ".", "set_position", "(", "self", ",", "pos", ",", "which", ")" ]
Identical to Axes.set_position (This docstring is overwritten).
[ "Identical", "to", "Axes", ".", "set_position", "(", "This", "docstring", "is", "overwritten", ")", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L114-L119
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_rotation
def set_rotation(self, rotation): """Set the rotation of the stereonet in degrees clockwise from North.""" self._rotation = np.radians(rotation) self._polar.set_theta_offset(self._rotation + np.pi / 2.0) self.transData.invalidate() self.transAxes.invalidate() self._set_lim_and_transforms()
python
def set_rotation(self, rotation): """Set the rotation of the stereonet in degrees clockwise from North.""" self._rotation = np.radians(rotation) self._polar.set_theta_offset(self._rotation + np.pi / 2.0) self.transData.invalidate() self.transAxes.invalidate() self._set_lim_and_transforms()
[ "def", "set_rotation", "(", "self", ",", "rotation", ")", ":", "self", ".", "_rotation", "=", "np", ".", "radians", "(", "rotation", ")", "self", ".", "_polar", ".", "set_theta_offset", "(", "self", ".", "_rotation", "+", "np", ".", "pi", "/", "2.0", ")", "self", ".", "transData", ".", "invalidate", "(", ")", "self", ".", "transAxes", ".", "invalidate", "(", ")", "self", ".", "_set_lim_and_transforms", "(", ")" ]
Set the rotation of the stereonet in degrees clockwise from North.
[ "Set", "the", "rotation", "of", "the", "stereonet", "in", "degrees", "clockwise", "from", "North", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L124-L130
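A short sketch of the rotation setter documented above. The first axes passes ``rotation`` at construction time (the ``_add_overlay`` record further down does the same), and the second changes it afterwards; the 30-degree value is arbitrary.

import matplotlib.pyplot as plt
import mplstereonet  # registers the 'stereonet' projection

fig = plt.figure()

# Rotation given when the axes is created...
ax1 = fig.add_subplot(121, projection='stereonet', rotation=30)

# ...or changed later through the setter.
ax2 = fig.add_subplot(122, projection='stereonet')
ax2.set_rotation(30)  # degrees clockwise from North

for ax in (ax1, ax2):
    ax.plane(315, 30, 'k-')
    ax.grid(True)
plt.show()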
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.cla
def cla(self): """Identical to Axes.cla (This docstring is overwritten).""" Axes.cla(self) # Set grid defaults... self.set_longitude_grid(10) self.set_latitude_grid(10) self.set_longitude_grid_ends(80) # Hide all ticks and tick labels for the "native" lon and lat axes self.xaxis.set_minor_locator(NullLocator()) self.yaxis.set_minor_locator(NullLocator()) self.xaxis.set_ticks_position('none') self.yaxis.set_ticks_position('none') self.xaxis.set_tick_params(label1On=False) self.yaxis.set_tick_params(label1On=False) # Set the grid on or off based on the rc params. self.grid(mpl.rcParams['axes.grid']) # Set the default limits (so that the "native" ticklabels will be # correct if they're turned back on)... Axes.set_xlim(self, -2 * self.horizon, 2 * self.horizon) Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0) # Set up the azimuth ticks. self._polar.set_theta_offset(np.radians(self.rotation + 90)) self._polar.set_theta_direction(-1) self._polar.grid(False) self._polar.set_rticks([])
python
def cla(self): """Identical to Axes.cla (This docstring is overwritten).""" Axes.cla(self) # Set grid defaults... self.set_longitude_grid(10) self.set_latitude_grid(10) self.set_longitude_grid_ends(80) # Hide all ticks and tick labels for the "native" lon and lat axes self.xaxis.set_minor_locator(NullLocator()) self.yaxis.set_minor_locator(NullLocator()) self.xaxis.set_ticks_position('none') self.yaxis.set_ticks_position('none') self.xaxis.set_tick_params(label1On=False) self.yaxis.set_tick_params(label1On=False) # Set the grid on or off based on the rc params. self.grid(mpl.rcParams['axes.grid']) # Set the default limits (so that the "native" ticklabels will be # correct if they're turned back on)... Axes.set_xlim(self, -2 * self.horizon, 2 * self.horizon) Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0) # Set up the azimuth ticks. self._polar.set_theta_offset(np.radians(self.rotation + 90)) self._polar.set_theta_direction(-1) self._polar.grid(False) self._polar.set_rticks([])
[ "def", "cla", "(", "self", ")", ":", "Axes", ".", "cla", "(", "self", ")", "# Set grid defaults...", "self", ".", "set_longitude_grid", "(", "10", ")", "self", ".", "set_latitude_grid", "(", "10", ")", "self", ".", "set_longitude_grid_ends", "(", "80", ")", "# Hide all ticks and tick labels for the \"native\" lon and lat axes", "self", ".", "xaxis", ".", "set_minor_locator", "(", "NullLocator", "(", ")", ")", "self", ".", "yaxis", ".", "set_minor_locator", "(", "NullLocator", "(", ")", ")", "self", ".", "xaxis", ".", "set_ticks_position", "(", "'none'", ")", "self", ".", "yaxis", ".", "set_ticks_position", "(", "'none'", ")", "self", ".", "xaxis", ".", "set_tick_params", "(", "label1On", "=", "False", ")", "self", ".", "yaxis", ".", "set_tick_params", "(", "label1On", "=", "False", ")", "# Set the grid on or off based on the rc params.", "self", ".", "grid", "(", "mpl", ".", "rcParams", "[", "'axes.grid'", "]", ")", "# Set the default limits (so that the \"native\" ticklabels will be", "# correct if they're turned back on)...", "Axes", ".", "set_xlim", "(", "self", ",", "-", "2", "*", "self", ".", "horizon", ",", "2", "*", "self", ".", "horizon", ")", "Axes", ".", "set_ylim", "(", "self", ",", "-", "np", ".", "pi", "/", "2.0", ",", "np", ".", "pi", "/", "2.0", ")", "# Set up the azimuth ticks.", "self", ".", "_polar", ".", "set_theta_offset", "(", "np", ".", "radians", "(", "self", ".", "rotation", "+", "90", ")", ")", "self", ".", "_polar", ".", "set_theta_direction", "(", "-", "1", ")", "self", ".", "_polar", ".", "grid", "(", "False", ")", "self", ".", "_polar", ".", "set_rticks", "(", "[", "]", ")" ]
Identical to Axes.cla (This docstring is overwritten).
[ "Identical", "to", "Axes", ".", "cla", "(", "This", "docstring", "is", "overwritten", ")", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L140-L169
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.format_coord
def format_coord(self, x, y): """Format displayed coordinates during mouseover of axes.""" p, b = stereonet_math.geographic2plunge_bearing(x, y) s, d = stereonet_math.geographic2pole(x, y) pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0]) sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0]) return u'{}, {}'.format(pb, sd)
python
def format_coord(self, x, y): """Format displayed coordinates during mouseover of axes.""" p, b = stereonet_math.geographic2plunge_bearing(x, y) s, d = stereonet_math.geographic2pole(x, y) pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0]) sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0]) return u'{}, {}'.format(pb, sd)
[ "def", "format_coord", "(", "self", ",", "x", ",", "y", ")", ":", "p", ",", "b", "=", "stereonet_math", ".", "geographic2plunge_bearing", "(", "x", ",", "y", ")", "s", ",", "d", "=", "stereonet_math", ".", "geographic2pole", "(", "x", ",", "y", ")", "pb", "=", "u'P/B={:0.0f}\\u00b0/{:03.0f}\\u00b0'", ".", "format", "(", "p", "[", "0", "]", ",", "b", "[", "0", "]", ")", "sd", "=", "u'S/D={:03.0f}\\u00b0/{:0.0f}\\u00b0'", ".", "format", "(", "s", "[", "0", "]", ",", "d", "[", "0", "]", ")", "return", "u'{}, {}'", ".", "format", "(", "pb", ",", "sd", ")" ]
Format displayed coordinates during mouseover of axes.
[ "Format", "displayed", "coordinates", "during", "mouseover", "of", "axes", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L174-L180
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.grid
def grid(self, b=None, which='major', axis='both', kind='arbitrary', center=None, **kwargs): """ Usage is identical to a normal axes grid except for the ``kind`` and ``center`` kwargs. ``kind="polar"`` will add a polar overlay. The ``center`` and ``kind`` arguments allow you to add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid. """ grid_on = self._gridOn Axes.grid(self, False) if kind == 'polar': center = 0, 0 if self._overlay_axes is not None: self._overlay_axes.remove() self._overlay_axes = None if not b and b is not None: return if b is None: if grid_on: return if center is None or np.allclose(center, (np.pi/2, 0)): return Axes.grid(self, b, which, axis, **kwargs) self._add_overlay(center) self._overlay_axes.grid(True, which, axis, **kwargs) self._gridOn = True
python
def grid(self, b=None, which='major', axis='both', kind='arbitrary', center=None, **kwargs): """ Usage is identical to a normal axes grid except for the ``kind`` and ``center`` kwargs. ``kind="polar"`` will add a polar overlay. The ``center`` and ``kind`` arguments allow you to add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid. """ grid_on = self._gridOn Axes.grid(self, False) if kind == 'polar': center = 0, 0 if self._overlay_axes is not None: self._overlay_axes.remove() self._overlay_axes = None if not b and b is not None: return if b is None: if grid_on: return if center is None or np.allclose(center, (np.pi/2, 0)): return Axes.grid(self, b, which, axis, **kwargs) self._add_overlay(center) self._overlay_axes.grid(True, which, axis, **kwargs) self._gridOn = True
[ "def", "grid", "(", "self", ",", "b", "=", "None", ",", "which", "=", "'major'", ",", "axis", "=", "'both'", ",", "kind", "=", "'arbitrary'", ",", "center", "=", "None", ",", "*", "*", "kwargs", ")", ":", "grid_on", "=", "self", ".", "_gridOn", "Axes", ".", "grid", "(", "self", ",", "False", ")", "if", "kind", "==", "'polar'", ":", "center", "=", "0", ",", "0", "if", "self", ".", "_overlay_axes", "is", "not", "None", ":", "self", ".", "_overlay_axes", ".", "remove", "(", ")", "self", ".", "_overlay_axes", "=", "None", "if", "not", "b", "and", "b", "is", "not", "None", ":", "return", "if", "b", "is", "None", ":", "if", "grid_on", ":", "return", "if", "center", "is", "None", "or", "np", ".", "allclose", "(", "center", ",", "(", "np", ".", "pi", "/", "2", ",", "0", ")", ")", ":", "return", "Axes", ".", "grid", "(", "self", ",", "b", ",", "which", ",", "axis", ",", "*", "*", "kwargs", ")", "self", ".", "_add_overlay", "(", "center", ")", "self", ".", "_overlay_axes", ".", "grid", "(", "True", ",", "which", ",", "axis", ",", "*", "*", "kwargs", ")", "self", ".", "_gridOn", "=", "True" ]
Usage is identical to a normal axes grid except for the ``kind`` and ``center`` kwargs. ``kind="polar"`` will add a polar overlay. The ``center`` and ``kind`` arguments allow you to add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``center`` argument. Note that it is given in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``grid(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid.
[ "Usage", "is", "identical", "to", "a", "normal", "axes", "grid", "except", "for", "the", "kind", "and", "center", "kwargs", ".", "kind", "=", "polar", "will", "add", "a", "polar", "overlay", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L182-L229
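A hedged sketch of the ``kind`` and ``center`` keywords described in the ``grid`` record above. It assumes ``mplstereonet.subplots`` and uses ``stereonet_math.line`` (documented in a later record) to build the ``center`` tuple in radians; the plunge/bearing values are arbitrary.

import matplotlib.pyplot as plt
import mplstereonet
from mplstereonet import stereonet_math

fig, (ax1, ax2) = mplstereonet.subplots(ncols=2)

# A "polar" overlay grid centered on the vertical.
ax1.grid(kind='polar')

# A grid centered on an arbitrary lineation (plunge 60, bearing 030).
# stereonet_math.line returns (lon, lat) in stereonet coordinates (radians).
center = stereonet_math.line(60, 30)
ax2.grid(center=center)

for ax in (ax1, ax2):
    ax.line(60, 30, 'ro')   # the lineation itself, for reference
plt.show()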
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._add_overlay
def _add_overlay(self, center): """ Add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid. Parameters ---------- center: 2-item tuple of numbers A tuple of (longitude, latitude) in radians that the overlay is centered on. """ plunge, bearing = stereonet_math.geographic2plunge_bearing(*center) lon0, lat0 = center fig = self.get_figure() self._overlay_axes = fig.add_axes(self.get_position(True), frameon=False, projection=self.name, center_longitude=0, center_latitude=np.radians(plunge), label='overlay', rotation=bearing) self._overlay_axes._polar.remove() self._overlay_axes.format_coord = self._overlay_format_coord self._overlay_axes.grid(True)
python
def _add_overlay(self, center): """ Add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid. Parameters ---------- center: 2-item tuple of numbers A tuple of (longitude, latitude) in radians that the overlay is centered on. """ plunge, bearing = stereonet_math.geographic2plunge_bearing(*center) lon0, lat0 = center fig = self.get_figure() self._overlay_axes = fig.add_axes(self.get_position(True), frameon=False, projection=self.name, center_longitude=0, center_latitude=np.radians(plunge), label='overlay', rotation=bearing) self._overlay_axes._polar.remove() self._overlay_axes.format_coord = self._overlay_format_coord self._overlay_axes.grid(True)
[ "def", "_add_overlay", "(", "self", ",", "center", ")", ":", "plunge", ",", "bearing", "=", "stereonet_math", ".", "geographic2plunge_bearing", "(", "*", "center", ")", "lon0", ",", "lat0", "=", "center", "fig", "=", "self", ".", "get_figure", "(", ")", "self", ".", "_overlay_axes", "=", "fig", ".", "add_axes", "(", "self", ".", "get_position", "(", "True", ")", ",", "frameon", "=", "False", ",", "projection", "=", "self", ".", "name", ",", "center_longitude", "=", "0", ",", "center_latitude", "=", "np", ".", "radians", "(", "plunge", ")", ",", "label", "=", "'overlay'", ",", "rotation", "=", "bearing", ")", "self", ".", "_overlay_axes", ".", "_polar", ".", "remove", "(", ")", "self", ".", "_overlay_axes", ".", "format_coord", "=", "self", ".", "_overlay_format_coord", "self", ".", "_overlay_axes", ".", "grid", "(", "True", ")" ]
Add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid. Parameters ---------- center: 2-item tuple of numbers A tuple of (longitude, latitude) in radians that the overlay is centered on.
[ "Add", "a", "grid", "from", "a", "differently", "-", "centered", "stereonet", ".", "This", "is", "useful", "for", "making", "polar", "stereonets", "that", "still", "use", "the", "same", "coordinate", "system", "as", "a", "standard", "stereonet", ".", "(", "i", ".", "e", ".", "a", "plane", "/", "line", "/", "whatever", "will", "have", "the", "same", "representation", "on", "both", "but", "the", "grid", "is", "displayed", "differently", ".", ")" ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L233-L270
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._polar
def _polar(self): """The "hidden" polar axis used for azimuth labels.""" # This will be called inside LambertAxes.__init__ as well as every # time the axis is cleared, so we need the try/except to avoid having # multiple hidden axes when `cla` is _manually_ called. try: return self._hidden_polar_axes except AttributeError: fig = self.get_figure() self._hidden_polar_axes = fig.add_axes(self.get_position(True), frameon=False, projection='polar') self._hidden_polar_axes.format_coord = self._polar_format_coord return self._hidden_polar_axes
python
def _polar(self): """The "hidden" polar axis used for azimuth labels.""" # This will be called inside LambertAxes.__init__ as well as every # time the axis is cleared, so we need the try/except to avoid having # multiple hidden axes when `cla` is _manually_ called. try: return self._hidden_polar_axes except AttributeError: fig = self.get_figure() self._hidden_polar_axes = fig.add_axes(self.get_position(True), frameon=False, projection='polar') self._hidden_polar_axes.format_coord = self._polar_format_coord return self._hidden_polar_axes
[ "def", "_polar", "(", "self", ")", ":", "# This will be called inside LambertAxes.__init__ as well as every", "# time the axis is cleared, so we need the try/except to avoid having", "# multiple hidden axes when `cla` is _manually_ called.", "try", ":", "return", "self", ".", "_hidden_polar_axes", "except", "AttributeError", ":", "fig", "=", "self", ".", "get_figure", "(", ")", "self", ".", "_hidden_polar_axes", "=", "fig", ".", "add_axes", "(", "self", ".", "get_position", "(", "True", ")", ",", "frameon", "=", "False", ",", "projection", "=", "'polar'", ")", "self", ".", "_hidden_polar_axes", ".", "format_coord", "=", "self", ".", "_polar_format_coord", "return", "self", ".", "_hidden_polar_axes" ]
The "hidden" polar axis used for azimuth labels.
[ "The", "hidden", "polar", "axis", "used", "for", "azimuth", "labels", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L282-L294
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_azimuth_ticks
def set_azimuth_ticks(self, angles, labels=None, frac=None, **kwargs): """ Sets the azimuthal tick locations (Note: tick lines are not currently drawn or supported.). Parameters ---------- angles : sequence of numbers The tick locations in degrees. labels : sequence of strings The tick label at each location. Defaults to a formatted version of the specified angles. frac : number The radial location of the tick labels. 1.0 is the along the edge, 1.1 would be outside, and 0.9 would be inside. **kwargs Additional parameters are text properties for the labels. """ return self._polar.set_thetagrids(angles, labels, frac, **kwargs)
python
def set_azimuth_ticks(self, angles, labels=None, frac=None, **kwargs): """ Sets the azimuthal tick locations (Note: tick lines are not currently drawn or supported.). Parameters ---------- angles : sequence of numbers The tick locations in degrees. labels : sequence of strings The tick label at each location. Defaults to a formatted version of the specified angles. frac : number The radial location of the tick labels. 1.0 is the along the edge, 1.1 would be outside, and 0.9 would be inside. **kwargs Additional parameters are text properties for the labels. """ return self._polar.set_thetagrids(angles, labels, frac, **kwargs)
[ "def", "set_azimuth_ticks", "(", "self", ",", "angles", ",", "labels", "=", "None", ",", "frac", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_polar", ".", "set_thetagrids", "(", "angles", ",", "labels", ",", "frac", ",", "*", "*", "kwargs", ")" ]
Sets the azimuthal tick locations (Note: tick lines are not currently drawn or supported.). Parameters ---------- angles : sequence of numbers The tick locations in degrees. labels : sequence of strings The tick label at each location. Defaults to a formatted version of the specified angles. frac : number The radial location of the tick labels. 1.0 is along the edge, 1.1 would be outside, and 0.9 would be inside. **kwargs Additional parameters are text properties for the labels.
[ "Sets", "the", "azimuthal", "tick", "locations", "(", "Note", ":", "tick", "lines", "are", "not", "currently", "drawn", "or", "supported", ".", ")", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L307-L325
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_azimuth_ticklabels
def set_azimuth_ticklabels(self, labels, fontdict=None, **kwargs): """ Sets the labels for the azimuthal ticks. Parameters ---------- labels : A sequence of strings Azimuth tick labels **kwargs Additional parameters are text properties for the labels. """ return self._polar.set_xticklabels(labels, fontdict, **kwargs)
python
def set_azimuth_ticklabels(self, labels, fontdict=None, **kwargs): """ Sets the labels for the azimuthal ticks. Parameters ---------- labels : A sequence of strings Azimuth tick labels **kwargs Additional parameters are text properties for the labels. """ return self._polar.set_xticklabels(labels, fontdict, **kwargs)
[ "def", "set_azimuth_ticklabels", "(", "self", ",", "labels", ",", "fontdict", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_polar", ".", "set_xticklabels", "(", "labels", ",", "fontdict", ",", "*", "*", "kwargs", ")" ]
Sets the labels for the azimuthal ticks. Parameters ---------- labels : A sequence of strings Azimuth tick labels **kwargs Additional parameters are text properties for the labels.
[ "Sets", "the", "labels", "for", "the", "azimuthal", "ticks", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L330-L341
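The two azimuth-tick records above are easiest to read together; this sketch labels only the cardinal directions. It assumes ``mplstereonet.subplots``, and since the ``frac`` argument of matplotlib's ``set_thetagrids`` is no longer available in recent releases, only angles and labels are passed.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()

# Azimuth ticks at the cardinal directions only...
ax.set_azimuth_ticks([0, 90, 180, 270])
# ...and compass letters instead of numeric labels.
ax.set_azimuth_ticklabels(['N', 'E', 'S', 'W'])
plt.show()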
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.cone
def cone(self, plunge, bearing, angle, segments=100, bidirectional=True, **kwargs): """ Plot a polygon of a small circle (a.k.a. a cone) with an angular radius of *angle* centered at a p/b of *plunge*, *bearing*. Additional keyword arguments are passed on to the ``PathCollection``. (e.g. to have an unfilled small small circle, pass "facecolor='none'".) Parameters ---------- plunge : number or sequence of numbers The plunge of the center of the cone in degrees. bearing : number or sequence of numbers The bearing of the center of the cone in degrees. angle : number or sequence of numbers The angular radius of the cone in degrees. segments : int, optional The number of vertices to use for the cone. Defaults to 100. bidirectional : boolean, optional Whether or not to draw two patches (the one given and its antipode) for each measurement. Defaults to True. **kwargs Additional parameters are ``matplotlib.collections.PatchCollection`` properties. Returns ------- collection : ``matplotlib.collections.PathCollection`` Notes ----- If *bidirectional* is ``True``, two circles will be plotted, even if only one of each pair is visible. This is the default behavior. """ plunge, bearing, angle = np.atleast_1d(plunge, bearing, angle) patches = [] lons, lats = stereonet_math.cone(plunge, bearing, angle, segments) codes = mpath.Path.LINETO * np.ones(segments, dtype=np.uint8) codes[0] = mpath.Path.MOVETO if bidirectional: p, b = -plunge, bearing + 180 alons, alats = stereonet_math.cone(p, b, angle, segments) codes = np.hstack([codes, codes]) lons = np.hstack([lons, alons]) lats = np.hstack([lats, alats]) for lon, lat in zip(lons, lats): xy = np.vstack([lon, lat]).T path = mpath.Path(xy, codes) patches.append(mpatches.PathPatch(path)) col = mcollections.PatchCollection(patches, **kwargs) self.add_collection(col) return col
python
def cone(self, plunge, bearing, angle, segments=100, bidirectional=True, **kwargs): """ Plot a polygon of a small circle (a.k.a. a cone) with an angular radius of *angle* centered at a p/b of *plunge*, *bearing*. Additional keyword arguments are passed on to the ``PathCollection``. (e.g. to have an unfilled small small circle, pass "facecolor='none'".) Parameters ---------- plunge : number or sequence of numbers The plunge of the center of the cone in degrees. bearing : number or sequence of numbers The bearing of the center of the cone in degrees. angle : number or sequence of numbers The angular radius of the cone in degrees. segments : int, optional The number of vertices to use for the cone. Defaults to 100. bidirectional : boolean, optional Whether or not to draw two patches (the one given and its antipode) for each measurement. Defaults to True. **kwargs Additional parameters are ``matplotlib.collections.PatchCollection`` properties. Returns ------- collection : ``matplotlib.collections.PathCollection`` Notes ----- If *bidirectional* is ``True``, two circles will be plotted, even if only one of each pair is visible. This is the default behavior. """ plunge, bearing, angle = np.atleast_1d(plunge, bearing, angle) patches = [] lons, lats = stereonet_math.cone(plunge, bearing, angle, segments) codes = mpath.Path.LINETO * np.ones(segments, dtype=np.uint8) codes[0] = mpath.Path.MOVETO if bidirectional: p, b = -plunge, bearing + 180 alons, alats = stereonet_math.cone(p, b, angle, segments) codes = np.hstack([codes, codes]) lons = np.hstack([lons, alons]) lats = np.hstack([lats, alats]) for lon, lat in zip(lons, lats): xy = np.vstack([lon, lat]).T path = mpath.Path(xy, codes) patches.append(mpatches.PathPatch(path)) col = mcollections.PatchCollection(patches, **kwargs) self.add_collection(col) return col
[ "def", "cone", "(", "self", ",", "plunge", ",", "bearing", ",", "angle", ",", "segments", "=", "100", ",", "bidirectional", "=", "True", ",", "*", "*", "kwargs", ")", ":", "plunge", ",", "bearing", ",", "angle", "=", "np", ".", "atleast_1d", "(", "plunge", ",", "bearing", ",", "angle", ")", "patches", "=", "[", "]", "lons", ",", "lats", "=", "stereonet_math", ".", "cone", "(", "plunge", ",", "bearing", ",", "angle", ",", "segments", ")", "codes", "=", "mpath", ".", "Path", ".", "LINETO", "*", "np", ".", "ones", "(", "segments", ",", "dtype", "=", "np", ".", "uint8", ")", "codes", "[", "0", "]", "=", "mpath", ".", "Path", ".", "MOVETO", "if", "bidirectional", ":", "p", ",", "b", "=", "-", "plunge", ",", "bearing", "+", "180", "alons", ",", "alats", "=", "stereonet_math", ".", "cone", "(", "p", ",", "b", ",", "angle", ",", "segments", ")", "codes", "=", "np", ".", "hstack", "(", "[", "codes", ",", "codes", "]", ")", "lons", "=", "np", ".", "hstack", "(", "[", "lons", ",", "alons", "]", ")", "lats", "=", "np", ".", "hstack", "(", "[", "lats", ",", "alats", "]", ")", "for", "lon", ",", "lat", "in", "zip", "(", "lons", ",", "lats", ")", ":", "xy", "=", "np", ".", "vstack", "(", "[", "lon", ",", "lat", "]", ")", ".", "T", "path", "=", "mpath", ".", "Path", "(", "xy", ",", "codes", ")", "patches", ".", "append", "(", "mpatches", ".", "PathPatch", "(", "path", ")", ")", "col", "=", "mcollections", ".", "PatchCollection", "(", "patches", ",", "*", "*", "kwargs", ")", "self", ".", "add_collection", "(", "col", ")", "return", "col" ]
Plot a polygon of a small circle (a.k.a. a cone) with an angular radius of *angle* centered at a p/b of *plunge*, *bearing*. Additional keyword arguments are passed on to the ``PathCollection``. (e.g. to have an unfilled small circle, pass "facecolor='none'".) Parameters ---------- plunge : number or sequence of numbers The plunge of the center of the cone in degrees. bearing : number or sequence of numbers The bearing of the center of the cone in degrees. angle : number or sequence of numbers The angular radius of the cone in degrees. segments : int, optional The number of vertices to use for the cone. Defaults to 100. bidirectional : boolean, optional Whether or not to draw two patches (the one given and its antipode) for each measurement. Defaults to True. **kwargs Additional parameters are ``matplotlib.collections.PatchCollection`` properties. Returns ------- collection : ``matplotlib.collections.PathCollection`` Notes ----- If *bidirectional* is ``True``, two circles will be plotted, even if only one of each pair is visible. This is the default behavior.
[ "Plot", "a", "polygon", "of", "a", "small", "circle", "(", "a", ".", "k", ".", "a", ".", "a", "cone", ")", "with", "an", "angular", "radius", "of", "*", "angle", "*", "centered", "at", "a", "p", "/", "b", "of", "*", "plunge", "*", "*", "bearing", "*", ".", "Additional", "keyword", "arguments", "are", "passed", "on", "to", "the", "PathCollection", ".", "(", "e", ".", "g", ".", "to", "have", "an", "unfilled", "small", "small", "circle", "pass", "facecolor", "=", "none", ".", ")" ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L347-L401
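A brief sketch of the ``cone`` method documented above, drawing unfilled small circles around a lineation; the plunge, bearing, and angular radii are invented, and ``mplstereonet.subplots`` is assumed.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()

# A lineation at plunge 40, bearing 110, with two unfilled small circles.
ax.line(40, 110, 'ko')
ax.cone(40, 110, 15, facecolor='none', edgecolor='red')   # antipode drawn too (bidirectional=True)
ax.cone(40, 110, 30, facecolor='none', edgecolor='blue', bidirectional=False)
ax.grid(True)
plt.show()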
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.plane
def plane(self, strike, dip, *args, **kwargs): """ Plot lines representing planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". segments : int, optional The number of vertices to use for the line. Defaults to 100. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the lines specified by `strike` and `dip`. """ segments = kwargs.pop('segments', 100) center = self._center_latitude, self._center_longitude lon, lat = stereonet_math.plane(strike, dip, segments, center) return self.plot(lon, lat, *args, **kwargs)
python
def plane(self, strike, dip, *args, **kwargs): """ Plot lines representing planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". segments : int, optional The number of vertices to use for the line. Defaults to 100. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the lines specified by `strike` and `dip`. """ segments = kwargs.pop('segments', 100) center = self._center_latitude, self._center_longitude lon, lat = stereonet_math.plane(strike, dip, segments, center) return self.plot(lon, lat, *args, **kwargs)
[ "def", "plane", "(", "self", ",", "strike", ",", "dip", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "segments", "=", "kwargs", ".", "pop", "(", "'segments'", ",", "100", ")", "center", "=", "self", ".", "_center_latitude", ",", "self", ".", "_center_longitude", "lon", ",", "lat", "=", "stereonet_math", ".", "plane", "(", "strike", ",", "dip", ",", "segments", ",", "center", ")", "return", "self", ".", "plot", "(", "lon", ",", "lat", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Plot lines representing planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". segments : int, optional The number of vertices to use for the line. Defaults to 100. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the lines specified by `strike` and `dip`.
[ "Plot", "lines", "representing", "planes", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "ax", ".", "plot", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L403-L426
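A minimal sketch of ``plane``, assuming ``mplstereonet.subplots``; the strike/dip pairs are illustrative right-hand-rule measurements.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()

# Two planes given as strike/dip (right-hand rule), drawn as great circles.
strikes = [315, 120]
dips = [30, 75]
ax.plane(strikes, dips, 'g-', linewidth=2)
ax.grid(True)
plt.show()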
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.pole
def pole(self, strike, dip, *args, **kwargs): """ Plot points representing poles to planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : numbers or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.pole(strike, dip) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
python
def pole(self, strike, dip, *args, **kwargs): """ Plot points representing poles to planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : numbers or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.pole(strike, dip) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
[ "def", "pole", "(", "self", ",", "strike", ",", "dip", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "stereonet_math", ".", "pole", "(", "strike", ",", "dip", ")", "args", ",", "kwargs", "=", "self", ".", "_point_plot_defaults", "(", "args", ",", "kwargs", ")", "return", "self", ".", "plot", "(", "lon", ",", "lat", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Plot points representing poles to planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : numbers or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`.
[ "Plot", "points", "representing", "poles", "to", "planes", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "ax", ".", "plot", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L428-L448
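A companion sketch for ``pole``, plotting a plane together with its pole so the 90-degree relationship is visible; the values are arbitrary and ``mplstereonet.subplots`` is assumed.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()

strike, dip = 315, 30
ax.plane(strike, dip, 'k--', linewidth=1)   # the plane as a great circle
ax.pole(strike, dip, 'g^', markersize=10)   # its pole, 90 degrees away
ax.grid(True)
plt.show()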
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.rake
def rake(self, strike, dip, rake_angle, *args, **kwargs): """ Plot points representing lineations along planes on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". rake_angle : number or sequences of numbers The angle of the lineation(s) on the plane(s) measured in degrees downward from horizontal. Zero degrees corresponds to the "right hand" direction indicated by the strike, while negative angles are measured downward from the opposite strike direction. **kwargs Additional arguments are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.rake(strike, dip, rake_angle) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
python
def rake(self, strike, dip, rake_angle, *args, **kwargs): """ Plot points representing lineations along planes on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". rake_angle : number or sequences of numbers The angle of the lineation(s) on the plane(s) measured in degrees downward from horizontal. Zero degrees corresponds to the "right hand" direction indicated by the strike, while negative angles are measured downward from the opposite strike direction. **kwargs Additional arguments are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.rake(strike, dip, rake_angle) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
[ "def", "rake", "(", "self", ",", "strike", ",", "dip", ",", "rake_angle", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "stereonet_math", ".", "rake", "(", "strike", ",", "dip", ",", "rake_angle", ")", "args", ",", "kwargs", "=", "self", ".", "_point_plot_defaults", "(", "args", ",", "kwargs", ")", "return", "self", ".", "plot", "(", "lon", ",", "lat", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Plot points representing lineations along planes on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". rake_angle : number or sequences of numbers The angle of the lineation(s) on the plane(s) measured in degrees downward from horizontal. Zero degrees corresponds to the "right hand" direction indicated by the strike, while negative angles are measured downward from the opposite strike direction. **kwargs Additional arguments are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`.
[ "Plot", "points", "representing", "lineations", "along", "planes", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "plot", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L450-L475
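A sketch of ``rake``, pairing a fault plane with a slickenline measured as a rake on that plane; the 315/30 plane and 40-degree rake are made up, and ``mplstereonet.subplots`` is assumed.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()

strike, dip, rake = 315, 30, 40
ax.plane(strike, dip, 'k-')                     # the host plane
ax.rake(strike, dip, rake, 'ro', markersize=8)  # the lineation on it
ax.grid(True)
plt.show()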
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.line
def line(self, plunge, bearing, *args, **kwargs): """ Plot points representing linear features on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- plunge, bearing : number or sequence of numbers The plunge and bearing of the line(s) in degrees. The plunge is measured in degrees downward from the end of the feature specified by the bearing. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.line(plunge, bearing) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot([lon], [lat], *args, **kwargs)
python
def line(self, plunge, bearing, *args, **kwargs): """ Plot points representing linear features on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- plunge, bearing : number or sequence of numbers The plunge and bearing of the line(s) in degrees. The plunge is measured in degrees downward from the end of the feature specified by the bearing. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.line(plunge, bearing) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot([lon], [lat], *args, **kwargs)
[ "def", "line", "(", "self", ",", "plunge", ",", "bearing", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "stereonet_math", ".", "line", "(", "plunge", ",", "bearing", ")", "args", ",", "kwargs", "=", "self", ".", "_point_plot_defaults", "(", "args", ",", "kwargs", ")", "return", "self", ".", "plot", "(", "[", "lon", "]", ",", "[", "lat", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Plot points representing linear features on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- plunge, bearing : number or sequence of numbers The plunge and bearing of the line(s) in degrees. The plunge is measured in degrees downward from the end of the feature specified by the bearing. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `plunge` and `bearing`.
[ "Plot", "points", "representing", "linear", "features", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "plot", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L477-L498
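A comparable sketch for the `line` method, again with invented plunge/bearing measurements; it simply plots a couple of linear features as unconnected points.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()
# Two linear features: plunge 30 toward 110, plunge 45 toward 250 (degrees).
ax.line([30, 45], [110, 250], marker='^', color='darkred')
ax.grid()
plt.show()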
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._point_plot_defaults
def _point_plot_defaults(self, args, kwargs): """To avoid confusion for new users, this ensures that "scattered" points are plotted by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`. """ if args: return args, kwargs if 'ls' not in kwargs and 'linestyle' not in kwargs: kwargs['linestyle'] = 'none' if 'marker' not in kwargs: kwargs['marker'] = 'o' return args, kwargs
python
def _point_plot_defaults(self, args, kwargs): """To avoid confusion for new users, this ensures that "scattered" points are plotted by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`. """ if args: return args, kwargs if 'ls' not in kwargs and 'linestyle' not in kwargs: kwargs['linestyle'] = 'none' if 'marker' not in kwargs: kwargs['marker'] = 'o' return args, kwargs
[ "def", "_point_plot_defaults", "(", "self", ",", "args", ",", "kwargs", ")", ":", "if", "args", ":", "return", "args", ",", "kwargs", "if", "'ls'", "not", "in", "kwargs", "and", "'linestyle'", "not", "in", "kwargs", ":", "kwargs", "[", "'linestyle'", "]", "=", "'none'", "if", "'marker'", "not", "in", "kwargs", ":", "kwargs", "[", "'marker'", "]", "=", "'o'", "return", "args", ",", "kwargs" ]
To avoid confusion for new users, this ensures that "scattered" points are plotted by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`.
[ "To", "avoid", "confusion", "for", "new", "users", "this", "ensures", "that", "scattered", "points", "are", "plotted", "by", "plot", "instead", "of", "points", "joined", "by", "a", "line", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L500-L524
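For illustration only, a standalone re-statement of the default-filling logic in `_point_plot_defaults`: with no positional format argument, the point-plotting methods fall back to unconnected circular markers. This sketch is not part of the library.

def fill_point_defaults(args, kwargs):
    """Mimic StereonetAxes._point_plot_defaults, for demonstration only."""
    if args:                                    # an explicit fmt string wins
        return args, kwargs
    if 'ls' not in kwargs and 'linestyle' not in kwargs:
        kwargs['linestyle'] = 'none'            # do not join the points
    if 'marker' not in kwargs:
        kwargs['marker'] = 'o'                  # default to circular markers
    return args, kwargs

print(fill_point_defaults((), {}))        # ((), {'linestyle': 'none', 'marker': 'o'})
print(fill_point_defaults(('k^',), {}))   # (('k^',), {}) -- left untouched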
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._contour_helper
def _contour_helper(self, args, kwargs): """Unify defaults and common functionality of ``density_contour`` and ``density_contourf``.""" contour_kwargs = {} contour_kwargs['measurement'] = kwargs.pop('measurement', 'poles') contour_kwargs['method'] = kwargs.pop('method', 'exponential_kamb') contour_kwargs['sigma'] = kwargs.pop('sigma', 3) contour_kwargs['gridsize'] = kwargs.pop('gridsize', 100) contour_kwargs['weights'] = kwargs.pop('weights', None) lon, lat, totals = contouring.density_grid(*args, **contour_kwargs) return lon, lat, totals, kwargs
python
def _contour_helper(self, args, kwargs): """Unify defaults and common functionality of ``density_contour`` and ``density_contourf``.""" contour_kwargs = {} contour_kwargs['measurement'] = kwargs.pop('measurement', 'poles') contour_kwargs['method'] = kwargs.pop('method', 'exponential_kamb') contour_kwargs['sigma'] = kwargs.pop('sigma', 3) contour_kwargs['gridsize'] = kwargs.pop('gridsize', 100) contour_kwargs['weights'] = kwargs.pop('weights', None) lon, lat, totals = contouring.density_grid(*args, **contour_kwargs) return lon, lat, totals, kwargs
[ "def", "_contour_helper", "(", "self", ",", "args", ",", "kwargs", ")", ":", "contour_kwargs", "=", "{", "}", "contour_kwargs", "[", "'measurement'", "]", "=", "kwargs", ".", "pop", "(", "'measurement'", ",", "'poles'", ")", "contour_kwargs", "[", "'method'", "]", "=", "kwargs", ".", "pop", "(", "'method'", ",", "'exponential_kamb'", ")", "contour_kwargs", "[", "'sigma'", "]", "=", "kwargs", ".", "pop", "(", "'sigma'", ",", "3", ")", "contour_kwargs", "[", "'gridsize'", "]", "=", "kwargs", ".", "pop", "(", "'gridsize'", ",", "100", ")", "contour_kwargs", "[", "'weights'", "]", "=", "kwargs", ".", "pop", "(", "'weights'", ",", "None", ")", "lon", ",", "lat", ",", "totals", "=", "contouring", ".", "density_grid", "(", "*", "args", ",", "*", "*", "contour_kwargs", ")", "return", "lon", ",", "lat", ",", "totals", ",", "kwargs" ]
Unify defaults and common functionality of ``density_contour`` and ``density_contourf``.
[ "Unify", "defaults", "and", "common", "functionality", "of", "density_contour", "and", "density_contourf", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L526-L536
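A hedged sketch of the public `density_grid` function that `_contour_helper` wraps (it is listed in the See Also sections below); the keyword values mirror the defaults popped above, and the strike/dip data are invented.

import mplstereonet

strikes = [120, 315, 86, 103]            # illustrative poles-to-planes input
dips = [22, 85, 31, 47]
lon, lat, density = mplstereonet.density_grid(
    strikes, dips,
    measurement='poles', method='exponential_kamb', sigma=3, gridsize=100)
print(lon.shape, lat.shape, density.shape)   # regular lon/lat grid of estimates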
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.density_contour
def density_contour(self, *args, **kwargs): """ Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.) and plots contour lines of the resulting density distribution. Parameters ---------- *args : A variable number of sequences of measurements. By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. **kwargs Additional keyword arguments are passed on to matplotlib's `contour` function. Returns ------- A matplotlib ContourSet. 
See Also -------- mplstereonet.density_grid mplstereonet.StereonetAxes.density_contourf matplotlib.pyplot.contour matplotlib.pyplot.clabel Examples -------- Plot density contours of poles to the specified planes using a modified Kamb method with exponential smoothing [1]_. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips) Plot density contours of a set of linear orientation measurements. >>> plunges, bearings = [-10, 20, -30], [120, 315, 86] >>> ax.density_contour(plunges, bearings, measurement='lines') Plot density contours of a set of rake measurements. >>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9] >>> ax.density_contour(strikes, dips, rakes, measurement='rakes') Plot density contours of a set of "raw" longitudes and latitudes. >>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45]) >>> ax.density_contour(lon, lat, measurement='radians') Plot density contours of poles to planes using a Kamb method [2]_ with the density estimated on a 10x10 grid (in long-lat space) >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips, method='kamb', gridsize=10) Plot density contours of poles to planes with contours at [1,2,3] standard deviations. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips, levels=[1,2,3]) References ---------- .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. """ lon, lat, totals, kwargs = self._contour_helper(args, kwargs) return self.contour(lon, lat, totals, **kwargs)
python
def density_contour(self, *args, **kwargs): """ Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.) and plots contour lines of the resulting density distribution. Parameters ---------- *args : A variable number of sequences of measurements. By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. **kwargs Additional keyword arguments are passed on to matplotlib's `contour` function. Returns ------- A matplotlib ContourSet. 
See Also -------- mplstereonet.density_grid mplstereonet.StereonetAxes.density_contourf matplotlib.pyplot.contour matplotlib.pyplot.clabel Examples -------- Plot density contours of poles to the specified planes using a modified Kamb method with exponential smoothing [1]_. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips) Plot density contours of a set of linear orientation measurements. >>> plunges, bearings = [-10, 20, -30], [120, 315, 86] >>> ax.density_contour(plunges, bearings, measurement='lines') Plot density contours of a set of rake measurements. >>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9] >>> ax.density_contour(strikes, dips, rakes, measurement='rakes') Plot density contours of a set of "raw" longitudes and latitudes. >>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45]) >>> ax.density_contour(lon, lat, measurement='radians') Plot density contours of poles to planes using a Kamb method [2]_ with the density estimated on a 10x10 grid (in long-lat space) >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips, method='kamb', gridsize=10) Plot density contours of poles to planes with contours at [1,2,3] standard deviations. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips, levels=[1,2,3]) References ---------- .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. """ lon, lat, totals, kwargs = self._contour_helper(args, kwargs) return self.contour(lon, lat, totals, **kwargs)
[ "def", "density_contour", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", ",", "totals", ",", "kwargs", "=", "self", ".", "_contour_helper", "(", "args", ",", "kwargs", ")", "return", "self", ".", "contour", "(", "lon", ",", "lat", ",", "totals", ",", "*", "*", "kwargs", ")" ]
Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.) and plots contour lines of the resulting density distribution. Parameters ---------- *args : A variable number of sequences of measurements. By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. **kwargs Additional keyword arguments are passed on to matplotlib's `contour` function. Returns ------- A matplotlib ContourSet. See Also -------- mplstereonet.density_grid mplstereonet.StereonetAxes.density_contourf matplotlib.pyplot.contour matplotlib.pyplot.clabel Examples -------- Plot density contours of poles to the specified planes using a modified Kamb method with exponential smoothing [1]_. 
>>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips) Plot density contours of a set of linear orientation measurements. >>> plunges, bearings = [-10, 20, -30], [120, 315, 86] >>> ax.density_contour(plunges, bearings, measurement='lines') Plot density contours of a set of rake measurements. >>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9] >>> ax.density_contour(strikes, dips, rakes, measurement='rakes') Plot density contours of a set of "raw" longitudes and latitudes. >>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45]) >>> ax.density_contour(lon, lat, measurement='radians') Plot density contours of poles to planes using a Kamb method [2]_ with the density estimated on a 10x10 grid (in long-lat space) >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips, method='kamb', gridsize=10) Plot density contours of poles to planes with contours at [1,2,3] standard deviations. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contour(strikes, dips, levels=[1,2,3]) References ---------- .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
[ "Estimates", "point", "density", "of", "the", "given", "linear", "orientation", "measurements", "(", "Interpreted", "as", "poles", "lines", "rakes", "or", "raw", "longitudes", "and", "latitudes", "based", "on", "the", "measurement", "keyword", "argument", ".", ")", "and", "plots", "contour", "lines", "of", "the", "resulting", "density", "distribution", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L538-L675
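An end-to-end sketch built from the docstring examples above: contour the pole density for a handful of invented strike/dip measurements and overlay the raw poles.

import matplotlib.pyplot as plt
import mplstereonet

strikes = [120, 315, 86, 103, 99]
dips = [22, 85, 31, 47, 51]

fig, ax = mplstereonet.subplots()
cset = ax.density_contour(strikes, dips, measurement='poles')
ax.clabel(cset)                  # label contours (sigma units for Kamb methods)
ax.pole(strikes, dips, 'k.')     # raw poles on top of the contours
ax.grid()
plt.show()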
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.density_contourf
def density_contourf(self, *args, **kwargs): """ Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.) and plots filled contours of the resulting density distribution. Parameters ---------- *args : A variable number of sequences of measurements. By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. **kwargs Additional keyword arguments are passed on to matplotlib's `contourf` function. Returns ------- A matplotlib `QuadContourSet`. 
See Also -------- mplstereonet.density_grid mplstereonet.StereonetAxes.density_contour matplotlib.pyplot.contourf matplotlib.pyplot.clabel Examples -------- Plot filled density contours of poles to the specified planes using a modified Kamb method with exponential smoothing [1]_. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips) Plot filled density contours of a set of linear orientation measurements. >>> plunges, bearings = [-10, 20, -30], [120, 315, 86] >>> ax.density_contourf(plunges, bearings, measurement='lines') Plot filled density contours of a set of rake measurements. >>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9] >>> ax.density_contourf(strikes, dips, rakes, measurement='rakes') Plot filled density contours of a set of "raw" longitudes and latitudes. >>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45]) >>> ax.density_contourf(lon, lat, measurement='radians') Plot filled density contours of poles to planes using a Kamb method [2]_ with the density estimated on a 10x10 grid (in long-lat space) >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips, method='kamb', gridsize=10) Plot filled density contours of poles to planes with contours at [1,2,3] standard deviations. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips, levels=[1,2,3]) References ---------- .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. """ lon, lat, totals, kwargs = self._contour_helper(args, kwargs) return self.contourf(lon, lat, totals, **kwargs)
python
def density_contourf(self, *args, **kwargs): """ Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.) and plots filled contours of the resulting density distribution. Parameters ---------- *args : A variable number of sequences of measurements. By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. **kwargs Additional keyword arguments are passed on to matplotlib's `contourf` function. Returns ------- A matplotlib `QuadContourSet`. 
See Also -------- mplstereonet.density_grid mplstereonet.StereonetAxes.density_contour matplotlib.pyplot.contourf matplotlib.pyplot.clabel Examples -------- Plot filled density contours of poles to the specified planes using a modified Kamb method with exponential smoothing [1]_. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips) Plot filled density contours of a set of linear orientation measurements. >>> plunges, bearings = [-10, 20, -30], [120, 315, 86] >>> ax.density_contourf(plunges, bearings, measurement='lines') Plot filled density contours of a set of rake measurements. >>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9] >>> ax.density_contourf(strikes, dips, rakes, measurement='rakes') Plot filled density contours of a set of "raw" longitudes and latitudes. >>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45]) >>> ax.density_contourf(lon, lat, measurement='radians') Plot filled density contours of poles to planes using a Kamb method [2]_ with the density estimated on a 10x10 grid (in long-lat space) >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips, method='kamb', gridsize=10) Plot filled density contours of poles to planes with contours at [1,2,3] standard deviations. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips, levels=[1,2,3]) References ---------- .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. """ lon, lat, totals, kwargs = self._contour_helper(args, kwargs) return self.contourf(lon, lat, totals, **kwargs)
[ "def", "density_contourf", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", ",", "totals", ",", "kwargs", "=", "self", ".", "_contour_helper", "(", "args", ",", "kwargs", ")", "return", "self", ".", "contourf", "(", "lon", ",", "lat", ",", "totals", ",", "*", "*", "kwargs", ")" ]
Estimates point density of the given linear orientation measurements (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes based on the `measurement` keyword argument.) and plots filled contours of the resulting density distribution. Parameters ---------- *args : A variable number of sequences of measurements. By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for contouring. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. method : string, optional The method of density estimation to use. Defaults to ``"exponential_kamb"``. May be one of the following: ``"exponential_kamb"`` : Kamb with exponential smoothing A modified Kamb method using exponential smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"linear_kamb"`` : Kamb with linear smoothing A modified Kamb method using linear smoothing [1]_. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"kamb"`` : Kamb with no smoothing Kamb's method [2]_ with no smoothing. Units are in numbers of standard deviations by which the density estimate differs from uniform. ``"schmidt"`` : 1% counts The traditional "Schmidt" (a.k.a. 1%) method. Counts points within a counting circle comprising 1% of the total area of the hemisphere. Does not take into account sample size. Units are in points per 1% area. sigma : int or float, optional The number of standard deviations defining the expected number of standard deviations by which a random sample from a uniform distribution of points would be expected to vary from being evenly distributed across the hemisphere. This controls the size of the counting circle, and therefore the degree of smoothing. Higher sigmas will lead to more smoothing of the resulting density distribution. This parameter only applies to Kamb-based methods. Defaults to 3. gridsize : int or 2-item tuple of ints, optional The size of the grid that the density is estimated on. If a single int is given, it is interpreted as an NxN grid. If a tuple of ints is given it is interpreted as (nrows, ncols). Defaults to 100. weights : array-like, optional The relative weight to be applied to each input measurement. The array will be normalized to sum to 1, so absolute value of the weights do not affect the result. Defaults to None. **kwargs Additional keyword arguments are passed on to matplotlib's `contourf` function. Returns ------- A matplotlib `QuadContourSet`. 
See Also -------- mplstereonet.density_grid mplstereonet.StereonetAxes.density_contour matplotlib.pyplot.contourf matplotlib.pyplot.clabel Examples -------- Plot filled density contours of poles to the specified planes using a modified Kamb method with exponential smoothing [1]_. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips) Plot filled density contours of a set of linear orientation measurements. >>> plunges, bearings = [-10, 20, -30], [120, 315, 86] >>> ax.density_contourf(plunges, bearings, measurement='lines') Plot filled density contours of a set of rake measurements. >>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9] >>> ax.density_contourf(strikes, dips, rakes, measurement='rakes') Plot filled density contours of a set of "raw" longitudes and latitudes. >>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45]) >>> ax.density_contourf(lon, lat, measurement='radians') Plot filled density contours of poles to planes using a Kamb method [2]_ with the density estimated on a 10x10 grid (in long-lat space) >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips, method='kamb', gridsize=10) Plot filled density contours of poles to planes with contours at [1,2,3] standard deviations. >>> strikes, dips = [120, 315, 86], [22, 85, 31] >>> ax.density_contourf(strikes, dips, levels=[1,2,3]) References ---------- .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49. .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
[ "Estimates", "point", "density", "of", "the", "given", "linear", "orientation", "measurements", "(", "Interpreted", "as", "poles", "lines", "rakes", "or", "raw", "longitudes", "and", "latitudes", "based", "on", "the", "measurement", "keyword", "argument", ".", ")", "and", "plots", "filled", "contours", "of", "the", "resulting", "density", "distribution", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L677-L816
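The filled-contour counterpart, again a sketch with invented data; the colorbar call is ordinary matplotlib and reports the same sigma-based units.

import matplotlib.pyplot as plt
import mplstereonet

strikes = [120, 315, 86, 103, 99]
dips = [22, 85, 31, 47, 51]

fig, ax = mplstereonet.subplots()
cax = ax.density_contourf(strikes, dips, measurement='poles', cmap='Blues')
fig.colorbar(cax)                # density scale (standard deviations)
ax.pole(strikes, dips, 'k.')
plt.show()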
joferkington/mplstereonet
examples/polar_overlay.py
basic
def basic(): """Set up a basic stereonet and plot the same data each time.""" fig, ax = mplstereonet.subplots() strike, dip = 315, 30 ax.plane(strike, dip, color='lightblue') ax.pole(strike, dip, color='green', markersize=15) ax.rake(strike, dip, 40, marker='*', markersize=20, color='green') # Make a bit of room for the title... fig.subplots_adjust(top=0.8) return ax
python
def basic(): """Set up a basic stereonet and plot the same data each time.""" fig, ax = mplstereonet.subplots() strike, dip = 315, 30 ax.plane(strike, dip, color='lightblue') ax.pole(strike, dip, color='green', markersize=15) ax.rake(strike, dip, 40, marker='*', markersize=20, color='green') # Make a bit of room for the title... fig.subplots_adjust(top=0.8) return ax
[ "def", "basic", "(", ")", ":", "fig", ",", "ax", "=", "mplstereonet", ".", "subplots", "(", ")", "strike", ",", "dip", "=", "315", ",", "30", "ax", ".", "plane", "(", "strike", ",", "dip", ",", "color", "=", "'lightblue'", ")", "ax", ".", "pole", "(", "strike", ",", "dip", ",", "color", "=", "'green'", ",", "markersize", "=", "15", ")", "ax", ".", "rake", "(", "strike", ",", "dip", ",", "40", ",", "marker", "=", "'*'", ",", "markersize", "=", "20", ",", "color", "=", "'green'", ")", "# Make a bit of room for the title...", "fig", ".", "subplots_adjust", "(", "top", "=", "0.8", ")", "return", "ax" ]
Set up a basic stereonet and plot the same data each time.
[ "Set", "up", "a", "basic", "stereonet", "and", "plot", "the", "same", "data", "each", "time", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/polar_overlay.py#L23-L35
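A small sketch of how this helper might be driven from the example script, assuming `basic` (defined above) is in scope; the title string and output filename are invented for illustration.

ax = basic()
ax.set_title('Plane, pole, and rake for strike=315, dip=30', y=1.1)
ax.figure.savefig('polar_overlay_basic.png', dpi=150)   # hypothetical filename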
joferkington/mplstereonet
examples/stereonet_explanation.py
setup_figure
def setup_figure(): """Setup the figure and axes""" fig, axes = mplstereonet.subplots(ncols=2, figsize=(20,10)) for ax in axes: # Make the grid lines solid. ax.grid(ls='-') # Make the longitude grids continue all the way to the poles ax.set_longitude_grid_ends(90) return fig, axes
python
def setup_figure(): """Setup the figure and axes""" fig, axes = mplstereonet.subplots(ncols=2, figsize=(20,10)) for ax in axes: # Make the grid lines solid. ax.grid(ls='-') # Make the longitude grids continue all the way to the poles ax.set_longitude_grid_ends(90) return fig, axes
[ "def", "setup_figure", "(", ")", ":", "fig", ",", "axes", "=", "mplstereonet", ".", "subplots", "(", "ncols", "=", "2", ",", "figsize", "=", "(", "20", ",", "10", ")", ")", "for", "ax", "in", "axes", ":", "# Make the grid lines solid.", "ax", ".", "grid", "(", "ls", "=", "'-'", ")", "# Make the longitude grids continue all the way to the poles", "ax", ".", "set_longitude_grid_ends", "(", "90", ")", "return", "fig", ",", "axes" ]
Setup the figure and axes
[ "Setup", "the", "figure", "and", "axes" ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L17-L25
joferkington/mplstereonet
examples/stereonet_explanation.py
stereonet_projection_explanation
def stereonet_projection_explanation(ax): """Example to explain azimuth and dip on a lower-hemisphere stereonet.""" ax.set_title('Dip and Azimuth', y=1.1, size=18) # Set the azimuth ticks to be just "N", "E", etc. ax.set_azimuth_ticks(range(0, 360, 10)) # Hackishly set some of the azimuth labels to North, East, etc... fmt = ax.yaxis.get_major_formatter() labels = [fmt(item) for item in ax.get_azimuth_ticks()] labels[0] = 'North' labels[9] = 'East' labels[18] = 'South' labels[27] = 'West' ax.set_azimuth_ticklabels(labels) # Unhide the xticklabels and use them for dip labels ax.xaxis.set_tick_params(label1On=True) labels = list(range(10, 100, 10)) + list(range(80, 0, -10)) ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.set_xticklabels([fmt(np.radians(item)) for item in labels]) ax.set_xlabel('Dip or Plunge') xlabel_halo(ax) return ax
python
def stereonet_projection_explanation(ax): """Example to explain azimuth and dip on a lower-hemisphere stereonet.""" ax.set_title('Dip and Azimuth', y=1.1, size=18) # Set the azimuth ticks to be just "N", "E", etc. ax.set_azimuth_ticks(range(0, 360, 10)) # Hackishly set some of the azimuth labels to North, East, etc... fmt = ax.yaxis.get_major_formatter() labels = [fmt(item) for item in ax.get_azimuth_ticks()] labels[0] = 'North' labels[9] = 'East' labels[18] = 'South' labels[27] = 'West' ax.set_azimuth_ticklabels(labels) # Unhide the xticklabels and use them for dip labels ax.xaxis.set_tick_params(label1On=True) labels = list(range(10, 100, 10)) + list(range(80, 0, -10)) ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.set_xticklabels([fmt(np.radians(item)) for item in labels]) ax.set_xlabel('Dip or Plunge') xlabel_halo(ax) return ax
[ "def", "stereonet_projection_explanation", "(", "ax", ")", ":", "ax", ".", "set_title", "(", "'Dip and Azimuth'", ",", "y", "=", "1.1", ",", "size", "=", "18", ")", "# Set the azimuth ticks to be just \"N\", \"E\", etc.", "ax", ".", "set_azimuth_ticks", "(", "range", "(", "0", ",", "360", ",", "10", ")", ")", "# Hackishly set some of the azimuth labels to North, East, etc...", "fmt", "=", "ax", ".", "yaxis", ".", "get_major_formatter", "(", ")", "labels", "=", "[", "fmt", "(", "item", ")", "for", "item", "in", "ax", ".", "get_azimuth_ticks", "(", ")", "]", "labels", "[", "0", "]", "=", "'North'", "labels", "[", "9", "]", "=", "'East'", "labels", "[", "18", "]", "=", "'South'", "labels", "[", "27", "]", "=", "'West'", "ax", ".", "set_azimuth_ticklabels", "(", "labels", ")", "# Unhide the xticklabels and use them for dip labels", "ax", ".", "xaxis", ".", "set_tick_params", "(", "label1On", "=", "True", ")", "labels", "=", "list", "(", "range", "(", "10", ",", "100", ",", "10", ")", ")", "+", "list", "(", "range", "(", "80", ",", "0", ",", "-", "10", ")", ")", "ax", ".", "set_xticks", "(", "np", ".", "radians", "(", "np", ".", "arange", "(", "-", "80", ",", "90", ",", "10", ")", ")", ")", "ax", ".", "set_xticklabels", "(", "[", "fmt", "(", "np", ".", "radians", "(", "item", ")", ")", "for", "item", "in", "labels", "]", ")", "ax", ".", "set_xlabel", "(", "'Dip or Plunge'", ")", "xlabel_halo", "(", "ax", ")", "return", "ax" ]
Example to explain azimuth and dip on a lower-hemisphere stereonet.
[ "Example", "to", "explain", "azimuth", "and", "dip", "on", "a", "lower", "-", "hemisphere", "stereonet", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L27-L52
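A reduced, hedged sketch of the azimuth-tick customization used above, applied to a fresh stereonet; the 30-degree spacing and compass labels are illustrative.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()
ax.set_azimuth_ticks(range(0, 360, 30))   # a tick every 30 degrees
ax.set_azimuth_ticklabels(
    ['N', '', '', 'E', '', '', 'S', '', '', 'W', '', ''])  # 12 labels, 12 ticks
ax.grid(ls='-')
plt.show()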
joferkington/mplstereonet
examples/stereonet_explanation.py
native_projection_explanation
def native_projection_explanation(ax): """Example showing how the "native" longitude and latitude relate to the stereonet projection.""" ax.set_title('Longitude and Latitude', size=18, y=1.1) # Hide the azimuth labels ax.set_azimuth_ticklabels([]) # Make the axis tick labels visible: ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.tick_params(label1On=True) ax.set_xlabel('Longitude') xlabel_halo(ax) return ax
python
def native_projection_explanation(ax): """Example showing how the "native" longitude and latitude relate to the stereonet projection.""" ax.set_title('Longitude and Latitude', size=18, y=1.1) # Hide the azimuth labels ax.set_azimuth_ticklabels([]) # Make the axis tick labels visible: ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.tick_params(label1On=True) ax.set_xlabel('Longitude') xlabel_halo(ax) return ax
[ "def", "native_projection_explanation", "(", "ax", ")", ":", "ax", ".", "set_title", "(", "'Longitude and Latitude'", ",", "size", "=", "18", ",", "y", "=", "1.1", ")", "# Hide the azimuth labels", "ax", ".", "set_azimuth_ticklabels", "(", "[", "]", ")", "# Make the axis tick labels visible:", "ax", ".", "set_xticks", "(", "np", ".", "radians", "(", "np", ".", "arange", "(", "-", "80", ",", "90", ",", "10", ")", ")", ")", "ax", ".", "tick_params", "(", "label1On", "=", "True", ")", "ax", ".", "set_xlabel", "(", "'Longitude'", ")", "xlabel_halo", "(", "ax", ")", "return", "ax" ]
Example showing how the "native" longitude and latitude relate to the stereonet projection.
[ "Example", "showing", "how", "the", "native", "longitude", "and", "latitude", "relate", "to", "the", "stereonet", "projection", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L54-L69
joferkington/mplstereonet
examples/stereonet_explanation.py
xlabel_halo
def xlabel_halo(ax): """Add a white "halo" around the xlabels.""" import matplotlib.patheffects as effects for tick in ax.get_xticklabels() + [ax.xaxis.label]: tick.set_path_effects([effects.withStroke(linewidth=4, foreground='w')])
python
def xlabel_halo(ax): """Add a white "halo" around the xlabels.""" import matplotlib.patheffects as effects for tick in ax.get_xticklabels() + [ax.xaxis.label]: tick.set_path_effects([effects.withStroke(linewidth=4, foreground='w')])
[ "def", "xlabel_halo", "(", "ax", ")", ":", "import", "matplotlib", ".", "patheffects", "as", "effects", "for", "tick", "in", "ax", ".", "get_xticklabels", "(", ")", "+", "[", "ax", ".", "xaxis", ".", "label", "]", ":", "tick", ".", "set_path_effects", "(", "[", "effects", ".", "withStroke", "(", "linewidth", "=", "4", ",", "foreground", "=", "'w'", ")", "]", ")" ]
Add a white "halo" around the xlabels.
[ "Add", "a", "white", "halo", "around", "the", "xlabels", "." ]
train
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L71-L75
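The same path-effect trick in plain matplotlib, independent of mplstereonet; this shows what `withStroke` does to a single label.

import matplotlib.pyplot as plt
import matplotlib.patheffects as effects

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
label = ax.set_xlabel('x value')          # set_xlabel returns the Text artist
# Draw a 4-point white stroke behind the glyphs, i.e. a "halo".
label.set_path_effects([effects.withStroke(linewidth=4, foreground='w')])
plt.show()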