repository_name
stringlengths 5
67
| func_path_in_repository
stringlengths 4
234
| func_name
stringlengths 0
314
| whole_func_string
stringlengths 52
3.87M
| language
stringclasses 6
values | func_code_string
stringlengths 52
3.87M
| func_documentation_string
stringlengths 1
47.2k
| func_code_url
stringlengths 85
339
|
---|---|---|---|---|---|---|---|
misakar/mana | mana/mana.py | admin | def admin(module):
"""add sql modules into admin site"""
# add module into admin site
app = os.getcwd().split('/')[-1]
if app != 'app':
logger.warning('''\033[31m{Warning}\033[0m
==> your current path is \033[32m%s\033[0m\n
==> please add your sql module under app folder!''' % os.getcwd())
exit(1)
admin_path = os.path.join(os.getcwd(), 'admin')
os.chdir(admin_path)
with open('views.py', 'r+') as f:
prev = pos = 0
while f.readline():
prev, pos = pos, f.tell()
f.seek(prev)
f.write(
'\nfrom app.models import %s\nadmin.add_view(ModelView(%s, db.session))'
% (module, module)
)
logger.info('''\033[33m{Info}\033[0m: add module done!''') | python | def admin(module):
"""add sql modules into admin site"""
# add module into admin site
app = os.getcwd().split('/')[-1]
if app != 'app':
logger.warning('''\033[31m{Warning}\033[0m
==> your current path is \033[32m%s\033[0m\n
==> please add your sql module under app folder!''' % os.getcwd())
exit(1)
admin_path = os.path.join(os.getcwd(), 'admin')
os.chdir(admin_path)
with open('views.py', 'r+') as f:
prev = pos = 0
while f.readline():
prev, pos = pos, f.tell()
f.seek(prev)
f.write(
'\nfrom app.models import %s\nadmin.add_view(ModelView(%s, db.session))'
% (module, module)
)
logger.info('''\033[33m{Info}\033[0m: add module done!''') | add sql modules into admin site | https://github.com/misakar/mana/blob/95ccdbf230ed7abc33ea2c878c66d2c8fc72ea69/mana/mana.py#L332-L354 |
misakar/mana | mana/operators/_mkdir_p.py | _mkdir_p | def _mkdir_p(abspath):
"""
Usage:
create the abspath
except the abspath exist
Param:
abspath: the absolutly path you want to be created
"""
try:
os.makedirs(abspath)
except OSError as e:
if (e.errno == errno.EEXIST) and (os.path.isdir(abspath)):
pass
else: raise | python | def _mkdir_p(abspath):
"""
Usage:
create the abspath
except the abspath exist
Param:
abspath: the absolutly path you want to be created
"""
try:
os.makedirs(abspath)
except OSError as e:
if (e.errno == errno.EEXIST) and (os.path.isdir(abspath)):
pass
else: raise | Usage:
create the abspath
except the abspath exist
Param:
abspath: the absolutly path you want to be created | https://github.com/misakar/mana/blob/95ccdbf230ed7abc33ea2c878c66d2c8fc72ea69/mana/operators/_mkdir_p.py#L22-L36 |
bachya/pyairvisual | pyairvisual/errors.py | raise_error | def raise_error(error_type: str) -> None:
"""Raise the appropriate error based on error message."""
try:
error = next((v for k, v in ERROR_CODES.items() if k in error_type))
except StopIteration:
error = AirVisualError
raise error(error_type) | python | def raise_error(error_type: str) -> None:
"""Raise the appropriate error based on error message."""
try:
error = next((v for k, v in ERROR_CODES.items() if k in error_type))
except StopIteration:
error = AirVisualError
raise error(error_type) | Raise the appropriate error based on error message. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/errors.py#L63-L69 |
bachya/pyairvisual | pyairvisual/client.py | _raise_on_error | def _raise_on_error(data: Union[str, dict]) -> None:
"""Raise the appropriate exception on error."""
if isinstance(data, str):
raise_error(data)
elif 'status' in data and data['status'] != 'success':
raise_error(data['data']['message']) | python | def _raise_on_error(data: Union[str, dict]) -> None:
"""Raise the appropriate exception on error."""
if isinstance(data, str):
raise_error(data)
elif 'status' in data and data['status'] != 'success':
raise_error(data['data']['message']) | Raise the appropriate exception on error. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/client.py#L53-L58 |
bachya/pyairvisual | pyairvisual/client.py | Client.request | async def request(
self,
method: str,
endpoint: str,
*,
base_url: str = API_URL_SCAFFOLD,
headers: dict = None,
params: dict = None,
json: dict = None) -> dict:
"""Make a request against AirVisual."""
if not headers:
headers = {}
headers.update({'Content-Type': 'application/json'})
if not params:
params = {}
if self._api_key:
params.update({'key': self._api_key})
url = '{0}/{1}'.format(base_url, endpoint)
async with self.websession.request(method, url, headers=headers,
params=params, json=json) as resp:
data = await resp.json(content_type=None)
_raise_on_error(data)
return data | python | async def request(
self,
method: str,
endpoint: str,
*,
base_url: str = API_URL_SCAFFOLD,
headers: dict = None,
params: dict = None,
json: dict = None) -> dict:
"""Make a request against AirVisual."""
if not headers:
headers = {}
headers.update({'Content-Type': 'application/json'})
if not params:
params = {}
if self._api_key:
params.update({'key': self._api_key})
url = '{0}/{1}'.format(base_url, endpoint)
async with self.websession.request(method, url, headers=headers,
params=params, json=json) as resp:
data = await resp.json(content_type=None)
_raise_on_error(data)
return data | Make a request against AirVisual. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/client.py#L25-L50 |
bachya/pyairvisual | example.py | main | async def main() -> None: # pylint: disable=too-many-statements
"""Create the aiohttp session and run the example."""
logging.basicConfig(level=logging.INFO)
async with ClientSession() as websession:
client = Client(websession, api_key='<API KEY>')
# Get supported locations (by location):
try:
_LOGGER.info(await client.supported.countries())
_LOGGER.info(await client.supported.states('USA'))
_LOGGER.info(await client.supported.cities('USA', 'Colorado'))
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get supported locations (by station):
try:
_LOGGER.info(
await client.supported.stations(
'USA', 'Colorado', 'Denver'))
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest location (by IP):
try:
_LOGGER.info(await client.api.nearest_city())
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest location (coordinates or explicit location):
try:
_LOGGER.info(
await client.api.nearest_city(
latitude=39.742599, longitude=-104.9942557))
_LOGGER.info(
await client.api.city(
city='Los Angeles', state='California', country='USA'))
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest station (by IP):
try:
_LOGGER.info(await client.api.nearest_station())
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest station (by coordinates or explicit location):
try:
_LOGGER.info(
await client.api.nearest_station(
latitude=39.742599, longitude=-104.9942557))
_LOGGER.info(
await client.api.station(
station='US Embassy in Beijing',
city='Beijing',
state='Beijing',
country='China'))
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data on AQI ranking:
try:
_LOGGER.info(await client.api.ranking())
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get info on a AirVisual Pro node:
_LOGGER.info(await client.api.node('zEp8CifbnasWtToBc')) | python | async def main() -> None: # pylint: disable=too-many-statements
"""Create the aiohttp session and run the example."""
logging.basicConfig(level=logging.INFO)
async with ClientSession() as websession:
client = Client(websession, api_key='<API KEY>')
# Get supported locations (by location):
try:
_LOGGER.info(await client.supported.countries())
_LOGGER.info(await client.supported.states('USA'))
_LOGGER.info(await client.supported.cities('USA', 'Colorado'))
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get supported locations (by station):
try:
_LOGGER.info(
await client.supported.stations(
'USA', 'Colorado', 'Denver'))
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest location (by IP):
try:
_LOGGER.info(await client.api.nearest_city())
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest location (coordinates or explicit location):
try:
_LOGGER.info(
await client.api.nearest_city(
latitude=39.742599, longitude=-104.9942557))
_LOGGER.info(
await client.api.city(
city='Los Angeles', state='California', country='USA'))
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest station (by IP):
try:
_LOGGER.info(await client.api.nearest_station())
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data by nearest station (by coordinates or explicit location):
try:
_LOGGER.info(
await client.api.nearest_station(
latitude=39.742599, longitude=-104.9942557))
_LOGGER.info(
await client.api.station(
station='US Embassy in Beijing',
city='Beijing',
state='Beijing',
country='China'))
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get data on AQI ranking:
try:
_LOGGER.info(await client.api.ranking())
except UnauthorizedError as err:
_LOGGER.error(err)
except AirVisualError as err:
_LOGGER.error('There was an error: %s', err)
# Get info on a AirVisual Pro node:
_LOGGER.info(await client.api.node('zEp8CifbnasWtToBc')) | Create the aiohttp session and run the example. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/example.py#L13-L87 |
bachya/pyairvisual | pyairvisual/api.py | API._nearest | async def _nearest(
self,
kind: str,
latitude: Union[float, str] = None,
longitude: Union[float, str] = None) -> dict:
"""Return data from nearest city/station (IP or coordinates)."""
params = {}
if latitude and longitude:
params.update({'lat': str(latitude), 'lon': str(longitude)})
data = await self._request(
'get', 'nearest_{0}'.format(kind), params=params)
return data['data'] | python | async def _nearest(
self,
kind: str,
latitude: Union[float, str] = None,
longitude: Union[float, str] = None) -> dict:
"""Return data from nearest city/station (IP or coordinates)."""
params = {}
if latitude and longitude:
params.update({'lat': str(latitude), 'lon': str(longitude)})
data = await self._request(
'get', 'nearest_{0}'.format(kind), params=params)
return data['data'] | Return data from nearest city/station (IP or coordinates). | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/api.py#L14-L26 |
bachya/pyairvisual | pyairvisual/api.py | API.city | async def city(self, city: str, state: str, country: str) -> dict:
"""Return data for the specified city."""
data = await self._request(
'get',
'city',
params={
'city': city,
'state': state,
'country': country
})
return data['data'] | python | async def city(self, city: str, state: str, country: str) -> dict:
"""Return data for the specified city."""
data = await self._request(
'get',
'city',
params={
'city': city,
'state': state,
'country': country
})
return data['data'] | Return data for the specified city. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/api.py#L28-L38 |
bachya/pyairvisual | pyairvisual/api.py | API.nearest_city | async def nearest_city(
self,
latitude: Union[float, str] = None,
longitude: Union[float, str] = None) -> dict:
"""Return data from nearest city (IP or coordinates)."""
return await self._nearest('city', latitude, longitude) | python | async def nearest_city(
self,
latitude: Union[float, str] = None,
longitude: Union[float, str] = None) -> dict:
"""Return data from nearest city (IP or coordinates)."""
return await self._nearest('city', latitude, longitude) | Return data from nearest city (IP or coordinates). | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/api.py#L40-L45 |
bachya/pyairvisual | pyairvisual/api.py | API.node | async def node(self, node_id: str) -> dict:
"""Return data from a node by its ID."""
return await self._request('get', node_id, base_url=NODE_URL_SCAFFOLD) | python | async def node(self, node_id: str) -> dict:
"""Return data from a node by its ID."""
return await self._request('get', node_id, base_url=NODE_URL_SCAFFOLD) | Return data from a node by its ID. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/api.py#L54-L56 |
bachya/pyairvisual | pyairvisual/supported.py | Supported.cities | async def cities(self, country: str, state: str) -> list:
"""Return a list of supported cities in a country/state."""
data = await self._request(
'get', 'cities', params={
'state': state,
'country': country
})
return [d['city'] for d in data['data']] | python | async def cities(self, country: str, state: str) -> list:
"""Return a list of supported cities in a country/state."""
data = await self._request(
'get', 'cities', params={
'state': state,
'country': country
})
return [d['city'] for d in data['data']] | Return a list of supported cities in a country/state. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/supported.py#L12-L19 |
bachya/pyairvisual | pyairvisual/supported.py | Supported.states | async def states(self, country: str) -> list:
"""Return a list of supported states in a country."""
data = await self._request(
'get', 'states', params={'country': country})
return [d['state'] for d in data['data']] | python | async def states(self, country: str) -> list:
"""Return a list of supported states in a country."""
data = await self._request(
'get', 'states', params={'country': country})
return [d['state'] for d in data['data']] | Return a list of supported states in a country. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/supported.py#L26-L30 |
bachya/pyairvisual | pyairvisual/supported.py | Supported.stations | async def stations(self, city: str, state: str, country: str) -> list:
"""Return a list of supported stations in a city."""
data = await self._request(
'get',
'stations',
params={
'city': city,
'state': state,
'country': country
})
return [station for station in data['data']] | python | async def stations(self, city: str, state: str, country: str) -> list:
"""Return a list of supported stations in a city."""
data = await self._request(
'get',
'stations',
params={
'city': city,
'state': state,
'country': country
})
return [station for station in data['data']] | Return a list of supported stations in a city. | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/supported.py#L32-L42 |
ljcooke/see | see/output.py | column_width | def column_width(tokens):
"""
Return a suitable column width to display one or more strings.
"""
get_len = tools.display_len if PY3 else len
lens = sorted(map(get_len, tokens or [])) or [0]
width = lens[-1]
# adjust for disproportionately long strings
if width >= 18:
most = lens[int(len(lens) * 0.9)]
if most < width + 6:
return most
return width | python | def column_width(tokens):
"""
Return a suitable column width to display one or more strings.
"""
get_len = tools.display_len if PY3 else len
lens = sorted(map(get_len, tokens or [])) or [0]
width = lens[-1]
# adjust for disproportionately long strings
if width >= 18:
most = lens[int(len(lens) * 0.9)]
if most < width + 6:
return most
return width | Return a suitable column width to display one or more strings. | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L117-L131 |
ljcooke/see | see/output.py | justify_token | def justify_token(tok, col_width):
"""
Justify a string to fill one or more columns.
"""
get_len = tools.display_len if PY3 else len
tok_len = get_len(tok)
diff_len = tok_len - len(tok) if PY3 else 0
cols = (int(math.ceil(float(tok_len) / col_width))
if col_width < tok_len + 4 else 1)
if cols > 1:
return tok.ljust((col_width * cols) + (4 * cols) - diff_len)
else:
return tok.ljust(col_width + 4 - diff_len) | python | def justify_token(tok, col_width):
"""
Justify a string to fill one or more columns.
"""
get_len = tools.display_len if PY3 else len
tok_len = get_len(tok)
diff_len = tok_len - len(tok) if PY3 else 0
cols = (int(math.ceil(float(tok_len) / col_width))
if col_width < tok_len + 4 else 1)
if cols > 1:
return tok.ljust((col_width * cols) + (4 * cols) - diff_len)
else:
return tok.ljust(col_width + 4 - diff_len) | Justify a string to fill one or more columns. | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L134-L148 |
ljcooke/see | see/output.py | display_name | def display_name(name, obj, local):
"""
Get the display name of an object.
Keyword arguments (all required):
* ``name`` -- the name of the object as a string.
* ``obj`` -- the object itself.
* ``local`` -- a boolean value indicating whether the object is in local
scope or owned by an object.
"""
prefix = '' if local else '.'
if isinstance(obj, SeeError):
suffix = '?'
elif hasattr(obj, '__call__'):
suffix = '()'
else:
suffix = ''
return ''.join((prefix, name, suffix)) | python | def display_name(name, obj, local):
"""
Get the display name of an object.
Keyword arguments (all required):
* ``name`` -- the name of the object as a string.
* ``obj`` -- the object itself.
* ``local`` -- a boolean value indicating whether the object is in local
scope or owned by an object.
"""
prefix = '' if local else '.'
if isinstance(obj, SeeError):
suffix = '?'
elif hasattr(obj, '__call__'):
suffix = '()'
else:
suffix = ''
return ''.join((prefix, name, suffix)) | Get the display name of an object.
Keyword arguments (all required):
* ``name`` -- the name of the object as a string.
* ``obj`` -- the object itself.
* ``local`` -- a boolean value indicating whether the object is in local
scope or owned by an object. | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L151-L172 |
ljcooke/see | see/output.py | SeeResult.filter | def filter(self, pattern):
"""
Filter the results using a pattern.
This accepts a shell-style wildcard pattern (as used by the fnmatch_
module)::
>>> see([]).filter('*op*')
.copy() .pop()
It also accepts a regular expression. This may be a compiled regular
expression (from the re_ module) or a string that starts with a ``/``
(forward slash) character::
>>> see([]).filter('/[aeiou]{2}/')
.clear() .count()
.. _fnmatch: https://docs.python.org/3/library/fnmatch.html
.. _re: https://docs.python.org/3/library/re.html
"""
if isinstance(pattern, REGEX_TYPE):
func = tools.filter_regex
elif pattern.startswith('/'):
pattern = re.compile(pattern.strip('/'))
func = tools.filter_regex
else:
func = tools.filter_wildcard
return SeeResult(func(self, pattern)) | python | def filter(self, pattern):
"""
Filter the results using a pattern.
This accepts a shell-style wildcard pattern (as used by the fnmatch_
module)::
>>> see([]).filter('*op*')
.copy() .pop()
It also accepts a regular expression. This may be a compiled regular
expression (from the re_ module) or a string that starts with a ``/``
(forward slash) character::
>>> see([]).filter('/[aeiou]{2}/')
.clear() .count()
.. _fnmatch: https://docs.python.org/3/library/fnmatch.html
.. _re: https://docs.python.org/3/library/re.html
"""
if isinstance(pattern, REGEX_TYPE):
func = tools.filter_regex
elif pattern.startswith('/'):
pattern = re.compile(pattern.strip('/'))
func = tools.filter_regex
else:
func = tools.filter_wildcard
return SeeResult(func(self, pattern)) | Filter the results using a pattern.
This accepts a shell-style wildcard pattern (as used by the fnmatch_
module)::
>>> see([]).filter('*op*')
.copy() .pop()
It also accepts a regular expression. This may be a compiled regular
expression (from the re_ module) or a string that starts with a ``/``
(forward slash) character::
>>> see([]).filter('/[aeiou]{2}/')
.clear() .count()
.. _fnmatch: https://docs.python.org/3/library/fnmatch.html
.. _re: https://docs.python.org/3/library/re.html | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L62-L90 |
ljcooke/see | see/output.py | SeeResult.filter_ignoring_case | def filter_ignoring_case(self, pattern):
"""
Like ``filter`` but case-insensitive.
Expects a regular expression string without the surrounding ``/``
characters.
>>> see().filter('^my', ignore_case=True)
MyClass()
"""
return self.filter(re.compile(pattern, re.I)) | python | def filter_ignoring_case(self, pattern):
"""
Like ``filter`` but case-insensitive.
Expects a regular expression string without the surrounding ``/``
characters.
>>> see().filter('^my', ignore_case=True)
MyClass()
"""
return self.filter(re.compile(pattern, re.I)) | Like ``filter`` but case-insensitive.
Expects a regular expression string without the surrounding ``/``
characters.
>>> see().filter('^my', ignore_case=True)
MyClass() | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L92-L103 |
ljcooke/see | see/term.py | term_width | def term_width():
"""
Return the column width of the terminal, or ``None`` if it can't be
determined.
"""
if fcntl and termios:
try:
winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ')
_, width = struct.unpack('hh', winsize)
return width
except IOError:
pass
elif windll and create_string_buffer: # pragma: no cover (windows)
stderr_handle, struct_size = -12, 22
handle = windll.kernel32.GetStdHandle(stderr_handle)
csbi = create_string_buffer(struct_size)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
if res:
(_, _, _, _, _, left, _, right, _,
_, _) = struct.unpack('hhhhHhhhhhh', csbi.raw)
return right - left + 1
else:
return 0 | python | def term_width():
"""
Return the column width of the terminal, or ``None`` if it can't be
determined.
"""
if fcntl and termios:
try:
winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ')
_, width = struct.unpack('hh', winsize)
return width
except IOError:
pass
elif windll and create_string_buffer: # pragma: no cover (windows)
stderr_handle, struct_size = -12, 22
handle = windll.kernel32.GetStdHandle(stderr_handle)
csbi = create_string_buffer(struct_size)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
if res:
(_, _, _, _, _, left, _, right, _,
_, _) = struct.unpack('hhhhHhhhhhh', csbi.raw)
return right - left + 1
else:
return 0 | Return the column width of the terminal, or ``None`` if it can't be
determined. | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/term.py#L27-L49 |
ljcooke/see | see/term.py | line_width | def line_width(default_width=DEFAULT_LINE_WIDTH, max_width=MAX_LINE_WIDTH):
"""
Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping.
"""
width = term_width()
if width: # pragma: no cover (no terminal info in Travis CI)
return min(width, max_width)
else:
return default_width | python | def line_width(default_width=DEFAULT_LINE_WIDTH, max_width=MAX_LINE_WIDTH):
"""
Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping.
"""
width = term_width()
if width: # pragma: no cover (no terminal info in Travis CI)
return min(width, max_width)
else:
return default_width | Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping. | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/term.py#L52-L61 |
Autodesk/pyccc | pyccc/picklers.py | DepartingPickler.persistent_id | def persistent_id(self, obj):
""" Tags objects with a persistent ID, but do NOT emit it
"""
if getattr(obj, '_PERSIST_REFERENCES', None):
objid = id(obj)
obj._persistent_ref = objid
_weakmemos[objid] = obj
return None | python | def persistent_id(self, obj):
""" Tags objects with a persistent ID, but do NOT emit it
"""
if getattr(obj, '_PERSIST_REFERENCES', None):
objid = id(obj)
obj._persistent_ref = objid
_weakmemos[objid] = obj
return None | Tags objects with a persistent ID, but do NOT emit it | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/picklers.py#L35-L42 |
ljcooke/see | see/inspector.py | handle_deprecated_args | def handle_deprecated_args(tokens, args, kwargs):
"""
Backwards compatibility with deprecated arguments ``pattern`` and ``r``.
"""
num_args = len(args)
pattern = args[0] if num_args else kwargs.get('pattern', None)
regex = args[1] if num_args > 1 else kwargs.get('r', None)
if pattern is not None:
tokens = tools.filter_wildcard(tokens, pattern)
sys.stderr.write(
'Please use see().match() now. The "pattern" argument is '
'deprecated and will be removed in a later release. \n')
if regex is not None:
tokens = tools.filter_regex(tokens, re.compile(regex))
sys.stderr.write(
'Please use see().match() now. The "r" argument is '
'deprecated and will be removed in a later release. \n')
return tokens | python | def handle_deprecated_args(tokens, args, kwargs):
"""
Backwards compatibility with deprecated arguments ``pattern`` and ``r``.
"""
num_args = len(args)
pattern = args[0] if num_args else kwargs.get('pattern', None)
regex = args[1] if num_args > 1 else kwargs.get('r', None)
if pattern is not None:
tokens = tools.filter_wildcard(tokens, pattern)
sys.stderr.write(
'Please use see().match() now. The "pattern" argument is '
'deprecated and will be removed in a later release. \n')
if regex is not None:
tokens = tools.filter_regex(tokens, re.compile(regex))
sys.stderr.write(
'Please use see().match() now. The "r" argument is '
'deprecated and will be removed in a later release. \n')
return tokens | Backwards compatibility with deprecated arguments ``pattern`` and ``r``. | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/inspector.py#L44-L64 |
ljcooke/see | see/inspector.py | see | def see(obj=DEFAULT_ARG, *args, **kwargs):
"""
see(obj=anything)
Show the features and attributes of an object.
This function takes a single argument, ``obj``, which can be of any type.
A summary of the object is printed immediately in the Python interpreter.
For example::
>>> see([])
[] in + += *
*= < <= == !=
> >= dir() hash()
help() iter() len() repr()
reversed() str() .append() .clear()
.copy() .count() .extend() .index()
.insert() .pop() .remove() .reverse()
.sort()
If this function is run without arguments, it will instead list the objects
that are available in the current scope. ::
>>> see()
os random see() sys
The return value is an instance of :class:`SeeResult`.
"""
use_locals = obj is DEFAULT_ARG
if use_locals:
# Get the local scope from the caller's stack frame.
# Typically this is the scope of an interactive Python session.
obj = Namespace(inspect.currentframe().f_back.f_locals)
tokens = []
attrs = dir(obj)
if not use_locals:
for name, func in INSPECT_FUNCS:
if func(obj):
tokens.append(name)
for feature in FEATURES:
if feature.match(obj, attrs):
tokens.append(feature.symbol)
for attr in filter(lambda a: not a.startswith('_'), attrs):
try:
prop = getattr(obj, attr)
except (AttributeError, Exception): # pylint: disable=broad-except
prop = SeeError()
action = output.display_name(name=attr, obj=prop, local=use_locals)
tokens.append(action)
if args or kwargs:
tokens = handle_deprecated_args(tokens, args, kwargs)
return output.SeeResult(tokens) | python | def see(obj=DEFAULT_ARG, *args, **kwargs):
"""
see(obj=anything)
Show the features and attributes of an object.
This function takes a single argument, ``obj``, which can be of any type.
A summary of the object is printed immediately in the Python interpreter.
For example::
>>> see([])
[] in + += *
*= < <= == !=
> >= dir() hash()
help() iter() len() repr()
reversed() str() .append() .clear()
.copy() .count() .extend() .index()
.insert() .pop() .remove() .reverse()
.sort()
If this function is run without arguments, it will instead list the objects
that are available in the current scope. ::
>>> see()
os random see() sys
The return value is an instance of :class:`SeeResult`.
"""
use_locals = obj is DEFAULT_ARG
if use_locals:
# Get the local scope from the caller's stack frame.
# Typically this is the scope of an interactive Python session.
obj = Namespace(inspect.currentframe().f_back.f_locals)
tokens = []
attrs = dir(obj)
if not use_locals:
for name, func in INSPECT_FUNCS:
if func(obj):
tokens.append(name)
for feature in FEATURES:
if feature.match(obj, attrs):
tokens.append(feature.symbol)
for attr in filter(lambda a: not a.startswith('_'), attrs):
try:
prop = getattr(obj, attr)
except (AttributeError, Exception): # pylint: disable=broad-except
prop = SeeError()
action = output.display_name(name=attr, obj=prop, local=use_locals)
tokens.append(action)
if args or kwargs:
tokens = handle_deprecated_args(tokens, args, kwargs)
return output.SeeResult(tokens) | see(obj=anything)
Show the features and attributes of an object.
This function takes a single argument, ``obj``, which can be of any type.
A summary of the object is printed immediately in the Python interpreter.
For example::
>>> see([])
[] in + += *
*= < <= == !=
> >= dir() hash()
help() iter() len() repr()
reversed() str() .append() .clear()
.copy() .count() .extend() .index()
.insert() .pop() .remove() .reverse()
.sort()
If this function is run without arguments, it will instead list the objects
that are available in the current scope. ::
>>> see()
os random see() sys
The return value is an instance of :class:`SeeResult`. | https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/inspector.py#L67-L126 |
Autodesk/pyccc | pyccc/files/localfiles.py | LocalFile.open | def open(self, mode='r', encoding=None):
"""Return file-like object (actually opens the file for this class)"""
access_type = self._get_access_type(mode)
return open(self.localpath, 'r'+access_type, encoding=encoding) | python | def open(self, mode='r', encoding=None):
"""Return file-like object (actually opens the file for this class)"""
access_type = self._get_access_type(mode)
return open(self.localpath, 'r'+access_type, encoding=encoding) | Return file-like object (actually opens the file for this class) | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/files/localfiles.py#L84-L87 |
Autodesk/pyccc | pyccc/files/localfiles.py | CachedFile._open_tmpfile | def _open_tmpfile(self, **kwargs):
"""
Open a temporary, unique file in CACHEDIR (/tmp/cyborgcache) by default.
Leave it open, assign file handle to self.tmpfile
**kwargs are passed to tempfile.NamedTemporaryFile
"""
self.tmpfile = get_tempfile(**kwargs)
path = self.tmpfile.name
return path | python | def _open_tmpfile(self, **kwargs):
"""
Open a temporary, unique file in CACHEDIR (/tmp/cyborgcache) by default.
Leave it open, assign file handle to self.tmpfile
**kwargs are passed to tempfile.NamedTemporaryFile
"""
self.tmpfile = get_tempfile(**kwargs)
path = self.tmpfile.name
return path | Open a temporary, unique file in CACHEDIR (/tmp/cyborgcache) by default.
Leave it open, assign file handle to self.tmpfile
**kwargs are passed to tempfile.NamedTemporaryFile | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/files/localfiles.py#L113-L122 |
Autodesk/pyccc | pyccc/files/stringcontainer.py | StringContainer.open | def open(self, mode='r', encoding=None):
"""Return file-like object
Args:
mode (str): access mode (only reading modes are supported)
encoding (str): encoding type (only for binary access)
Returns:
io.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters
"""
access_type = self._get_access_type(mode)
if encoding is None:
encoding = self.encoding
# here, we face the task of returning the correct data type
if access_type == 'b':
if not self._isbytes:
content = self._contents.encode(encoding) # unicode in, bytes out
else:
content = self._contents # bytes in, bytes out
return io.BytesIO(content)
else:
assert access_type == 't'
if PYVERSION == 2 and self._isbytes:
return io.BytesIO(self._contents) # bytes in, bytes out (python 2 only)
elif self._isbytes:
content = self._contents.decode(encoding) # bytes in, unicode out
else:
content = self._contents # unicode in, unicode out
return io.StringIO(content) | python | def open(self, mode='r', encoding=None):
"""Return file-like object
Args:
mode (str): access mode (only reading modes are supported)
encoding (str): encoding type (only for binary access)
Returns:
io.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters
"""
access_type = self._get_access_type(mode)
if encoding is None:
encoding = self.encoding
# here, we face the task of returning the correct data type
if access_type == 'b':
if not self._isbytes:
content = self._contents.encode(encoding) # unicode in, bytes out
else:
content = self._contents # bytes in, bytes out
return io.BytesIO(content)
else:
assert access_type == 't'
if PYVERSION == 2 and self._isbytes:
return io.BytesIO(self._contents) # bytes in, bytes out (python 2 only)
elif self._isbytes:
content = self._contents.decode(encoding) # bytes in, unicode out
else:
content = self._contents # unicode in, unicode out
return io.StringIO(content) | Return file-like object
Args:
mode (str): access mode (only reading modes are supported)
encoding (str): encoding type (only for binary access)
Returns:
io.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/files/stringcontainer.py#L48-L78 |
def put(self, filename, encoding=None):
    """Write this object's contents to the given path.

    Args:
        filename (str): path to write this file to
        encoding (str): file encoding (default: system default)

    Returns:
        LocalFile: reference to the copy of the file stored at ``filename``
    """
    from . import LocalFile

    # A bare directory is only acceptable if we know the original filename.
    if self.source is None and os.path.isdir(filename):
        raise ValueError(
            "Cannot write this object to directory %s without an explicit filename."
            % filename)

    target = get_target_path(filename, self.source)
    codec = self.encoding if encoding is None else encoding

    # Byte contents are written raw; text contents go through the codec.
    if self._isbytes:
        open_args = {'mode': 'wb'}
    else:
        open_args = {'mode': 'w', 'encoding': codec}
    with open(target, **open_args) as handle:
        handle.write(self._contents)

    return LocalFile(target, encoded_with=codec)
"""Write the file to the given path
Args:
filename (str): path to write this file to
encoding (str): file encoding (default: system default)
Returns:
LocalFile: reference to the copy of the file stored at ``filename``
"""
from . import LocalFile
if os.path.isdir(filename) and self.source is None:
raise ValueError("Cannot write this object to "
"directory %s without an explicit filename." % filename)
target = get_target_path(filename, self.source)
if encoding is None:
encoding = self.encoding
if self._isbytes:
kwargs = {'mode': 'wb'}
else:
kwargs = {'mode': 'w', 'encoding': encoding}
with open(target, **kwargs) as outfile:
outfile.write(self._contents)
return LocalFile(target, encoded_with=encoding) | Write the file to the given path
Args:
filename (str): path to write this file to
encoding (str): file encoding (default: system default)
Returns:
LocalFile: reference to the copy of the file stored at ``filename`` | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/files/stringcontainer.py#L94-L123 |
def get_global_vars(func):
    """ Store any methods or variables bound from the function's closure

    Args:
        func (function): function to inspect

    Returns:
        dict: mapping of variable names to globally bound VARIABLES

    Raises:
        TypeError: if the function closes over nonlocal variables
    """
    closure = getclosurevars(func)
    if closure['nonlocal']:
        # FIX: the original indexed closure['nonlocals'] (extra 's') here,
        # which raised KeyError instead of the intended TypeError message.
        raise TypeError("Can't launch a job with closure variables: %s" %
                        closure['nonlocal'].keys())

    globalvars = dict(modules={},
                      functions={},
                      vars={})

    for name, value in closure['global'].items():
        if inspect.ismodule(value):
            # Modules are recorded by name only (they can't be pickled)
            globalvars['modules'][name] = value.__name__
        elif inspect.isfunction(value) or inspect.ismethod(value):
            globalvars['functions'][name] = value
        else:
            globalvars['vars'][name] = value

    return globalvars
""" Store any methods or variables bound from the function's closure
Args:
func (function): function to inspect
Returns:
dict: mapping of variable names to globally bound VARIABLES
"""
closure = getclosurevars(func)
if closure['nonlocal']:
raise TypeError("Can't launch a job with closure variables: %s" %
closure['nonlocals'].keys())
globalvars = dict(modules={},
functions={},
vars={})
for name, value in closure['global'].items():
if inspect.ismodule(value): # TODO: deal FUNCTIONS from closure
globalvars['modules'][name] = value.__name__
elif inspect.isfunction(value) or inspect.ismethod(value):
globalvars['functions'][name] = value
else:
globalvars['vars'][name] = value
return globalvars | Store any methods or variables bound from the function's closure
Args:
func (function): function to inspect
Returns:
dict: mapping of variable names to globally bound VARIABLES | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/source_inspections.py#L34-L58 |
def getsource(classorfunc):
    """ Return the source code for a class or function.

    Notes:
        Returned source will not include any decorators for the object.
        This will only return the explicit declaration of the object, not any dependencies

    Args:
        classorfunc (type or function): the object to get the source code for

    Returns:
        str: text of source code (without any decorators). Note: in python 2, this returns unicode
    """
    # Builtins have no retrievable python source
    if _isbuiltin(classorfunc):
        return ''

    try:
        source = inspect.getsource(classorfunc)
    except TypeError:  # raised if defined in __main__ - use fallback to get the source instead
        source = getsourcefallback(classorfunc)

    declaration = []
    lines = source.splitlines()
    if PY2 and not isinstance(source, unicode):
        # Python 2 only: sniff the file's declared encoding and decode lazily
        encoding = detect_encoding(iter(lines).next)[0]
        sourcelines = (s.decode(encoding) for s in lines)
    else:
        sourcelines = iter(lines)

    # First, get the declaration: everything up to (and including) the colon
    # that ends the `def`/`class` header, which may span multiple lines.
    found_keyword = False
    for line in sourcelines:
        words = line.split()
        if not words:
            continue
        if words[0] in ('def', 'class'):
            found_keyword = True
        if found_keyword:
            cind = line.find(':')
            if cind > 0:
                declaration.append(line[:cind + 1])
                # any code on the same line after the colon must be re-attached later
                after_decl = line[cind + 1:].strip()
                break
            else:
                declaration.append(line)

    bodylines = list(sourcelines)  # the rest of the lines are body

    # If it's a class, make sure we import its superclasses
    # Unfortunately, we need to modify the code to make sure the
    # parent classes have the correct names
    # TODO: find a better way to do this without having to parse code
    # NOTE(review): `type(...) == type` misses classes with custom metaclasses —
    # presumably intentional; confirm before changing to isinstance.
    if type(classorfunc) == type:
        cls = classorfunc
        base_imports = {}
        for base in cls.__bases__:
            if base.__name__ == 'object' and base.__module__ == 'builtins':  # don't import `object`
                continue
            if base in base_imports:
                continue
            if base.__module__ == '__main__':
                continue
            base_imports[base] = 'from %s import %s' % (base.__module__, base.__name__)

        # Rebuild the class header using the bases' simple (imported) names
        cind = declaration[0].index('class ')
        declstring = declaration[0][:cind] + 'class %s(%s):%s' % (
            cls.__name__,
            ','.join([base.__name__ for base in cls.__bases__]),
            after_decl)

        # Prepend the required imports, then the rewritten declaration
        declaration = [impstring for c, impstring in base_imports.items()
                       if c.__module__ != '__builtin__']
        declaration.append(declstring)
    else:
        declaration[-1] += after_decl

    return '\n'.join(declaration + bodylines)
""" Return the source code for a class or function.
Notes:
Returned source will not include any decorators for the object.
This will only return the explicit declaration of the object, not any dependencies
Args:
classorfunc (type or function): the object to get the source code for
Returns:
str: text of source code (without any decorators). Note: in python 2, this returns unicode
"""
if _isbuiltin(classorfunc):
return ''
try:
source = inspect.getsource(classorfunc)
except TypeError: # raised if defined in __main__ - use fallback to get the source instead
source = getsourcefallback(classorfunc)
declaration = []
lines = source.splitlines()
if PY2 and not isinstance(source, unicode):
encoding = detect_encoding(iter(lines).next)[0]
sourcelines = (s.decode(encoding) for s in lines)
else:
sourcelines = iter(lines)
# First, get the declaration
found_keyword = False
for line in sourcelines:
words = line.split()
if not words:
continue
if words[0] in ('def', 'class'):
found_keyword = True
if found_keyword:
cind = line.find(':')
if cind > 0:
declaration.append(line[:cind + 1])
after_decl = line[cind + 1:].strip()
break
else:
declaration.append(line)
bodylines = list(sourcelines) # the rest of the lines are body
# If it's a class, make sure we import its superclasses
# Unfortunately, we need to modify the code to make sure the
# parent classes have the correct names
# TODO: find a better way to do this without having to parse code
if type(classorfunc) == type:
cls = classorfunc
base_imports = {}
for base in cls.__bases__:
if base.__name__ == 'object' and base.__module__ == 'builtins': # don't import `object`
continue
if base in base_imports:
continue
if base.__module__ == '__main__':
continue
base_imports[base] = 'from %s import %s' % (base.__module__, base.__name__)
cind = declaration[0].index('class ')
declstring = declaration[0][:cind] + 'class %s(%s):%s' % (
cls.__name__,
','.join([base.__name__ for base in cls.__bases__]),
after_decl)
declaration = [impstring for c, impstring in base_imports.items()
if c.__module__ != '__builtin__']
declaration.append(declstring)
else:
declaration[-1] += after_decl
return '\n'.join(declaration + bodylines) | Return the source code for a class or function.
Notes:
Returned source will not include any decorators for the object.
This will only return the explicit declaration of the object, not any dependencies
Args:
classorfunc (type or function): the object to get the source code for
Returns:
str: text of source code (without any decorators). Note: in python 2, this returns unicode | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/source_inspections.py#L61-L138 |
def getsourcefallback(cls):
    """ Fallback for getting the source of interactively defined classes (typically in ipython)

    This is basically just a patched version of the inspect module, in which
    we get the code by calling inspect.findsource on a *method* of
    a class for which inspect.findsource fails.

    Args:
        cls (type): class whose source should be recovered

    Returns:
        str: source code of the class definition

    Raises:
        AttributeError: if the class has no methods to anchor the source lookup
        IOError: if the class definition cannot be located
    """
    # FIX: in python 3, plain methods accessed through the class are functions,
    # so the original `inspect.ismethod`-only test never matched; accept both.
    for attr in cls.__dict__:
        candidate = getattr(cls, attr)
        if inspect.isfunction(candidate) or inspect.ismethod(candidate):
            imethod = candidate
            break
    else:
        raise AttributeError(
            "Cannot get this class' source; it does not appear to have any methods")

    ### This part is derived from inspect.findsource ###
    module = inspect.getmodule(cls)
    file = inspect.getfile(imethod)
    lines = linecache.getlines(file, module.__dict__)

    name = cls.__name__
    pat = re.compile(r'^(\s*)class\s*' + name + r'\b')

    # make some effort to find the best matching class definition:
    # use the one with the least indentation, which is the one
    # that's most probably not inside a function definition.
    candidates = []
    toplevel = False
    for i in range(len(lines)):
        match = pat.match(lines[i])
        if match:
            # if it's at toplevel, it's already the best one
            if lines[i][0] == 'c':
                flines, flnum = lines, i
                toplevel = True
                break
            # else add whitespace to candidate list
            candidates.append((match.group(1), i))

    if candidates and not toplevel:
        # this will sort by whitespace, and by line number,
        # less whitespace first
        candidates.sort()
        flines, flnum = lines, candidates[0][1]
    elif not candidates and not toplevel:
        raise IOError('could not find class definition')
    ### end modified inspect.findsource ###

    # this is what inspect.getsourcelines does
    glines = inspect.getblock(flines[flnum:])

    # FIX: removed the dead `if False:` python-2 branch, which referenced an
    # undefined `encoding` variable (its computation was commented out above).
    return "".join(glines)
""" Fallback for getting the source of interactively defined classes (typically in ipython)
This is basically just a patched version of the inspect module, in which
we get the code by calling inspect.findsource on an *instancemethod* of
a class for which inspect.findsource fails.
"""
for attr in cls.__dict__:
if inspect.ismethod(getattr(cls, attr)):
imethod = getattr(cls, attr)
break
else:
raise AttributeError(
"Cannot get this class' source; it does not appear to have any methods")
### This part is derived from inspect.findsource ###
module = inspect.getmodule(cls)
file = inspect.getfile(imethod)
lines = linecache.getlines(file, module.__dict__)
name = cls.__name__
pat = re.compile(r'^(\s*)class\s*'+name+r'\b')
# AMVMOD: find the encoding (necessary for python 2 only)
#if PY2:
# with open(file, 'rb') as infile:
# encoding = detect_encoding(infile.readline)[0]
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
toplevel = False
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
flines, flnum = lines, i
toplevel = True
break
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates and not toplevel:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
flines, flnum = lines, candidates[0][1]
elif not candidates and not toplevel:
raise IOError('could not find class definition')
### end modified inspect.findsource ###
# this is what inspect.getsourcelines does
glines = inspect.getblock(flines[flnum:])
# And this is what inspect.getsource does
if False: #if PY2:
return ("".join(glines)).decode(encoding)
else:
return "".join(glines) | Fallback for getting the source of interactively defined classes (typically in ipython)
This is basically just a patched version of the inspect module, in which
we get the code by calling inspect.findsource on an *instancemethod* of
a class for which inspect.findsource fails. | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/source_inspections.py#L141-L199 |
def get_job(self, jobid):
    """ Return a Job object for the requested job id.

    The returned object will be suitable for retrieving output, but depending on the engine,
    may not populate all fields used at launch time (such as `job.inputs`, `job.commands`, etc.)

    Args:
        jobid (str): container id

    Returns:
        pyccc.job.Job: job object for this container

    Raises:
        pyccc.exceptions.JobNotFound: if no job could be located for this jobid
    """
    import shlex
    from pyccc.job import Job

    job = Job(engine=self)
    # The docker container id doubles as the job id
    job.jobid = job.rundata.containerid = jobid
    try:
        jobdata = self.client.inspect_container(job.jobid)
    except docker.errors.NotFound:
        # NOTE(review): 'containter' typo lives in the user-facing message;
        # left byte-identical here.
        raise exceptions.JobNotFound(
                'The daemon could not find containter "%s"' % job.jobid)
    cmd = jobdata['Config']['Cmd']
    entrypoint = jobdata['Config']['Entrypoint']

    # Reconstruct the launch command from the container config:
    # an ['sh', '-c', <string>] triple means the command was a shell string
    if len(cmd) == 3 and cmd[0:2] == ['sh', '-c']:
        cmd = cmd[2]
    elif entrypoint is not None:
        cmd = entrypoint + cmd

    if isinstance(cmd, list):
        # Quote each token so the reconstructed command line is shell-safe
        cmd = ' '.join(shlex.quote(x) for x in cmd)

    job.command = cmd
    job.env = jobdata['Config']['Env']
    job.workingdir = jobdata['Config']['WorkingDir']
    job.rundata.container = jobdata

    return job
""" Return a Job object for the requested job id.
The returned object will be suitable for retrieving output, but depending on the engine,
may not populate all fields used at launch time (such as `job.inputs`, `job.commands`, etc.)
Args:
jobid (str): container id
Returns:
pyccc.job.Job: job object for this container
Raises:
pyccc.exceptions.JobNotFound: if no job could be located for this jobid
"""
import shlex
from pyccc.job import Job
job = Job(engine=self)
job.jobid = job.rundata.containerid = jobid
try:
jobdata = self.client.inspect_container(job.jobid)
except docker.errors.NotFound:
raise exceptions.JobNotFound(
'The daemon could not find containter "%s"' % job.jobid)
cmd = jobdata['Config']['Cmd']
entrypoint = jobdata['Config']['Entrypoint']
if len(cmd) == 3 and cmd[0:2] == ['sh', '-c']:
cmd = cmd[2]
elif entrypoint is not None:
cmd = entrypoint + cmd
if isinstance(cmd, list):
cmd = ' '.join(shlex.quote(x) for x in cmd)
job.command = cmd
job.env = jobdata['Config']['Env']
job.workingdir = jobdata['Config']['WorkingDir']
job.rundata.container = jobdata
return job | Return a Job object for the requested job id.
The returned object will be suitable for retrieving output, but depending on the engine,
may not populate all fields used at launch time (such as `job.inputs`, `job.commands`, etc.)
Args:
jobid (str): container id
Returns:
pyccc.job.Job: job object for this container
Raises:
pyccc.exceptions.JobNotFound: if no job could be located for this jobid | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/engines/dockerengine.py#L81-L123 |
def submit(self, job):
    """ Submit job to the engine

    Builds a provisioned image containing the job's input files, then creates
    and starts a docker container from that image.

    Args:
        job (pyccc.job.Job): Job to submit
    """
    # Validate the job object before doing any work (project helper;
    # presumably raises on invalid jobs - confirm in EngineBase)
    self._check_job(job)

    if job.workingdir is None:
        # Fall back to the engine-wide default working directory
        job.workingdir = self.default_wdir

    # Bake the job's input files into a new image layered on top of job.image
    job.imageid = du.create_provisioned_image(self.client, job.image,
                                              job.workingdir, job.inputs)
    container_args = self._generate_container_args(job)

    job.rundata.container = self.client.create_container(job.imageid, **container_args)
    self.client.start(job.rundata.container)
    # The container id doubles as the engine-level job id
    job.rundata.containerid = job.rundata.container['Id']
    job.jobid = job.rundata.containerid
""" Submit job to the engine
Args:
job (pyccc.job.Job): Job to submit
"""
self._check_job(job)
if job.workingdir is None:
job.workingdir = self.default_wdir
job.imageid = du.create_provisioned_image(self.client, job.image,
job.workingdir, job.inputs)
container_args = self._generate_container_args(job)
job.rundata.container = self.client.create_container(job.imageid, **container_args)
self.client.start(job.rundata.container)
job.rundata.containerid = job.rundata.container['Id']
job.jobid = job.rundata.containerid | Submit job to the engine
Args:
job (pyccc.job.Job): Job to submit | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/engines/dockerengine.py#L125-L143 |
def dump_all_outputs(self, job, target, abspaths=None):
    """ Specialized dumping strategy - copy the entire working directory, then discard
    the input files that came along for the ride.

    Not used if there are absolute paths, or if the number of output files is
    below ``BULK_OUTPUT_FILE_THRESHOLD`` (the generic strategy is used instead).

    This is slow and wasteful if there are big input files.

    Args:
        job (pyccc.job.Job): job whose outputs should be dumped
        target (str): directory to dump the outputs into
        abspaths: absolute output paths (if any) - forces the generic strategy

    Raises:
        FileExistsError: if the staging directory already exists (IOError on python 2)
    """
    import os
    import shutil
    from pathlib import Path

    root = Path(native_str(target))
    true_outputs = job.get_output()
    if abspaths or len(true_outputs) < self.BULK_OUTPUT_FILE_THRESHOLD:
        # Few files (or absolute paths): the per-file strategy is cheaper/safer
        return super().dump_all_outputs(job, root, abspaths)

    # Stage the whole container working directory under `root`
    stagingdir = root / Path(native_str(job.workingdir)).name
    workdir = job.get_directory(job.workingdir)
    if not root.is_dir():
        root.mkdir(parents=False)
    if stagingdir.exists():
        if PY2:
            # FIX: original format string was 'Path % exists' (missing 's'),
            # which raised "unsupported format character" instead of IOError
            raise IOError('Path %s exists' % stagingdir)
        else:
            raise FileExistsError(stagingdir)
    workdir.put(str(root))
    assert stagingdir.is_dir()
    assert root in stagingdir.parents

    # Move only the true (relative) outputs up into `root`
    for pathstr in true_outputs:
        if os.path.isabs(pathstr):
            continue
        destpath = root / pathstr
        currpath = stagingdir / pathstr
        if not destpath.parent.is_dir():
            destpath.parent.mkdir(parents=True)
        currpath.rename(destpath)

    # Everything left in staging is input baggage - discard it
    shutil.rmtree(str(stagingdir))
""" Specialized dumping strategy - copy the entire working directory, then discard
the input files that came along for the ride.
Not used if there are absolute paths
This is slow and wasteful if there are big input files
"""
import os
import shutil
from pathlib import Path
root = Path(native_str(target))
true_outputs = job.get_output()
if abspaths or len(true_outputs) < self.BULK_OUTPUT_FILE_THRESHOLD:
return super().dump_all_outputs(job, root, abspaths)
stagingdir = root / Path(native_str(job.workingdir)).name
workdir = job.get_directory(job.workingdir)
if not root.is_dir():
root.mkdir(parents=False)
if stagingdir.exists():
if PY2:
raise IOError('Path % exists' % stagingdir)
else:
raise FileExistsError(stagingdir)
workdir.put(str(root))
assert stagingdir.is_dir()
assert root in stagingdir.parents
for pathstr in true_outputs:
if os.path.isabs(pathstr):
continue
destpath = root / pathstr
currpath = stagingdir / pathstr
if not destpath.parent.is_dir():
destpath.parent.mkdir(parents=True)
currpath.rename(destpath)
shutil.rmtree(str(stagingdir)) | Specialized dumping strategy - copy the entire working directory, then discard
the input files that came along for the ride.
Not used if there are absolute paths
This is slow and wasteful if there are big input files | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/engines/dockerengine.py#L201-L241 |
def handle_download_click(self, *args):
    """Download-button callback: fetch the file, then replace the button with a file view.

    :param args: widget event arguments (ignored)
    :return: None
    """
    button = self.download_button
    # Detach this handler first so repeated clicks can't re-trigger the download
    button.on_click(self.handle_download_click, remove=True)
    button.description = 'Downloading ...'
    self._string = self._fileobj.read()
    self.render_string()
"""
Callback for download button. Downloads the file and replaces the button
with a view of the file.
:param args:
:return:
"""
self.download_button.on_click(self.handle_download_click,remove=True)
self.download_button.description = 'Downloading ...'
self._string = self._fileobj.read()
self.render_string() | Callback for download button. Downloads the file and replaces the button
with a view of the file.
:param args:
:return: | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/ui.py#L157-L167 |
def create_build_context(image, inputs, wdir):
    """
    Creates a docker build context: a Dockerfile plus the input files, laid out
    so that COPY places each input at its final path inside the image.

    Args:
        image (str): base image name
        inputs (Mapping[str, object]): mapping of in-container paths (relative
            paths are resolved against ``wdir``) to file reference objects
        wdir (str): absolute working directory inside the image

    Returns:
        dict: mapping of build-context paths to file objects (including the Dockerfile)
    """
    assert os.path.isabs(wdir)
    dockerlines = ["FROM %s" % image,
                   "RUN mkdir -p %s" % wdir]
    build_context = {}

    # This loop creates a Build Context for building the provisioned image
    # We create a tar archive to be added to the root of the image filesystem
    if inputs:
        dockerlines.append('COPY root /')
        # FIX: dropped the unused `ifile` index from the original enumerate()
        for path, obj in inputs.items():
            if not os.path.isabs(path):
                path = os.path.join(wdir, path)
            assert path[0] == '/'
            build_context['root' + path] = obj

    dockerstring = '\n'.join(dockerlines)
    build_context['Dockerfile'] = pyccc.BytesContainer(dockerstring.encode('utf-8'))
    return build_context
"""
Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory
"""
assert os.path.isabs(wdir)
dockerlines = ["FROM %s" % image,
"RUN mkdir -p %s" % wdir]
build_context = {}
# This loop creates a Build Context for building the provisioned image
# We create a tar archive to be added to the root of the image filesystem
if inputs:
dockerlines.append('COPY root /')
for ifile, (path, obj) in enumerate(inputs.items()):
if not os.path.isabs(path):
path = os.path.join(wdir, path)
assert path[0] == '/'
build_context['root' + path] = obj
dockerstring = '\n'.join(dockerlines)
build_context['Dockerfile'] = pyccc.BytesContainer(dockerstring.encode('utf-8'))
return build_context | Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/docker_utils.py#L45-L68 |
def make_tar_stream(build_context, buffer):
    """ Write a tar stream of the build context to the provided buffer

    Args:
        build_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping filenames
            to file references
        buffer (io.BytesIO): writable binary mode buffer
    """
    # FIX: use a context manager so the archive is closed even if reading
    # one of the file references raises (the original leaked the TarFile).
    with tarfile.TarFile(fileobj=buffer, mode='w') as tf:
        for context_path, fileobj in build_context.items():
            if getattr(fileobj, 'localpath', None) is not None:
                # File lives on local disk - let tarfile stream it directly
                tf.add(fileobj.localpath, arcname=context_path)
            else:
                # In-memory reference - serialize its bytes into the archive
                tar_add_bytes(tf, context_path, fileobj.read('rb'))
""" Write a tar stream of the build context to the provided buffer
Args:
build_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping filenames to file references
buffer (io.BytesIO): writable binary mode buffer
"""
tf = tarfile.TarFile(fileobj=buffer, mode='w')
for context_path, fileobj in build_context.items():
if getattr(fileobj, 'localpath', None) is not None:
tf.add(fileobj.localpath, arcname=context_path)
else:
tar_add_bytes(tf, context_path, fileobj.read('rb'))
tf.close() | Write a tar stream of the build context to the provided buffer
Args:
build_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping filenames to file references
buffer (io.BytesIO): writable binary mode buffer | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/docker_utils.py#L124-L137 |
def tar_add_bytes(tf, filename, bytestring):
    """Append an in-memory file to an open tar archive.

    Args:
        tf (tarfile.TarFile): tarfile to add the file to
        filename (str): path within the tar file
        bytestring (bytes or str): file contents. Must be :class:`bytes` or
            ascii-encodable :class:`str`
    """
    # Normalize text input to bytes before measuring and writing
    payload = bytestring if isinstance(bytestring, bytes) else bytestring.encode('ascii')
    info = tarfile.TarInfo(filename)
    info.size = len(payload)
    tf.addfile(info, io.BytesIO(payload))
""" Add a file to a tar archive
Args:
tf (tarfile.TarFile): tarfile to add the file to
filename (str): path within the tar file
bytestring (bytes or str): file contents. Must be :class:`bytes` or
ascii-encodable :class:`str`
"""
if not isinstance(bytestring, bytes): # it hasn't been encoded yet
bytestring = bytestring.encode('ascii')
buff = io.BytesIO(bytestring)
tarinfo = tarfile.TarInfo(filename)
tarinfo.size = len(bytestring)
tf.addfile(tarinfo, buff) | Add a file to a tar archive
Args:
tf (tarfile.TarFile): tarfile to add the file to
filename (str): path within the tar file
bytestring (bytes or str): file contents. Must be :class:`bytes` or
ascii-encodable :class:`str` | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/docker_utils.py#L140-L154 |
def kwargs_from_client(client, assert_hostname=False):
    """Build the keyword arguments needed to reconstruct ``client`` elsewhere.

    More or less stolen from docker-py's kwargs_from_env
    https://github.com/docker/docker-py/blob/c0ec5512ae7ab90f7fac690064e37181186b1928/docker/utils/utils.py

    :type client : docker.Client
    """
    from docker import tls

    # Local unix-socket clients are fully described by the socket path alone
    if client.base_url in ('http+docker://localunixsocket', 'http+docker://localhost'):
        return {'base_url': 'unix://var/run/docker.sock'}

    kwargs = {'base_url': client.base_url}
    if client.cert:
        # TODO: problem - client.cert is filepaths, and it would be insecure to send those files.
        kwargs['tls'] = tls.TLSConfig(client_cert=client.cert,
                                      ca_cert=client.verify,
                                      verify=bool(client.verify),
                                      assert_hostname=assert_hostname)
    return kwargs
"""
More or less stolen from docker-py's kwargs_from_env
https://github.com/docker/docker-py/blob/c0ec5512ae7ab90f7fac690064e37181186b1928/docker/utils/utils.py
:type client : docker.Client
"""
from docker import tls
if client.base_url in ('http+docker://localunixsocket', 'http+docker://localhost'):
return {'base_url': 'unix://var/run/docker.sock'}
params = {'base_url': client.base_url}
if client.cert:
# TODO: problem - client.cert is filepaths, and it would be insecure to send those files.
params['tls'] = tls.TLSConfig(
client_cert=client.cert,
ca_cert=client.verify,
verify=bool(client.verify),
assert_hostname=assert_hostname)
return params | More or less stolen from docker-py's kwargs_from_env
https://github.com/docker/docker-py/blob/c0ec5512ae7ab90f7fac690064e37181186b1928/docker/utils/utils.py
:type client : docker.Client | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/docker_utils.py#L194-L213 |
def find_class(self, module, name):
    """ This override is here to help pickle find the modules that classes are defined in.

    It does three things:
     1) remaps the "PackagedFunction" class from pyccc to the `source.py` module.
     2) Remaps any classes created in the client's '__main__' to the `source.py` module
     3) Creates on-the-fly modules to store any other classes present in source.py

    References:
        This is a modified version of the 2-only recipe from
        https://wiki.python.org/moin/UsingPickle/RenamingModules.
        It's been modified for 2/3 cross-compatibility """
    import pickle

    # Translate client-side module names to their server-side equivalents
    modname = self.RENAMETABLE.get(module, module)

    try:
        # can't use ``super`` here (not 2/3 compatible)
        klass = pickle.Unpickler.find_class(self, modname, name)
    except (ImportError, RuntimeError):
        # The module doesn't exist here: pull the definition out of the
        # uploaded `source` module and publish it via a synthetic module,
        # so pickle's normal lookup succeeds on retry.
        definition = getattr(source, name)
        newmod = _makemod(modname)
        sys.modules[modname] = newmod
        setattr(newmod, name, definition)
        klass = pickle.Unpickler.find_class(self, newmod.__name__, name)

    # Make the unpickled class report its original module name
    klass.__module__ = module
    return klass
""" This override is here to help pickle find the modules that classes are defined in.
It does three things:
1) remaps the "PackagedFunction" class from pyccc to the `source.py` module.
2) Remaps any classes created in the client's '__main__' to the `source.py` module
3) Creates on-the-fly modules to store any other classes present in source.py
References:
This is a modified version of the 2-only recipe from
https://wiki.python.org/moin/UsingPickle/RenamingModules.
It's been modified for 2/3 cross-compatibility """
import pickle
modname = self.RENAMETABLE.get(module, module)
try:
# can't use ``super`` here (not 2/3 compatible)
klass = pickle.Unpickler.find_class(self, modname, name)
except (ImportError, RuntimeError):
definition = getattr(source, name)
newmod = _makemod(modname)
sys.modules[modname] = newmod
setattr(newmod, name, definition)
klass = pickle.Unpickler.find_class(self, newmod.__name__, name)
klass.__module__ = module
return klass | This override is here to help pickle find the modules that classes are defined in.
It does three things:
1) remaps the "PackagedFunction" class from pyccc to the `source.py` module.
2) Remaps any classes created in the client's '__main__' to the `source.py` module
3) Creates on-the-fly modules to store any other classes present in source.py
References:
This is a modified version of the 2-only recipe from
https://wiki.python.org/moin/UsingPickle/RenamingModules.
It's been modified for 2/3 cross-compatibility | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/static/run_job.py#L97-L124 |
def gist_diff():
    """Diff this file with the gist on github"""
    remote_contents = wget(RAW_GIST)
    # Feed the downloaded copy to `diff` on stdin, compare against the local file
    command = ('diff - %s' % MY_PATH).split()
    proc = subprocess.Popen(command,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _err = proc.communicate(remote_contents)
    return out
"""Diff this file with the gist on github"""
remote_file = wget(RAW_GIST)
proc = subprocess.Popen(('diff - %s'%MY_PATH).split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = proc.communicate(remote_file)
return stdout | Diff this file with the gist on github | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/utils.py#L34-L41 |
def wget(url):
    """
    Download the page into a string

    Args:
        url (str): URL to fetch

    Returns:
        bytes: raw response body
    """
    # FIX: the original imported urllib.parse but then used
    # urllib.request.urlopen - urllib.request is not guaranteed to be loaded
    # by that import, producing AttributeError. Import the right submodule,
    # and close the response with a context manager.
    import urllib.request
    with urllib.request.urlopen(url) as response:
        return response.read()
"""
Download the page into a string
"""
import urllib.parse
request = urllib.request.urlopen(url)
filestring = request.read()
return filestring | Download the page into a string | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/utils.py#L44-L51 |
def autodecode(b):
    """ Try to decode ``bytes`` to text - try default encoding first, otherwise try to autodetect

    Args:
        b (bytes): byte string

    Returns:
        str: decoded text string
    """
    try:
        return b.decode()
    except UnicodeError:
        # Lazy imports: chardet and the warnings machinery are only needed on
        # the fallback path (also lets the common path run without chardet).
        import warnings
        import chardet
        guess = chardet.detect(b)
        if guess['confidence'] < 0.95:
            warnings.warn('autodecode failed with utf-8; guessing %s' % guess['encoding'])
        # FIX: the original called .decode() on the chardet result *dict*;
        # the bytes themselves must be decoded with the guessed encoding.
        return b.decode(guess['encoding'])
""" Try to decode ``bytes`` to text - try default encoding first, otherwise try to autodetect
Args:
b (bytes): byte string
Returns:
str: decoded text string
"""
import warnings
import chardet
try:
return b.decode()
except UnicodeError:
result = chardet.detect(b)
if result['confidence'] < 0.95:
warnings.warn('autodecode failed with utf-8; guessing %s' % result['encoding'])
return result.decode(result['encoding']) | Try to decode ``bytes`` to text - try default encoding first, otherwise try to autodetect
Args:
b (bytes): byte string
Returns:
str: decoded text string | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/utils.py#L61-L79 |
def can_use_widgets():
    """ Expanded from from http://stackoverflow.com/a/34092072/1958900
    """
    if 'IPython' not in sys.modules:
        return False  # IPython was never imported, so not in a notebook

    from IPython import get_ipython
    if getattr(get_ipython(), 'kernel', None) is None:
        return False  # no kernel attribute -> not running under a notebook kernel

    try:
        import ipywidgets as ipy
        import traitlets
    except ImportError:
        return False

    return True
def remove_directories(list_of_paths):
    """
    Removes non-leafs from a list of directory paths
    """
    # collect every ancestor directory seen anywhere in the input
    ancestors = set('/')
    for entry in list_of_paths:
        parts = entry.strip().split('/')
        for depth in range(2, len(parts)):
            ancestors.add('/'.join(parts[:depth]))

    # keep only entries that are not ancestors and don't end in a slash
    return [entry for entry in list_of_paths
            if entry.strip() not in ancestors and entry.strip()[-1] != '/']
Autodesk/pyccc | pyccc/engines/subproc.py | Subprocess._check_file_is_under_workingdir | def _check_file_is_under_workingdir(filename, wdir):
""" Raise error if input is being staged to a location not underneath the working dir
"""
p = filename
if not os.path.isabs(p):
p = os.path.join(wdir, p)
targetpath = os.path.realpath(p)
wdir = os.path.realpath(wdir)
common = os.path.commonprefix([wdir, targetpath])
if len(common) < len(wdir):
raise exceptions.PathError(
"The subprocess engine does not support input files with absolute paths")
return p | python | def _check_file_is_under_workingdir(filename, wdir):
""" Raise error if input is being staged to a location not underneath the working dir
"""
p = filename
if not os.path.isabs(p):
p = os.path.join(wdir, p)
targetpath = os.path.realpath(p)
wdir = os.path.realpath(wdir)
common = os.path.commonprefix([wdir, targetpath])
if len(common) < len(wdir):
raise exceptions.PathError(
"The subprocess engine does not support input files with absolute paths")
return p | Raise error if input is being staged to a location not underneath the working dir | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/engines/subproc.py#L89-L101 |
def put(self, filename, encoding=None):
    """Write the file to the given path

    Args:
        filename (str): path to write this file to
        encoding (str): if given, must match the encoding this object already uses

    Returns:
        LocalFile: reference to the copy of the file stored at ``filename``

    Raises:
        ValueError: if ``filename`` is a directory and this object has no source
            name, or if ``encoding`` conflicts with the existing encoding
    """
    from . import LocalFile

    if os.path.isdir(filename) and self.source is None:
        raise ValueError("Cannot write this object to "
                         "directory %s without an explicit filename." % filename)

    target = get_target_path(filename, self.source)

    if (encoding is not None) and (encoding != self.encoded_with):
        # fixed: the format args must be a tuple -- previously
        # ``self.encoded_with`` was passed as a second positional argument to
        # ValueError instead of being interpolated into the message
        raise ValueError('%s is already encoded as "%s"' % (self, self.encoded_with))

    with self.open('rb') as infile, open(target, 'wb') as outfile:
        for line in infile:
            outfile.write(line)

    return LocalFile(target)
def open(self, mode='r', encoding=None):
    """Return file-like object

    Args:
        mode (str): access mode (only reading modes are supported)
        encoding (str): text decoding method for text access (default: system default)

    Returns:
        io.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters
    """
    access = self._get_access_type(mode)

    # warn when the caller's requested text codec conflicts with ours
    if access == 't' and encoding is not None and encoding != self.encoded_with:
        warnings.warn('Attempting to decode %s as "%s", but encoding is declared as "%s"'
                      % (self, encoding, self.encoded_with))

    codec = self.encoded_with if encoding is None else encoding
    raw = io.BytesIO(self._contents)
    return raw if access == 'b' else io.TextIOWrapper(raw, encoding=codec)
def get_target_path(destination, origname):
    """ Implements the directory/path semantics of linux mv/cp etc.

    If ``destination`` is an existing directory, the target is inside it,
    named with the basename of ``origname``; otherwise ``destination`` itself
    is the target (its parent directory must already exist).

    Examples:
        >>> import os
        >>> os.makedirs('./a')
        >>> get_target_path('./a', '/tmp/myfile')
        './a/myfile'
        >>> get_target_path('./a/b', '/tmp/myfile')
        './a/b'

    Raises:
        OSError: if neither destination NOR destination's parent exists OR it already exists
    """
    # NOTE: doctest fixed -- the first example previously claimed './myfile',
    # but os.path.join('./a', 'myfile') is './a/myfile'
    if os.path.exists(destination):
        if not os.path.isdir(destination):
            raise OSError('Cannot write to requested destination %s - file exists' % destination)
        return os.path.join(destination, os.path.basename(origname))
    else:
        destdir = os.path.abspath(os.path.join(destination, os.path.pardir))
        if not os.path.isdir(destdir):
            raise OSError(
                'Cannot write to requested destination %s - parent directory does not exist' %
                destination)
        # simplified: os.path.join(destination) with one argument was a no-op
        return destination
def put(self, filename):
    """Write the file to the given path

    Args:
        filename (str): path to write this file to

    Returns:
        LocalFile: reference to the copy of the file stored at ``filename``
    """
    from . import LocalFile

    destination = get_target_path(filename, self.source)
    # stream the contents across in binary mode
    with self.open('rb') as src, open(destination, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    return LocalFile(destination)
Autodesk/pyccc | pyccc/files/base.py | FileReferenceBase._get_access_type | def _get_access_type(self, mode):
""" Make sure mode is appropriate; return 'b' for binary access and 't' for text
"""
access_type = None
for char in mode: # figure out whether it's binary or text access
if char in 'bt':
if access_type is not None:
raise IOError('File mode "%s" contains contradictory flags' % mode)
access_type = char
elif char not in 'rbt':
raise NotImplementedError(
'%s objects are read-only; unsupported mode "%s"'%
(type(self), mode))
if access_type is None: access_type = 't'
return access_type | python | def _get_access_type(self, mode):
""" Make sure mode is appropriate; return 'b' for binary access and 't' for text
"""
access_type = None
for char in mode: # figure out whether it's binary or text access
if char in 'bt':
if access_type is not None:
raise IOError('File mode "%s" contains contradictory flags' % mode)
access_type = char
elif char not in 'rbt':
raise NotImplementedError(
'%s objects are read-only; unsupported mode "%s"'%
(type(self), mode))
if access_type is None: access_type = 't'
return access_type | Make sure mode is appropriate; return 'b' for binary access and 't' for text | https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/files/base.py#L151-L166 |
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a dict with keys ``'nonlocal'``, ``'global'`` and ``'builtin'``
    (each a dict mapping names to their current values as seen by the body of
    the function) plus ``'unbound'``, a set of names that could not be
    resolved. Note this differs from ``inspect.getclosurevars``, which returns
    a named tuple.

    Note:
        Modified function from the Python 3.5 inspect standard library module
        Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
        2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights
        Reserved"
        See also py-cloud-compute-cannon/NOTICES.
    """
    if inspect.ismethod(func):
        # unwrap bound methods to inspect the underlying function
        func = func.__func__
    elif not inspect.isroutine(func):
        raise TypeError("'{!r}' is not a Python function".format(func))

    # AMVMOD: deal with python 2 builtins that don't define these
    code = getattr(func, '__code__', None)
    closure = getattr(func, '__closure__', None)
    co_names = getattr(code, 'co_names', ())
    glb = getattr(func, '__globals__', {})

    # Nonlocal references are named in co_freevars and resolved
    # by looking them up in __closure__ by positional index
    if closure is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = {var: cell.cell_contents
                         for var, cell in zip(code.co_freevars, func.__closure__)}

    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__
    global_ns = glb
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if inspect.ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)

    return {'nonlocal': nonlocal_vars,
            'global': global_vars,
            'builtin': builtin_vars,
            'unbound': unbound_names}
def put(self, destination):
    """ Copy the referenced directory to this path

    The semantics of this command are similar to unix ``cp``: if ``destination`` already
    exists, the copied directory will be put at ``[destination] // [basename(localpath)]``. If
    it does not already exist, the directory will be renamed to this path (the parent directory
    must exist).

    Args:
        destination (str): path to put this directory
    """
    # resolve cp-style destination semantics, then copy the whole tree
    resolved = get_target_path(destination, self.localpath)
    shutil.copytree(self.localpath, resolved)
def put(self, destination):
    """ Copy the referenced directory to this path

    Note:
        This ignores anything not in the desired directory, given by ``self.dirname``.

    Args:
        destination (str): path to put this directory (which must NOT already exist)

    Raises:
        ValueError: if the archive contains no files under ``self.dirname``

    References:
        https://stackoverflow.com/a/8261083/1958900
    """
    target = get_target_path(destination, self.dirname)
    valid_paths = (self.dirname, './%s' % self.dirname)

    with tarfile.open(self.archive_path, 'r:*') as tf:
        members = []
        for tarinfo in tf:
            # Get only files under the directory `self.dirname`
            pathsplit = os.path.normpath(tarinfo.path).split(os.sep)
            if pathsplit[0] not in valid_paths:
                print('WARNING: skipped file "%s" in archive; not in directory "%s"' %
                      (tarinfo.path, self.dirname))
                continue
            if len(pathsplit) == 1:
                continue  # this is the directory entry itself, not a file inside it
            # strip the leading directory so extraction lands directly in ``target``
            tarinfo.name = os.path.join(*pathsplit[1:])
            members.append(tarinfo)

        if not members:
            # fixed: the format string previously had no arguments applied,
            # so the error message showed a literal '%s'
            raise ValueError("No files under path directory '%s' in this tarfile"
                             % self.dirname)

        tf.extractall(target, members)
def put(self, destination):
    """ Copy the referenced directory to this path

    Args:
        destination (str): path to put this directory (which must NOT already exist)
    """
    # make sure the archive has been pulled out of the container first
    if not self._fetched:
        self._fetch()
    DirectoryArchive.put(self, destination)
def dump_all_outputs(self, job, target, abspaths=None):
    """ Default dumping strategy - potentially slow for large numbers of files

    Subclasses should offer faster implementations, if available

    Args:
        job: completed job whose output files should be written to disk
        target (str): local directory that receives relative output paths
        abspaths (str): subdirectory (relative to ``target``) that receives
            outputs whose recorded path is absolute; if falsy, absolute-path
            outputs are skipped entirely
    """
    from pathlib import Path

    # ``native_str`` presumably normalizes py2/py3 string types -- TODO confirm
    root = Path(native_str(target))
    for outputpath, outputfile in job.get_output().items():
        path = Path(native_str(outputpath))

        # redirect absolute paths into the appropriate subdirectory
        if path.is_absolute():
            if abspaths:
                # re-root the absolute path under the ``abspaths`` subdirectory
                path = Path(native_str(abspaths), *path.parts[1:])
            else:
                continue

        dest = root / path
        if not dest.parent.is_dir():
            dest.parent.mkdir(parents=True)

        if dest.is_file():
            # overwrite any pre-existing file at the destination
            dest.unlink()

        try:
            outputfile.put(str(dest))
        except IsADirectoryError:
            # the output was a directory reference; just ensure the dir exists
            if not dest.is_dir():
                dest.mkdir(parents=True)
def launch(self, image, command, **kwargs):
    """
    Create a job on this engine

    Args:
        image (str): name of the docker image to launch
        command (str): shell command to run
    """
    # python callables get a PythonJob wrapper; everything else is a shell Job
    jobclass = PythonJob if isinstance(command, PythonCall) else Job
    return jobclass(self, image, command, **kwargs)
def char_width(char):
    """
    Get the display length of a unicode character.
    """
    if ord(char) < 128:
        return 1  # ASCII is always single-width
    if unicodedata.east_asian_width(char) in ('F', 'W'):
        return 2  # fullwidth / wide East Asian characters take two columns
    if unicodedata.category(char) in ('Mn',):
        return 0  # nonspacing combining marks take no columns
    return 1
def display_len(text):
    """
    Get the display length of a string. This can differ from the character
    length if the string contains wide characters.
    """
    # decompose first so combining marks are measured separately
    normalized = unicodedata.normalize('NFD', text)
    return sum(map(char_width, normalized))
def filter_regex(names, regex):
    """
    Return a tuple of strings that match the regular expression pattern.
    """
    matches = [name for name in names if regex.search(name) is not None]
    return tuple(matches)
def filter_wildcard(names, pattern):
    """
    Return a tuple of strings that match a shell-style wildcard pattern.
    """
    matches = [name for name in names if fnmatch.fnmatch(name, pattern)]
    return tuple(matches)
def match(self, obj, attrs):
    """
    Only match if the object contains a non-empty docstring.
    """
    if '__doc__' not in attrs:
        return None
    # a docstring without an ``lstrip`` attribute (e.g. None) never matches
    lstrip = getattr(obj.__doc__, 'lstrip', False)
    return lstrip and any(lstrip())
def run(self, func=None):
    """
    Evaluates the packaged function as func(*self.args,**self.kwargs)

    If func is a method of an object, it's accessed as getattr(self.obj,__name__).
    If it's a user-defined function, it needs to be passed in here because it can't
    be serialized.

    Returns:
        object: function's return value
    """
    # restore the callable's namespace, then invoke it with the stored arguments
    target = self.prepare_namespace(func)
    return target(*self.args, **self.kwargs)
def prepare_namespace(self, func):
    """
    Prepares the function to be run after deserializing it.
    Re-associates any previously bound variables and modules from the closure

    Args:
        func (callable or None): the deserialized function object; ignored for
            instance methods, which are looked up on ``self.obj`` instead

    Returns:
        callable: ready-to-call function
    """
    if self.is_imethod:
        # instance method: re-bind to the deserialized object
        to_run = getattr(self.obj, self.imethod_name)
    else:
        to_run = func
    # re-import any modules the function referenced through its globals
    for varname, modulename in self.global_modules.items():
        to_run.__globals__[varname] = __import__(modulename)
    # restore any captured global variables and helper functions
    # NOTE: this mutates the function's __globals__ dict in place
    if self.global_closure:
        to_run.__globals__.update(self.global_closure)
    if self.global_functions:
        to_run.__globals__.update(self.global_functions)
    return to_run
def make_list_table(headers, data, title='', columns=None):
    """Build a list-table directive.

    :param headers: List of header values.
    :param data: Iterable of row data, yielding lists or tuples with rows.
    :param title: Optional text to show as the table title.
    :param columns: Optional widths for the columns.
    """
    lines = ['.. list-table:: %s' % title,
             ' :header-rows: 1']
    if columns:
        lines.append(' :widths: %s' % (','.join(str(c) for c in columns)))
    lines.append('')
    # header row: first cell starts the row, the rest continue it
    lines.append(' - * %s' % headers[0])
    lines.extend(' * %s' % h for h in headers[1:])
    # data rows follow the same first-cell/continuation layout
    for row in data:
        lines.append(' - * %s' % row[0])
        lines.extend(' * %s' % cell for cell in row[1:])
    lines.append('')
    return '\n'.join(lines)
def make_list_table_from_mappings(headers, data, title, columns=None):
    """Build a list-table directive.

    :param headers: List of tuples containing header title and key value.
    :param data: Iterable of row data, yielding mappings with rows.
    :param title: Optional text to show as the table title.
    :param columns: Optional widths for the columns.
    """
    titles = [name for name, _ in headers]
    keys = [key for _, key in headers]
    # lazily project each mapping onto the header keys; missing keys yield None
    rows = ([mapping.get(key) for key in keys] for mapping in data)
    return make_list_table(titles, rows, title, columns)
openstates/billy | billy/web/public/templatetags/customtags.py | decimal_format | def decimal_format(value, TWOPLACES=Decimal(100) ** -2):
'Format a decimal.Decimal like to 2 decimal places.'
if not isinstance(value, Decimal):
value = Decimal(str(value))
return value.quantize(TWOPLACES) | python | def decimal_format(value, TWOPLACES=Decimal(100) ** -2):
'Format a decimal.Decimal like to 2 decimal places.'
if not isinstance(value, Decimal):
value = Decimal(str(value))
return value.quantize(TWOPLACES) | Format a decimal.Decimal like to 2 decimal places. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/templatetags/customtags.py#L77-L81 |
openstates/billy | billy/web/public/templatetags/customtags.py | notification_preference | def notification_preference(obj_type, profile):
'''Display two radio buttons for turning notifications on or off.
The default value is is have alerts_on = True.
'''
default_alert_value = True
if not profile:
alerts_on = True
else:
notifications = profile.get('notifications', {})
alerts_on = notifications.get(obj_type, default_alert_value)
return dict(alerts_on=alerts_on, obj_type=obj_type) | python | def notification_preference(obj_type, profile):
'''Display two radio buttons for turning notifications on or off.
The default value is is have alerts_on = True.
'''
default_alert_value = True
if not profile:
alerts_on = True
else:
notifications = profile.get('notifications', {})
alerts_on = notifications.get(obj_type, default_alert_value)
return dict(alerts_on=alerts_on, obj_type=obj_type) | Display two radio buttons for turning notifications on or off.
The default value is is have alerts_on = True. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/templatetags/customtags.py#L120-L130 |
openstates/billy | billy/models/legislators.py | OldRole.committee_object | def committee_object(self):
'''If the committee id no longer exists in mongo for some reason,
this function returns None.
'''
if 'committee_id' in self:
_id = self['committee_id']
return self.document._old_roles_committees.get(_id)
else:
return self | python | def committee_object(self):
'''If the committee id no longer exists in mongo for some reason,
this function returns None.
'''
if 'committee_id' in self:
_id = self['committee_id']
return self.document._old_roles_committees.get(_id)
else:
return self | If the committee id no longer exists in mongo for some reason,
this function returns None. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/legislators.py#L72-L80 |
openstates/billy | billy/models/legislators.py | Legislator.context_role | def context_role(self, bill=None, vote=None, session=None, term=None):
'''Tell this legislator object which session to use when calculating
the legisator's context_role for a given bill or vote.
'''
# If no hints were given about the context, look for a related bill,
# then for a related vote.
if not any([bill, vote, session, term]):
try:
bill = self.bill
except AttributeError:
# A vote?
try:
vote = self.vote
except AttributeError:
# If we're here, this method was called on a
# Legislator was that doesn't have a related bill or vote.
return ''
# If we still have to historical point of reference, figuring
# out the context role is impossible. Return emtpy string.
if not any([bill, vote, session, term]):
return ''
# First figure out the term.
if bill is not None:
term = bill['_term']
elif vote is not None:
try:
_bill = vote.bill
except AttributeError:
_bill = BillVote(vote).bill
if callable(_bill):
_bill = _bill()
term = _bill['_term']
if term is None and session is not None:
term = term_for_session(self[settings.LEVEL_FIELD], session)
# Use the term to get the related roles. First look in the current
# roles list, then fail over to the old_roles list.
roles = [r for r in self['roles']
if r.get('type') == 'member' and r.get('term') == term]
roles = list(filter(None, roles))
if not roles:
roles = [r for r in self.get('old_roles', {}).get(term, [])
if r.get('type') == 'member']
roles = list(filter(None, roles))
if not roles:
# Legislator had no roles for this term. If there is a related
# bill ro vote, this shouldn't happen, but could if the
# legislator's roles got deleted.
return ''
# If there's only one applicable role, we're done.
if len(roles) == 1:
role = roles.pop()
self['context_role'] = role
return role
# If only one of term or session is given and there are multiple roles:
if not list(filter(None, [bill, vote])):
if term is not None:
role = roles[0]
self['context_role'] = role
return role
# Below, use the date of the related bill or vote to determine
# which (of multiple) roles applies.
# Get the context date.
if session is not None:
# If we're here, we have multiple roles for a single session.
# Try to find the correct one in self.metadata,
# else give up.
session_data = self.metadata['session_details'][session]
for role in roles:
role_start = role.get('start_date')
role_end = role.get('end_date')
# Return the first role that overlaps at all with the
# session.
session_start = session_data.get('start_date')
session_end = session_data.get('end_date')
if session_start and session_end:
started_during = (role_start < session_start <
role_end)
ended_during = (role_start < session_end < role_end)
if started_during or ended_during:
self['context_role'] = role
return role
else:
continue
# Return first role from the session?
role = roles[0]
self['context_role'] = role
return role
if vote is not None:
date = vote['date']
if bill is not None:
date = bill['action_dates']['first']
dates_exist = False
for role in roles:
start_date = role.get('start_date')
end_date = role.get('end_date')
if start_date and end_date:
dates_exist = True
if start_date < date < end_date:
self['context_role'] = role
return role
if dates_exist:
# If we're here, the context date didn't fall into any of the
# legislator's role date ranges.
return ''
else:
# Here the roles didn't have date ranges. Return the last one?
role = roles.pop()
self['context_role'] = role
return role
return '' | python | def context_role(self, bill=None, vote=None, session=None, term=None):
'''Tell this legislator object which session to use when calculating
the legisator's context_role for a given bill or vote.
'''
# If no hints were given about the context, look for a related bill,
# then for a related vote.
if not any([bill, vote, session, term]):
try:
bill = self.bill
except AttributeError:
# A vote?
try:
vote = self.vote
except AttributeError:
# If we're here, this method was called on a
# Legislator was that doesn't have a related bill or vote.
return ''
# If we still have to historical point of reference, figuring
# out the context role is impossible. Return emtpy string.
if not any([bill, vote, session, term]):
return ''
# First figure out the term.
if bill is not None:
term = bill['_term']
elif vote is not None:
try:
_bill = vote.bill
except AttributeError:
_bill = BillVote(vote).bill
if callable(_bill):
_bill = _bill()
term = _bill['_term']
if term is None and session is not None:
term = term_for_session(self[settings.LEVEL_FIELD], session)
# Use the term to get the related roles. First look in the current
# roles list, then fail over to the old_roles list.
roles = [r for r in self['roles']
if r.get('type') == 'member' and r.get('term') == term]
roles = list(filter(None, roles))
if not roles:
roles = [r for r in self.get('old_roles', {}).get(term, [])
if r.get('type') == 'member']
roles = list(filter(None, roles))
if not roles:
# Legislator had no roles for this term. If there is a related
# bill ro vote, this shouldn't happen, but could if the
# legislator's roles got deleted.
return ''
# If there's only one applicable role, we're done.
if len(roles) == 1:
role = roles.pop()
self['context_role'] = role
return role
# If only one of term or session is given and there are multiple roles:
if not list(filter(None, [bill, vote])):
if term is not None:
role = roles[0]
self['context_role'] = role
return role
# Below, use the date of the related bill or vote to determine
# which (of multiple) roles applies.
# Get the context date.
if session is not None:
# If we're here, we have multiple roles for a single session.
# Try to find the correct one in self.metadata,
# else give up.
session_data = self.metadata['session_details'][session]
for role in roles:
role_start = role.get('start_date')
role_end = role.get('end_date')
# Return the first role that overlaps at all with the
# session.
session_start = session_data.get('start_date')
session_end = session_data.get('end_date')
if session_start and session_end:
started_during = (role_start < session_start <
role_end)
ended_during = (role_start < session_end < role_end)
if started_during or ended_during:
self['context_role'] = role
return role
else:
continue
# Return first role from the session?
role = roles[0]
self['context_role'] = role
return role
if vote is not None:
date = vote['date']
if bill is not None:
date = bill['action_dates']['first']
dates_exist = False
for role in roles:
start_date = role.get('start_date')
end_date = role.get('end_date')
if start_date and end_date:
dates_exist = True
if start_date < date < end_date:
self['context_role'] = role
return role
if dates_exist:
# If we're here, the context date didn't fall into any of the
# legislator's role date ranges.
return ''
else:
# Here the roles didn't have date ranges. Return the last one?
role = roles.pop()
self['context_role'] = role
return role
return '' | Tell this legislator object which session to use when calculating
the legisator's context_role for a given bill or vote. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/legislators.py#L161-L287 |
openstates/billy | billy/models/legislators.py | Legislator.old_roles_manager | def old_roles_manager(self):
'''Return old roles, grouped first by term, then by chamber,
then by type.'''
wrapper = self._old_role_wrapper
chamber_getter = operator.methodcaller('get', 'chamber')
for term, roles in self.get('old_roles', {}).items():
chamber_roles = defaultdict(lambda: defaultdict(list))
for chamber, roles in itertools.groupby(roles, chamber_getter):
for role in roles:
role = wrapper(role)
typeslug = role['type'].lower().replace(' ', '_')
chamber_roles[chamber][typeslug].append(role)
yield term, chamber_roles | python | def old_roles_manager(self):
'''Return old roles, grouped first by term, then by chamber,
then by type.'''
wrapper = self._old_role_wrapper
chamber_getter = operator.methodcaller('get', 'chamber')
for term, roles in self.get('old_roles', {}).items():
chamber_roles = defaultdict(lambda: defaultdict(list))
for chamber, roles in itertools.groupby(roles, chamber_getter):
for role in roles:
role = wrapper(role)
typeslug = role['type'].lower().replace(' ', '_')
chamber_roles[chamber][typeslug].append(role)
yield term, chamber_roles | Return old roles, grouped first by term, then by chamber,
then by type. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/legislators.py#L315-L327 |
openstates/billy | billy/importers/names.py | NameMatcher._normalize | def _normalize(self, name):
"""
Normalizes a legislator name by stripping titles from the front,
converting to lowercase and removing punctuation.
"""
name = re.sub(
r'^(Senator|Representative|Sen\.?|Rep\.?|'
'Hon\.?|Right Hon\.?|Mr\.?|Mrs\.?|Ms\.?|L\'hon\.?|'
'Assembly(member|man|woman)) ',
'',
name)
return name.strip().lower().replace('.', '') | python | def _normalize(self, name):
"""
Normalizes a legislator name by stripping titles from the front,
converting to lowercase and removing punctuation.
"""
name = re.sub(
r'^(Senator|Representative|Sen\.?|Rep\.?|'
'Hon\.?|Right Hon\.?|Mr\.?|Mrs\.?|Ms\.?|L\'hon\.?|'
'Assembly(member|man|woman)) ',
'',
name)
return name.strip().lower().replace('.', '') | Normalizes a legislator name by stripping titles from the front,
converting to lowercase and removing punctuation. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/names.py#L120-L131 |
openstates/billy | billy/importers/names.py | NameMatcher._learn | def _learn(self, legislator):
"""
Expects a dictionary with full_name, first_name, last_name and
middle_name elements as key.
While this can grow quickly, we should never be dealing with
more than a few hundred legislators at a time so don't worry about
it.
"""
name, obj = legislator, legislator['_id']
if (legislator['roles'] and legislator['roles'][0]['term'] ==
self._term and legislator['roles'][0]['type'] == 'member'):
chamber = legislator['roles'][0]['chamber']
else:
try:
chamber = legislator['old_roles'][self._term][0].get('chamber')
except KeyError:
raise ValueError("no role in legislator %s [%s] for term %s" %
(legislator['full_name'], legislator['_id'],
self._term))
if '_code' in name:
code = name['_code']
if code in self._codes[chamber] or code in self._codes[None]:
raise ValueError("non-unique legislator code [%s] for %s" %
(code, name['full_name']))
self._codes[chamber][code] = obj
self._codes[None][code] = obj
# We throw possible forms of this name into a set because we
# don't want to try to add the same form twice for the same
# name
forms = set()
def add_form(form):
forms.add(self._normalize(form))
add_form(name['full_name'])
add_form(name['_scraped_name'])
add_form(name['last_name'])
if name['first_name']:
add_form("%s, %s" % (name['last_name'], name['first_name']))
add_form("%s %s" % (name['first_name'], name['last_name']))
add_form("%s, %s" % (name['last_name'], name['first_name'][0]))
add_form("%s (%s)" % (name['last_name'], name['first_name']))
add_form("%s %s" % (name['first_name'][0], name['last_name']))
add_form("%s (%s)" % (name['last_name'], name['first_name'][0]))
if name['middle_name']:
add_form("%s, %s %s" % (name['last_name'], name['first_name'],
name['middle_name']))
add_form("%s, %s %s" % (name['last_name'],
name['first_name'][0],
name['middle_name']))
add_form("%s %s %s" % (name['first_name'],
name['middle_name'],
name['last_name']))
add_form("%s, %s %s" % (name['last_name'],
name['first_name'][0],
name['middle_name'][0]))
add_form("%s %s %s" % (name['first_name'],
name['middle_name'][0],
name['last_name']))
add_form("%s, %s %s" % (name['last_name'],
name['first_name'],
name['middle_name'][0]))
add_form("%s, %s.%s." % (name['last_name'],
name['first_name'][0],
name['middle_name'][0]))
for form in forms:
form = self._normalize(form)
if form in self._names[chamber]:
self._names[chamber][form] = None
else:
self._names[chamber][form] = obj
if form in self._names[None]:
self._names[None][form] = None
else:
self._names[None][form] = obj | python | def _learn(self, legislator):
"""
Expects a dictionary with full_name, first_name, last_name and
middle_name elements as key.
While this can grow quickly, we should never be dealing with
more than a few hundred legislators at a time so don't worry about
it.
"""
name, obj = legislator, legislator['_id']
if (legislator['roles'] and legislator['roles'][0]['term'] ==
self._term and legislator['roles'][0]['type'] == 'member'):
chamber = legislator['roles'][0]['chamber']
else:
try:
chamber = legislator['old_roles'][self._term][0].get('chamber')
except KeyError:
raise ValueError("no role in legislator %s [%s] for term %s" %
(legislator['full_name'], legislator['_id'],
self._term))
if '_code' in name:
code = name['_code']
if code in self._codes[chamber] or code in self._codes[None]:
raise ValueError("non-unique legislator code [%s] for %s" %
(code, name['full_name']))
self._codes[chamber][code] = obj
self._codes[None][code] = obj
# We throw possible forms of this name into a set because we
# don't want to try to add the same form twice for the same
# name
forms = set()
def add_form(form):
forms.add(self._normalize(form))
add_form(name['full_name'])
add_form(name['_scraped_name'])
add_form(name['last_name'])
if name['first_name']:
add_form("%s, %s" % (name['last_name'], name['first_name']))
add_form("%s %s" % (name['first_name'], name['last_name']))
add_form("%s, %s" % (name['last_name'], name['first_name'][0]))
add_form("%s (%s)" % (name['last_name'], name['first_name']))
add_form("%s %s" % (name['first_name'][0], name['last_name']))
add_form("%s (%s)" % (name['last_name'], name['first_name'][0]))
if name['middle_name']:
add_form("%s, %s %s" % (name['last_name'], name['first_name'],
name['middle_name']))
add_form("%s, %s %s" % (name['last_name'],
name['first_name'][0],
name['middle_name']))
add_form("%s %s %s" % (name['first_name'],
name['middle_name'],
name['last_name']))
add_form("%s, %s %s" % (name['last_name'],
name['first_name'][0],
name['middle_name'][0]))
add_form("%s %s %s" % (name['first_name'],
name['middle_name'][0],
name['last_name']))
add_form("%s, %s %s" % (name['last_name'],
name['first_name'],
name['middle_name'][0]))
add_form("%s, %s.%s." % (name['last_name'],
name['first_name'][0],
name['middle_name'][0]))
for form in forms:
form = self._normalize(form)
if form in self._names[chamber]:
self._names[chamber][form] = None
else:
self._names[chamber][form] = obj
if form in self._names[None]:
self._names[None][form] = None
else:
self._names[None][form] = obj | Expects a dictionary with full_name, first_name, last_name and
middle_name elements as key.
While this can grow quickly, we should never be dealing with
more than a few hundred legislators at a time so don't worry about
it. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/names.py#L133-L215 |
openstates/billy | billy/importers/names.py | NameMatcher.match | def match(self, name, chamber=None):
"""
If this matcher has uniquely seen a matching name, return its
value. Otherwise, return None.
If chamber is set then the search will be limited to legislators
with matching chamber. If chamber is None then the search
will be cross-chamber.
"""
try:
return self._manual[chamber][name]
except KeyError:
pass
if chamber == 'joint':
chamber = None
try:
return self._codes[chamber][name]
except KeyError:
pass
if chamber not in self._names:
logger.warning("Chamber %s is invalid for a legislator." % (
chamber
))
return None
name = self._normalize(name)
return self._names[chamber].get(name, None) | python | def match(self, name, chamber=None):
"""
If this matcher has uniquely seen a matching name, return its
value. Otherwise, return None.
If chamber is set then the search will be limited to legislators
with matching chamber. If chamber is None then the search
will be cross-chamber.
"""
try:
return self._manual[chamber][name]
except KeyError:
pass
if chamber == 'joint':
chamber = None
try:
return self._codes[chamber][name]
except KeyError:
pass
if chamber not in self._names:
logger.warning("Chamber %s is invalid for a legislator." % (
chamber
))
return None
name = self._normalize(name)
return self._names[chamber].get(name, None) | If this matcher has uniquely seen a matching name, return its
value. Otherwise, return None.
If chamber is set then the search will be limited to legislators
with matching chamber. If chamber is None then the search
will be cross-chamber. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/names.py#L217-L246 |
openstates/billy | billy/scrape/__init__.py | get_scraper | def get_scraper(mod_path, scraper_type):
""" import a scraper from the scraper registry """
# act of importing puts it into the registry
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ScrapeError("could not import %s" % mod_path, e)
# now find the class within the module
ScraperClass = None
for k, v in module.__dict__.items():
if k.startswith('_'):
continue
if getattr(v, 'scraper_type', None) == scraper_type:
if ScraperClass:
raise ScrapeError("two %s scrapers found in module %s: %s %s" %
(scraper_type, mod_path, ScraperClass, k))
ScraperClass = v
if not ScraperClass:
raise ScrapeError("no %s scraper found in module %s" % (
scraper_type, mod_path))
return ScraperClass | python | def get_scraper(mod_path, scraper_type):
""" import a scraper from the scraper registry """
# act of importing puts it into the registry
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ScrapeError("could not import %s" % mod_path, e)
# now find the class within the module
ScraperClass = None
for k, v in module.__dict__.items():
if k.startswith('_'):
continue
if getattr(v, 'scraper_type', None) == scraper_type:
if ScraperClass:
raise ScrapeError("two %s scrapers found in module %s: %s %s" %
(scraper_type, mod_path, ScraperClass, k))
ScraperClass = v
if not ScraperClass:
raise ScrapeError("no %s scraper found in module %s" % (
scraper_type, mod_path))
return ScraperClass | import a scraper from the scraper registry | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/__init__.py#L230-L255 |
openstates/billy | billy/scrape/__init__.py | Scraper._load_schemas | def _load_schemas(self):
""" load all schemas into schema dict """
types = ('bill', 'committee', 'person', 'vote', 'event')
for type in types:
schema_path = os.path.join(os.path.split(__file__)[0],
'../schemas/%s.json' % type)
self._schema[type] = json.load(open(schema_path))
self._schema[type]['properties'][settings.LEVEL_FIELD] = {
'minLength': 2, 'type': 'string'}
# bills & votes
self._schema['bill']['properties']['session']['enum'] = \
self.all_sessions()
self._schema['vote']['properties']['session']['enum'] = \
self.all_sessions()
# legislators
terms = [t['name'] for t in self.metadata['terms']]
# ugly break here b/c this line is nearly impossible to split
self._schema['person']['properties']['roles'][
'items']['properties']['term']['enum'] = terms | python | def _load_schemas(self):
""" load all schemas into schema dict """
types = ('bill', 'committee', 'person', 'vote', 'event')
for type in types:
schema_path = os.path.join(os.path.split(__file__)[0],
'../schemas/%s.json' % type)
self._schema[type] = json.load(open(schema_path))
self._schema[type]['properties'][settings.LEVEL_FIELD] = {
'minLength': 2, 'type': 'string'}
# bills & votes
self._schema['bill']['properties']['session']['enum'] = \
self.all_sessions()
self._schema['vote']['properties']['session']['enum'] = \
self.all_sessions()
# legislators
terms = [t['name'] for t in self.metadata['terms']]
# ugly break here b/c this line is nearly impossible to split
self._schema['person']['properties']['roles'][
'items']['properties']['term']['enum'] = terms | load all schemas into schema dict | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/__init__.py#L95-L117 |
openstates/billy | billy/scrape/__init__.py | Scraper.validate_session | def validate_session(self, session, latest_only=False):
""" Check that a session is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if session is invalid
:param session: string representing session to check
"""
if latest_only:
if session != self.metadata['terms'][-1]['sessions'][-1]:
raise NoDataForPeriod(session)
for t in self.metadata['terms']:
if session in t['sessions']:
return True
raise NoDataForPeriod(session) | python | def validate_session(self, session, latest_only=False):
""" Check that a session is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if session is invalid
:param session: string representing session to check
"""
if latest_only:
if session != self.metadata['terms'][-1]['sessions'][-1]:
raise NoDataForPeriod(session)
for t in self.metadata['terms']:
if session in t['sessions']:
return True
raise NoDataForPeriod(session) | Check that a session is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if session is invalid
:param session: string representing session to check | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/__init__.py#L138-L152 |
openstates/billy | billy/scrape/__init__.py | Scraper.validate_term | def validate_term(self, term, latest_only=False):
""" Check that a term is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid
:param term: string representing term to check
:param latest_only: if True, will raise exception if term is not
the current term (default: False)
"""
if latest_only:
if term == self.metadata['terms'][-1]['name']:
return True
else:
raise NoDataForPeriod(term)
for t in self.metadata['terms']:
if term == t['name']:
return True
raise NoDataForPeriod(term) | python | def validate_term(self, term, latest_only=False):
""" Check that a term is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid
:param term: string representing term to check
:param latest_only: if True, will raise exception if term is not
the current term (default: False)
"""
if latest_only:
if term == self.metadata['terms'][-1]['name']:
return True
else:
raise NoDataForPeriod(term)
for t in self.metadata['terms']:
if term == t['name']:
return True
raise NoDataForPeriod(term) | Check that a term is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid
:param term: string representing term to check
:param latest_only: if True, will raise exception if term is not
the current term (default: False) | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/__init__.py#L154-L173 |
openstates/billy | billy/scrape/__init__.py | SourcedObject.add_source | def add_source(self, url, **kwargs):
"""
Add a source URL from which data related to this object was scraped.
:param url: the location of the source
"""
self['sources'].append(dict(url=url, **kwargs)) | python | def add_source(self, url, **kwargs):
"""
Add a source URL from which data related to this object was scraped.
:param url: the location of the source
"""
self['sources'].append(dict(url=url, **kwargs)) | Add a source URL from which data related to this object was scraped.
:param url: the location of the source | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/__init__.py#L221-L227 |
openstates/billy | billy/scrape/utils.py | PlaintextColumns._get_column_ends | def _get_column_ends(self):
'''Guess where the ends of the columns lie.
'''
ends = collections.Counter()
for line in self.text.splitlines():
for matchobj in re.finditer('\s{2,}', line.lstrip()):
ends[matchobj.end()] += 1
return ends | python | def _get_column_ends(self):
'''Guess where the ends of the columns lie.
'''
ends = collections.Counter()
for line in self.text.splitlines():
for matchobj in re.finditer('\s{2,}', line.lstrip()):
ends[matchobj.end()] += 1
return ends | Guess where the ends of the columns lie. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/utils.py#L71-L78 |
openstates/billy | billy/scrape/utils.py | PlaintextColumns._get_column_boundaries | def _get_column_boundaries(self):
'''Use the guessed ends to guess the boundaries of the plain
text columns.
'''
# Try to figure out the most common column boundaries.
ends = self._get_column_ends()
if not ends:
# If there aren't even any nontrivial sequences of whitespace
# dividing text, there may be just one column. In which case,
# Return a single span, effectively the whole line.
return [slice(None, None)]
most_common = []
threshold = self.threshold
for k, v in collections.Counter(ends.values()).most_common():
if k >= threshold:
most_common.append(k)
if most_common:
boundaries = []
for k, v in ends.items():
if v in most_common:
boundaries.append(k)
else:
# Here there weren't enough boundaries to guess the most common
# ones, so just use the apparent boundaries. In other words, we
# have only 1 row. Potentially a source of inaccuracy.
boundaries = ends.keys()
# Convert the boundaries into a list of span slices.
boundaries.sort()
last_boundary = boundaries[-1]
boundaries = zip([0] + boundaries, boundaries)
boundaries = list(itertools.starmap(slice, boundaries))
# And get from the last boundary to the line ending.
boundaries.append(slice(last_boundary, None))
return boundaries | python | def _get_column_boundaries(self):
'''Use the guessed ends to guess the boundaries of the plain
text columns.
'''
# Try to figure out the most common column boundaries.
ends = self._get_column_ends()
if not ends:
# If there aren't even any nontrivial sequences of whitespace
# dividing text, there may be just one column. In which case,
# Return a single span, effectively the whole line.
return [slice(None, None)]
most_common = []
threshold = self.threshold
for k, v in collections.Counter(ends.values()).most_common():
if k >= threshold:
most_common.append(k)
if most_common:
boundaries = []
for k, v in ends.items():
if v in most_common:
boundaries.append(k)
else:
# Here there weren't enough boundaries to guess the most common
# ones, so just use the apparent boundaries. In other words, we
# have only 1 row. Potentially a source of inaccuracy.
boundaries = ends.keys()
# Convert the boundaries into a list of span slices.
boundaries.sort()
last_boundary = boundaries[-1]
boundaries = zip([0] + boundaries, boundaries)
boundaries = list(itertools.starmap(slice, boundaries))
# And get from the last boundary to the line ending.
boundaries.append(slice(last_boundary, None))
return boundaries | Use the guessed ends to guess the boundaries of the plain
text columns. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/utils.py#L80-L117 |
openstates/billy | billy/scrape/utils.py | PlaintextColumns.getcells | def getcells(self, line):
'''Using self.boundaries, extract cells from the given line.
'''
for boundary in self.boundaries:
cell = line.lstrip()[boundary].strip()
if cell:
for cell in re.split('\s{3,}', cell):
yield cell
else:
yield None | python | def getcells(self, line):
'''Using self.boundaries, extract cells from the given line.
'''
for boundary in self.boundaries:
cell = line.lstrip()[boundary].strip()
if cell:
for cell in re.split('\s{3,}', cell):
yield cell
else:
yield None | Using self.boundaries, extract cells from the given line. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/utils.py#L127-L136 |
openstates/billy | billy/scrape/utils.py | PlaintextColumns.rows | def rows(self):
'''Returns an iterator of row tuples.
'''
for line in self.text.splitlines():
yield tuple(self.getcells(line)) | python | def rows(self):
'''Returns an iterator of row tuples.
'''
for line in self.text.splitlines():
yield tuple(self.getcells(line)) | Returns an iterator of row tuples. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/utils.py#L138-L142 |
openstates/billy | billy/scrape/utils.py | PlaintextColumns.cells | def cells(self):
'''Returns an iterator of all cells in the table.
'''
for line in self.text.splitlines():
for cell in self.getcells(line):
yield cell | python | def cells(self):
'''Returns an iterator of all cells in the table.
'''
for line in self.text.splitlines():
for cell in self.getcells(line):
yield cell | Returns an iterator of all cells in the table. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/utils.py#L144-L149 |
openstates/billy | billy/models/pagination.py | PaginatorBase._previous_pages_count | def _previous_pages_count(self):
'A generator of previous page integers.'
skip = self.skip
if skip == 0:
return 0
count, remainder = divmod(skip, self.limit)
return count | python | def _previous_pages_count(self):
'A generator of previous page integers.'
skip = self.skip
if skip == 0:
return 0
count, remainder = divmod(skip, self.limit)
return count | Return the number of previous pages. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/pagination.py#L17-L23 |
openstates/billy | billy/models/pagination.py | PaginatorBase.previous_pages_numbers | def previous_pages_numbers(self):
'A generator of previous page integers.'
count = self._previous_pages_count() + 1
for i in reversed(range(1, count)):
yield i | python | def previous_pages_numbers(self):
'A generator of previous page integers.'
count = self._previous_pages_count() + 1
for i in reversed(range(1, count)):
yield i | A generator of previous page integers. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/pagination.py#L31-L35 |
openstates/billy | billy/models/pagination.py | PaginatorBase.range_end | def range_end(self):
'''"Showing 40 - 50 of 234 results
^
'''
count = self.count
range_end = self.range_start + self.limit - 1
if count < range_end:
range_end = count
return range_end | python | def range_end(self):
'''"Showing 40 - 50 of 234 results
^
'''
count = self.count
range_end = self.range_start + self.limit - 1
if count < range_end:
range_end = count
return range_end | "Showing 40 - 50 of 234 results
^ | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/pagination.py#L66-L74 |
openstates/billy | billy/models/pagination.py | PaginatorBase.pagination_data | def pagination_data(self, max_number_of_links=7):
'''Returns a generator of tuples (string, page_number, clickable),
where `string` is the text of the html link, `page_number` is
the number of the page the link points to, and `clickable` is
a boolean indicating whether the link is clickable or not.
'''
div, mod = divmod(max_number_of_links, 2)
if not mod == 1:
msg = 'Max number of links must be odd, was %r.'
raise ValueError(msg % max_number_of_links)
midpoint = (max_number_of_links - 1) / 2
current_page = self.current_page
last_page = self.last_page
if current_page > last_page:
raise Http404('invalid page number')
current_page = last_page
show_link_firstpage = midpoint < current_page
show_link_previouspage = 1 < current_page
show_link_lastpage = current_page < (self.last_page - midpoint)
show_link_nextpage = current_page < last_page
extra_room_previous = midpoint - current_page
if extra_room_previous < 0:
extra_room_previous = 0
if not show_link_firstpage:
extra_room_previous += 1
if not show_link_previouspage:
extra_room_previous += 1
extra_room_subsequent = current_page - last_page + midpoint
extra_room_subsequent = max([extra_room_subsequent, 0])
if not show_link_nextpage:
extra_room_subsequent += 1
if not show_link_lastpage:
extra_room_subsequent += 1
if self.current_page == 1:
yield PageLink(string=1, page_number=1, clickable=False)
else:
# The "first page" link.
if show_link_firstpage:
#[<<] [<] [7] [8] [9] 10 [11] ...'
# ^
yield PageLink(string=u"\u00AB", page_number=1, clickable=True)
if show_link_previouspage:
# The "previous page" link.
#[<<] [<] [7] [8] [9] 10 [11] ...'
# ^
yield PageLink(string=u"\u2039",
page_number=self.previous_page,
clickable=True)
# Up to `midpoint + extra_room_subsequent` previous page numbers.
previous = itertools.islice(self.previous_pages_numbers(),
midpoint + extra_room_subsequent)
previous = list(previous)[::-1]
for page_number in previous:
yield PageLink(string=page_number,
page_number=page_number, clickable=True)
# The current page, clickable.
if current_page != 1:
yield PageLink(string=current_page,
page_number=current_page, clickable=False)
# Up to `midpoint + extra_room_previous` subsequent page numbers.
subsequent_count = midpoint + extra_room_previous
_subsequent_pages_count = self._subsequent_pages_count
if _subsequent_pages_count < subsequent_count:
subsequent_count = _subsequent_pages_count
for page_number in itertools.islice(self.subsequent_pages_numbers(),
subsequent_count):
yield PageLink(string=page_number,
page_number=page_number, clickable=True)
if show_link_nextpage:
yield PageLink(string=u"\u203A",
page_number=current_page + 1,
clickable=True)
if show_link_lastpage:
yield PageLink(string=u"\u00BB",
page_number=last_page,
clickable=True) | python | def pagination_data(self, max_number_of_links=7):
'''Returns a generator of tuples (string, page_number, clickable),
where `string` is the text of the html link, `page_number` is
the number of the page the link points to, and `clickable` is
a boolean indicating whether the link is clickable or not.
'''
div, mod = divmod(max_number_of_links, 2)
if not mod == 1:
msg = 'Max number of links must be odd, was %r.'
raise ValueError(msg % max_number_of_links)
midpoint = (max_number_of_links - 1) / 2
current_page = self.current_page
last_page = self.last_page
if current_page > last_page:
raise Http404('invalid page number')
current_page = last_page
show_link_firstpage = midpoint < current_page
show_link_previouspage = 1 < current_page
show_link_lastpage = current_page < (self.last_page - midpoint)
show_link_nextpage = current_page < last_page
extra_room_previous = midpoint - current_page
if extra_room_previous < 0:
extra_room_previous = 0
if not show_link_firstpage:
extra_room_previous += 1
if not show_link_previouspage:
extra_room_previous += 1
extra_room_subsequent = current_page - last_page + midpoint
extra_room_subsequent = max([extra_room_subsequent, 0])
if not show_link_nextpage:
extra_room_subsequent += 1
if not show_link_lastpage:
extra_room_subsequent += 1
if self.current_page == 1:
yield PageLink(string=1, page_number=1, clickable=False)
else:
# The "first page" link.
if show_link_firstpage:
#[<<] [<] [7] [8] [9] 10 [11] ...'
# ^
yield PageLink(string=u"\u00AB", page_number=1, clickable=True)
if show_link_previouspage:
# The "previous page" link.
#[<<] [<] [7] [8] [9] 10 [11] ...'
# ^
yield PageLink(string=u"\u2039",
page_number=self.previous_page,
clickable=True)
# Up to `midpoint + extra_room_subsequent` previous page numbers.
previous = itertools.islice(self.previous_pages_numbers(),
midpoint + extra_room_subsequent)
previous = list(previous)[::-1]
for page_number in previous:
yield PageLink(string=page_number,
page_number=page_number, clickable=True)
# The current page, clickable.
if current_page != 1:
yield PageLink(string=current_page,
page_number=current_page, clickable=False)
# Up to `midpoint + extra_room_previous` subsequent page numbers.
subsequent_count = midpoint + extra_room_previous
_subsequent_pages_count = self._subsequent_pages_count
if _subsequent_pages_count < subsequent_count:
subsequent_count = _subsequent_pages_count
for page_number in itertools.islice(self.subsequent_pages_numbers(),
subsequent_count):
yield PageLink(string=page_number,
page_number=page_number, clickable=True)
if show_link_nextpage:
yield PageLink(string=u"\u203A",
page_number=current_page + 1,
clickable=True)
if show_link_lastpage:
yield PageLink(string=u"\u00BB",
page_number=last_page,
clickable=True) | Returns a generator of tuples (string, page_number, clickable),
where `string` is the text of the html link, `page_number` is
the number of the page the link points to, and `clickable` is
a boolean indicating whether the link is clickable or not. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/pagination.py#L91-L180 |
openstates/billy | billy/bin/update.py | _run_scraper | def _run_scraper(scraper_type, options, metadata):
"""
scraper_type: bills, legislators, committees, votes
"""
_clear_scraped_data(options.output_dir, scraper_type)
scraper = _get_configured_scraper(scraper_type, options, metadata)
ua_email = os.environ.get('BILLY_UA_EMAIL')
if ua_email and scraper:
scraper.user_agent += ' ({})'.format(ua_email)
if not scraper:
return [{
"type": scraper_type,
"start_time": dt.datetime.utcnow(),
"noscraper": True,
"end_time": dt.datetime.utcnow()
}]
runs = []
# Removed from the inner loop due to non-bicameral scrapers
scrape = {
"type": scraper_type
}
scrape['start_time'] = dt.datetime.utcnow()
if scraper_type in ('bills', 'votes', 'events'):
times = options.sessions
for time in times:
scraper.validate_session(time, scraper.latest_only)
elif scraper_type in ('committees', 'legislators'):
times = options.terms
for time in times:
scraper.validate_term(time, scraper.latest_only)
# run scraper against year/session/term
for time in times:
# old style
chambers = options.chambers
if scraper_type == 'events' and len(options.chambers) == 2:
chambers.append('other')
if _is_old_scrape(scraper.scrape):
for chamber in chambers:
scraper.scrape(chamber, time)
else:
scraper.scrape(time, chambers=chambers)
# error out if events or votes don't scrape anything
if not scraper.object_count and scraper_type not in ('events',
'votes'):
raise ScrapeError("%s scraper didn't save any objects" %
scraper_type)
scrape['end_time'] = dt.datetime.utcnow()
runs.append(scrape)
return runs | python | def _run_scraper(scraper_type, options, metadata):
"""
scraper_type: bills, legislators, committees, votes
"""
_clear_scraped_data(options.output_dir, scraper_type)
scraper = _get_configured_scraper(scraper_type, options, metadata)
ua_email = os.environ.get('BILLY_UA_EMAIL')
if ua_email and scraper:
scraper.user_agent += ' ({})'.format(ua_email)
if not scraper:
return [{
"type": scraper_type,
"start_time": dt.datetime.utcnow(),
"noscraper": True,
"end_time": dt.datetime.utcnow()
}]
runs = []
# Removed from the inner loop due to non-bicameral scrapers
scrape = {
"type": scraper_type
}
scrape['start_time'] = dt.datetime.utcnow()
if scraper_type in ('bills', 'votes', 'events'):
times = options.sessions
for time in times:
scraper.validate_session(time, scraper.latest_only)
elif scraper_type in ('committees', 'legislators'):
times = options.terms
for time in times:
scraper.validate_term(time, scraper.latest_only)
# run scraper against year/session/term
for time in times:
# old style
chambers = options.chambers
if scraper_type == 'events' and len(options.chambers) == 2:
chambers.append('other')
if _is_old_scrape(scraper.scrape):
for chamber in chambers:
scraper.scrape(chamber, time)
else:
scraper.scrape(time, chambers=chambers)
# error out if events or votes don't scrape anything
if not scraper.object_count and scraper_type not in ('events',
'votes'):
raise ScrapeError("%s scraper didn't save any objects" %
scraper_type)
scrape['end_time'] = dt.datetime.utcnow()
runs.append(scrape)
return runs | scraper_type: bills, legislators, committees, votes | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/bin/update.py#L58-L115 |
openstates/billy | billy/importers/bills.py | import_bill | def import_bill(data, standalone_votes, categorizer):
"""
insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization)
"""
abbr = data[settings.LEVEL_FIELD]
# clean up bill_ids
data['bill_id'] = fix_bill_id(data['bill_id'])
if 'alternate_bill_ids' in data:
data['alternate_bill_ids'] = [fix_bill_id(bid) for bid in
data['alternate_bill_ids']]
# move subjects to scraped_subjects
# NOTE: intentionally doesn't copy blank lists of subjects
# this avoids the problem where a bill is re-run but we can't
# get subjects anymore (quite common)
subjects = data.pop('subjects', None)
if subjects:
data['scraped_subjects'] = subjects
# update categorized subjects
if categorizer:
categorizer.categorize_bill(data)
# companions
for companion in data['companions']:
companion['bill_id'] = fix_bill_id(companion['bill_id'])
# query based on companion
spec = companion.copy()
spec[settings.LEVEL_FIELD] = abbr
if not spec['chamber']:
spec.pop('chamber')
companion_obj = db.bills.find_one(spec)
if companion_obj:
companion['internal_id'] = companion_obj['_id']
else:
logger.warning('Unknown companion: {chamber} {session} {bill_id}'
.format(**companion))
# look for a prior version of this bill
bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
'session': data['session'],
'chamber': data['chamber'],
'bill_id': data['bill_id']})
# keep doc ids consistent
doc_matcher = DocumentMatcher(abbr)
if bill:
doc_matcher.learn_ids(bill['versions'] + bill['documents'])
doc_matcher.set_ids(data['versions'] + data['documents'])
# match sponsor leg_ids
match_sponsor_ids(abbr, data)
# process votes ############
# pull votes off bill
bill_votes = data.pop('votes', [])
# grab the external bill votes if present
if metadata(abbr).get('_partial_vote_bill_id'):
# this is a hack initially added for Rhode Island where we can't
# determine the full bill_id, if this key is in the metadata
# we just use the numeric portion, not ideal as it won't work
# where HB/SBs overlap, but in RI they never do
# pull off numeric portion of bill_id
numeric_bill_id = data['bill_id'].split()[1]
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
numeric_bill_id), [])
else:
# add loaded votes to data
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
data['bill_id']), [])
# do id matching and other vote prep
if bill:
prepare_votes(abbr, data['session'], bill['_id'], bill_votes)
else:
prepare_votes(abbr, data['session'], None, bill_votes)
# process actions ###########
dates = {'first': None, 'last': None, 'passed_upper': None,
'passed_lower': None, 'signed': None}
vote_flags = {
"bill:passed",
"bill:failed",
"bill:veto_override:passed",
"bill:veto_override:failed",
"amendment:passed",
"amendment:failed",
"committee:passed",
"committee:passed:favorable",
"committee:passed:unfavorable",
"committee:passed:failed"
}
already_linked = set()
remove_vote = set()
for action in data['actions']:
adate = action['date']
def _match_committee(name):
return get_committee_id(abbr, action['actor'], name)
def _match_legislator(name):
return get_legislator_id(abbr,
data['session'],
action['actor'],
name)
resolvers = {
"committee": _match_committee,
"legislator": _match_legislator
}
if "related_entities" in action:
for entity in action['related_entities']:
try:
resolver = resolvers[entity['type']]
except KeyError as e:
# We don't know how to deal.
logger.error("I don't know how to sort a %s" % e)
continue
id = resolver(entity['name'])
entity['id'] = id
# first & last dates
if not dates['first'] or adate < dates['first']:
dates['first'] = adate
if not dates['last'] or adate > dates['last']:
dates['last'] = adate
# passed & signed dates
if (not dates['passed_upper'] and action['actor'] == 'upper'
and 'bill:passed' in action['type']):
dates['passed_upper'] = adate
elif (not dates['passed_lower'] and action['actor'] == 'lower'
and 'bill:passed' in action['type']):
dates['passed_lower'] = adate
elif (not dates['signed'] and 'governor:signed' in action['type']):
dates['signed'] = adate
# vote-action matching
action_attached = False
# only attempt vote matching if action has a date and is one of the
# designated vote action types
if set(action['type']).intersection(vote_flags) and action['date']:
for vote in bill_votes:
if not vote['date']:
continue
delta = abs(vote['date'] - action['date'])
if (delta < datetime.timedelta(hours=20) and
vote['chamber'] == action['actor']):
if action_attached:
# multiple votes match, we can't guess
action.pop('related_votes', None)
else:
related_vote = vote['vote_id']
if related_vote in already_linked:
remove_vote.add(related_vote)
already_linked.add(related_vote)
action['related_votes'] = [related_vote]
action_attached = True
# remove related_votes that we linked to multiple actions
for action in data['actions']:
for vote in remove_vote:
if vote in action.get('related_votes', []):
action['related_votes'].remove(vote)
# save action dates to data
data['action_dates'] = dates
data['_term'] = term_for_session(abbr, data['session'])
alt_titles = set(data.get('alternate_titles', []))
for version in data['versions']:
# Merge any version titles into the alternate_titles list
if 'title' in version:
alt_titles.add(version['title'])
if '+short_title' in version:
alt_titles.add(version['+short_title'])
try:
# Make sure the primary title isn't included in the
# alternate title list
alt_titles.remove(data['title'])
except KeyError:
pass
data['alternate_titles'] = list(alt_titles)
data = apply_filters(filters, data)
if not bill:
insert_with_id(data)
git_add_bill(data)
save_votes(data, bill_votes)
return "insert"
else:
update(bill, data, db.bills)
git_add_bill(bill)
save_votes(bill, bill_votes)
return "update" | python | def import_bill(data, standalone_votes, categorizer):
"""
insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization)
"""
abbr = data[settings.LEVEL_FIELD]
# clean up bill_ids
data['bill_id'] = fix_bill_id(data['bill_id'])
if 'alternate_bill_ids' in data:
data['alternate_bill_ids'] = [fix_bill_id(bid) for bid in
data['alternate_bill_ids']]
# move subjects to scraped_subjects
# NOTE: intentionally doesn't copy blank lists of subjects
# this avoids the problem where a bill is re-run but we can't
# get subjects anymore (quite common)
subjects = data.pop('subjects', None)
if subjects:
data['scraped_subjects'] = subjects
# update categorized subjects
if categorizer:
categorizer.categorize_bill(data)
# companions
for companion in data['companions']:
companion['bill_id'] = fix_bill_id(companion['bill_id'])
# query based on companion
spec = companion.copy()
spec[settings.LEVEL_FIELD] = abbr
if not spec['chamber']:
spec.pop('chamber')
companion_obj = db.bills.find_one(spec)
if companion_obj:
companion['internal_id'] = companion_obj['_id']
else:
logger.warning('Unknown companion: {chamber} {session} {bill_id}'
.format(**companion))
# look for a prior version of this bill
bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
'session': data['session'],
'chamber': data['chamber'],
'bill_id': data['bill_id']})
# keep doc ids consistent
doc_matcher = DocumentMatcher(abbr)
if bill:
doc_matcher.learn_ids(bill['versions'] + bill['documents'])
doc_matcher.set_ids(data['versions'] + data['documents'])
# match sponsor leg_ids
match_sponsor_ids(abbr, data)
# process votes ############
# pull votes off bill
bill_votes = data.pop('votes', [])
# grab the external bill votes if present
if metadata(abbr).get('_partial_vote_bill_id'):
# this is a hack initially added for Rhode Island where we can't
# determine the full bill_id, if this key is in the metadata
# we just use the numeric portion, not ideal as it won't work
# where HB/SBs overlap, but in RI they never do
# pull off numeric portion of bill_id
numeric_bill_id = data['bill_id'].split()[1]
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
numeric_bill_id), [])
else:
# add loaded votes to data
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
data['bill_id']), [])
# do id matching and other vote prep
if bill:
prepare_votes(abbr, data['session'], bill['_id'], bill_votes)
else:
prepare_votes(abbr, data['session'], None, bill_votes)
# process actions ###########
dates = {'first': None, 'last': None, 'passed_upper': None,
'passed_lower': None, 'signed': None}
vote_flags = {
"bill:passed",
"bill:failed",
"bill:veto_override:passed",
"bill:veto_override:failed",
"amendment:passed",
"amendment:failed",
"committee:passed",
"committee:passed:favorable",
"committee:passed:unfavorable",
"committee:passed:failed"
}
already_linked = set()
remove_vote = set()
for action in data['actions']:
adate = action['date']
def _match_committee(name):
return get_committee_id(abbr, action['actor'], name)
def _match_legislator(name):
return get_legislator_id(abbr,
data['session'],
action['actor'],
name)
resolvers = {
"committee": _match_committee,
"legislator": _match_legislator
}
if "related_entities" in action:
for entity in action['related_entities']:
try:
resolver = resolvers[entity['type']]
except KeyError as e:
# We don't know how to deal.
logger.error("I don't know how to sort a %s" % e)
continue
id = resolver(entity['name'])
entity['id'] = id
# first & last dates
if not dates['first'] or adate < dates['first']:
dates['first'] = adate
if not dates['last'] or adate > dates['last']:
dates['last'] = adate
# passed & signed dates
if (not dates['passed_upper'] and action['actor'] == 'upper'
and 'bill:passed' in action['type']):
dates['passed_upper'] = adate
elif (not dates['passed_lower'] and action['actor'] == 'lower'
and 'bill:passed' in action['type']):
dates['passed_lower'] = adate
elif (not dates['signed'] and 'governor:signed' in action['type']):
dates['signed'] = adate
# vote-action matching
action_attached = False
# only attempt vote matching if action has a date and is one of the
# designated vote action types
if set(action['type']).intersection(vote_flags) and action['date']:
for vote in bill_votes:
if not vote['date']:
continue
delta = abs(vote['date'] - action['date'])
if (delta < datetime.timedelta(hours=20) and
vote['chamber'] == action['actor']):
if action_attached:
# multiple votes match, we can't guess
action.pop('related_votes', None)
else:
related_vote = vote['vote_id']
if related_vote in already_linked:
remove_vote.add(related_vote)
already_linked.add(related_vote)
action['related_votes'] = [related_vote]
action_attached = True
# remove related_votes that we linked to multiple actions
for action in data['actions']:
for vote in remove_vote:
if vote in action.get('related_votes', []):
action['related_votes'].remove(vote)
# save action dates to data
data['action_dates'] = dates
data['_term'] = term_for_session(abbr, data['session'])
alt_titles = set(data.get('alternate_titles', []))
for version in data['versions']:
# Merge any version titles into the alternate_titles list
if 'title' in version:
alt_titles.add(version['title'])
if '+short_title' in version:
alt_titles.add(version['+short_title'])
try:
# Make sure the primary title isn't included in the
# alternate title list
alt_titles.remove(data['title'])
except KeyError:
pass
data['alternate_titles'] = list(alt_titles)
data = apply_filters(filters, data)
if not bill:
insert_with_id(data)
git_add_bill(data)
save_votes(data, bill_votes)
return "insert"
else:
update(bill, data, db.bills)
git_add_bill(bill)
save_votes(bill, bill_votes)
return "update" | insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization) | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/bills.py#L175-L385 |
openstates/billy | billy/importers/bills.py | populate_current_fields | def populate_current_fields(abbr):
"""
Set/update _current_term and _current_session fields on all bills
for a given location.
"""
meta = db.metadata.find_one({'_id': abbr})
current_term = meta['terms'][-1]
current_session = current_term['sessions'][-1]
for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
if bill['session'] == current_session:
bill['_current_session'] = True
else:
bill['_current_session'] = False
if bill['session'] in current_term['sessions']:
bill['_current_term'] = True
else:
bill['_current_term'] = False
db.bills.save(bill, safe=True) | python | def populate_current_fields(abbr):
"""
Set/update _current_term and _current_session fields on all bills
for a given location.
"""
meta = db.metadata.find_one({'_id': abbr})
current_term = meta['terms'][-1]
current_session = current_term['sessions'][-1]
for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
if bill['session'] == current_session:
bill['_current_session'] = True
else:
bill['_current_session'] = False
if bill['session'] in current_term['sessions']:
bill['_current_term'] = True
else:
bill['_current_term'] = False
db.bills.save(bill, safe=True) | Set/update _current_term and _current_session fields on all bills
for a given location. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/bills.py#L429-L449 |
openstates/billy | billy/importers/bills.py | GenericIDMatcher.learn_ids | def learn_ids(self, item_list):
""" read in already set ids on objects """
self._reset_sequence()
for item in item_list:
key = self.nondup_key_for_item(item)
self.ids[key] = item[self.id_key] | python | def learn_ids(self, item_list):
""" read in already set ids on objects """
self._reset_sequence()
for item in item_list:
key = self.nondup_key_for_item(item)
self.ids[key] = item[self.id_key] | read in already set ids on objects | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/bills.py#L522-L527 |
openstates/billy | billy/importers/bills.py | GenericIDMatcher.set_ids | def set_ids(self, item_list):
""" set ids on an object, using internal mapping then new ids """
self._reset_sequence()
for item in item_list:
key = self.nondup_key_for_item(item)
item[self.id_key] = self.ids.get(key) or self._get_next_id() | python | def set_ids(self, item_list):
""" set ids on an object, using internal mapping then new ids """
self._reset_sequence()
for item in item_list:
key = self.nondup_key_for_item(item)
item[self.id_key] = self.ids.get(key) or self._get_next_id() | set ids on an object, using internal mapping then new ids | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/bills.py#L529-L534 |
openstates/billy | billy/importers/committees.py | import_committees_from_legislators | def import_committees_from_legislators(current_term, abbr):
""" create committees from legislators that have committee roles """
# for all current legislators
for legislator in db.legislators.find({'roles': {'$elemMatch': {
'term': current_term, settings.LEVEL_FIELD: abbr}}}):
# for all committee roles
for role in legislator['roles']:
if (role['type'] == 'committee member' and
'committee_id' not in role):
spec = {settings.LEVEL_FIELD: abbr,
'chamber': role['chamber'],
'committee': role['committee']}
if 'subcommittee' in role:
spec['subcommittee'] = role['subcommittee']
committee = db.committees.find_one(spec)
if not committee:
committee = spec
committee['_type'] = 'committee'
# copy LEVEL_FIELD from legislator to committee
committee[settings.LEVEL_FIELD] = \
legislator[settings.LEVEL_FIELD]
committee['members'] = []
committee['sources'] = []
if 'subcommittee' not in committee:
committee['subcommittee'] = None
insert_with_id(committee)
for member in committee['members']:
if member['leg_id'] == legislator['leg_id']:
break
else:
committee['members'].append(
{'name': legislator['full_name'],
'leg_id': legislator['leg_id'],
'role': role.get('position') or 'member'})
for source in legislator['sources']:
if source not in committee['sources']:
committee['sources'].append(source)
db.committees.save(committee, safe=True)
role['committee_id'] = committee['_id']
db.legislators.save(legislator, safe=True) | python | def import_committees_from_legislators(current_term, abbr):
""" create committees from legislators that have committee roles """
# for all current legislators
for legislator in db.legislators.find({'roles': {'$elemMatch': {
'term': current_term, settings.LEVEL_FIELD: abbr}}}):
# for all committee roles
for role in legislator['roles']:
if (role['type'] == 'committee member' and
'committee_id' not in role):
spec = {settings.LEVEL_FIELD: abbr,
'chamber': role['chamber'],
'committee': role['committee']}
if 'subcommittee' in role:
spec['subcommittee'] = role['subcommittee']
committee = db.committees.find_one(spec)
if not committee:
committee = spec
committee['_type'] = 'committee'
# copy LEVEL_FIELD from legislator to committee
committee[settings.LEVEL_FIELD] = \
legislator[settings.LEVEL_FIELD]
committee['members'] = []
committee['sources'] = []
if 'subcommittee' not in committee:
committee['subcommittee'] = None
insert_with_id(committee)
for member in committee['members']:
if member['leg_id'] == legislator['leg_id']:
break
else:
committee['members'].append(
{'name': legislator['full_name'],
'leg_id': legislator['leg_id'],
'role': role.get('position') or 'member'})
for source in legislator['sources']:
if source not in committee['sources']:
committee['sources'].append(source)
db.committees.save(committee, safe=True)
role['committee_id'] = committee['_id']
db.legislators.save(legislator, safe=True) | create committees from legislators that have committee roles | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/committees.py#L16-L63 |
openstates/billy | billy/scrape/bills.py | Bill.add_sponsor | def add_sponsor(self, type, name, **kwargs):
"""
Associate a sponsor with this bill.
:param type: the type of sponsorship, e.g. 'primary', 'cosponsor'
:param name: the name of the sponsor as provided by the official source
"""
self['sponsors'].append(dict(type=type, name=name, **kwargs)) | python | def add_sponsor(self, type, name, **kwargs):
"""
Associate a sponsor with this bill.
:param type: the type of sponsorship, e.g. 'primary', 'cosponsor'
:param name: the name of the sponsor as provided by the official source
"""
self['sponsors'].append(dict(type=type, name=name, **kwargs)) | Associate a sponsor with this bill.
:param type: the type of sponsorship, e.g. 'primary', 'cosponsor'
:param name: the name of the sponsor as provided by the official source | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/bills.py#L71-L78 |
openstates/billy | billy/scrape/bills.py | Bill.add_version | def add_version(self, name, url, mimetype=None, on_duplicate='error',
**kwargs):
"""
Add a version of the text of this bill.
:param name: a name given to this version of the text, e.g.
'As Introduced', 'Version 2', 'As amended', 'Enrolled'
:param url: the location of this version on the legislative website.
:param mimetype: MIME type of the document
:param on_duplicate: What to do if a duplicate is seen:
error - default option, raises a ValueError
ignore - add the document twice (rarely the right choice)
use_new - use the new name, removing the old document
use_old - use the old name, not adding the new document
If multiple formats are provided, a good rule of thumb is to
prefer text, followed by html, followed by pdf/word/etc.
"""
if not mimetype:
raise ValueError('mimetype parameter to add_version is required')
if on_duplicate != 'ignore':
if url in self._seen_versions:
if on_duplicate == 'error':
raise ValueError('duplicate version url %s' % url)
elif on_duplicate == 'use_new':
# delete the old version
self['versions'] = [v for v in self['versions']
if v['url'] != url]
elif on_duplicate == 'use_old':
return # do nothing
self._seen_versions.add(url)
d = dict(name=name, url=url, mimetype=mimetype, **kwargs)
self['versions'].append(d) | python | def add_version(self, name, url, mimetype=None, on_duplicate='error',
**kwargs):
"""
Add a version of the text of this bill.
:param name: a name given to this version of the text, e.g.
'As Introduced', 'Version 2', 'As amended', 'Enrolled'
:param url: the location of this version on the legislative website.
:param mimetype: MIME type of the document
:param on_duplicate: What to do if a duplicate is seen:
error - default option, raises a ValueError
ignore - add the document twice (rarely the right choice)
use_new - use the new name, removing the old document
use_old - use the old name, not adding the new document
If multiple formats are provided, a good rule of thumb is to
prefer text, followed by html, followed by pdf/word/etc.
"""
if not mimetype:
raise ValueError('mimetype parameter to add_version is required')
if on_duplicate != 'ignore':
if url in self._seen_versions:
if on_duplicate == 'error':
raise ValueError('duplicate version url %s' % url)
elif on_duplicate == 'use_new':
# delete the old version
self['versions'] = [v for v in self['versions']
if v['url'] != url]
elif on_duplicate == 'use_old':
return # do nothing
self._seen_versions.add(url)
d = dict(name=name, url=url, mimetype=mimetype, **kwargs)
self['versions'].append(d) | Add a version of the text of this bill.
:param name: a name given to this version of the text, e.g.
'As Introduced', 'Version 2', 'As amended', 'Enrolled'
:param url: the location of this version on the legislative website.
:param mimetype: MIME type of the document
:param on_duplicate: What to do if a duplicate is seen:
error - default option, raises a ValueError
ignore - add the document twice (rarely the right choice)
use_new - use the new name, removing the old document
use_old - use the old name, not adding the new document
If multiple formats are provided, a good rule of thumb is to
prefer text, followed by html, followed by pdf/word/etc. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/bills.py#L99-L132 |
openstates/billy | billy/scrape/bills.py | Bill.add_action | def add_action(self, actor, action, date, type=None, committees=None,
legislators=None, **kwargs):
"""
Add an action that was performed on this bill.
:param actor: a string representing who performed the action.
If the action is associated with one of the chambers this
should be 'upper' or 'lower'. Alternatively, this could be
the name of a committee, a specific legislator, or an outside
actor such as 'Governor'.
:param action: a string representing the action performed, e.g.
'Introduced', 'Signed by the Governor', 'Amended'
:param date: the date/time this action was performed.
:param type: a type classification for this action
:param committees: a committee or list of committees to associate with
this action
"""
def _cleanup_list(obj, default):
if not obj:
obj = default
elif isinstance(obj, string_types):
obj = [obj]
elif not isinstance(obj, list):
obj = list(obj)
return obj
type = _cleanup_list(type, ['other'])
committees = _cleanup_list(committees, [])
legislators = _cleanup_list(legislators, [])
if 'committee' in kwargs:
raise ValueError("invalid param 'committee' passed to add_action, "
"must use committees")
if isinstance(committees, string_types):
committees = [committees]
related_entities = [] # OK, let's work some magic.
for committee in committees:
related_entities.append({
"type": "committee",
"name": committee
})
for legislator in legislators:
related_entities.append({
"type": "legislator",
"name": legislator
})
self['actions'].append(dict(actor=actor, action=action,
date=date, type=type,
related_entities=related_entities,
**kwargs)) | python | def add_action(self, actor, action, date, type=None, committees=None,
legislators=None, **kwargs):
"""
Add an action that was performed on this bill.
:param actor: a string representing who performed the action.
If the action is associated with one of the chambers this
should be 'upper' or 'lower'. Alternatively, this could be
the name of a committee, a specific legislator, or an outside
actor such as 'Governor'.
:param action: a string representing the action performed, e.g.
'Introduced', 'Signed by the Governor', 'Amended'
:param date: the date/time this action was performed.
:param type: a type classification for this action
        :param committees: a committee or list of committees to associate with
this action
"""
def _cleanup_list(obj, default):
if not obj:
obj = default
elif isinstance(obj, string_types):
obj = [obj]
elif not isinstance(obj, list):
obj = list(obj)
return obj
type = _cleanup_list(type, ['other'])
committees = _cleanup_list(committees, [])
legislators = _cleanup_list(legislators, [])
if 'committee' in kwargs:
raise ValueError("invalid param 'committee' passed to add_action, "
"must use committees")
if isinstance(committees, string_types):
committees = [committees]
related_entities = [] # OK, let's work some magic.
for committee in committees:
related_entities.append({
"type": "committee",
"name": committee
})
for legislator in legislators:
related_entities.append({
"type": "legislator",
"name": legislator
})
self['actions'].append(dict(actor=actor, action=action,
date=date, type=type,
related_entities=related_entities,
**kwargs)) | Add an action that was performed on this bill.
:param actor: a string representing who performed the action.
If the action is associated with one of the chambers this
should be 'upper' or 'lower'. Alternatively, this could be
the name of a committee, a specific legislator, or an outside
actor such as 'Governor'.
:param action: a string representing the action performed, e.g.
'Introduced', 'Signed by the Governor', 'Amended'
:param date: the date/time this action was performed.
:param type: a type classification for this action
;param committees: a committee or list of committees to associate with | :param committees: a committee or list of committees to associate with |
this action | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/bills.py#L134-L188 |
openstates/billy | billy/scrape/bills.py | Bill.add_companion | def add_companion(self, bill_id, session=None, chamber=None):
"""
Associate another bill with this one.
If session isn't set it will be set to self['session'].
"""
companion = {'bill_id': bill_id,
'session': session or self['session'],
'chamber': chamber}
self['companions'].append(companion) | python | def add_companion(self, bill_id, session=None, chamber=None):
"""
Associate another bill with this one.
If session isn't set it will be set to self['session'].
"""
companion = {'bill_id': bill_id,
'session': session or self['session'],
'chamber': chamber}
self['companions'].append(companion) | Associate another bill with this one.
If session isn't set it will be set to self['session']. | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/scrape/bills.py#L203-L212 |
openstates/billy | billy/web/public/views/misc.py | homepage | def homepage(request):
'''
Context:
all_metadata
Templates:
- billy/web/public/homepage.html
'''
all_metadata = db.metadata.find()
return render(request, templatename('homepage'),
dict(all_metadata=all_metadata)) | python | def homepage(request):
'''
Context:
all_metadata
Templates:
- billy/web/public/homepage.html
'''
all_metadata = db.metadata.find()
return render(request, templatename('homepage'),
dict(all_metadata=all_metadata)) | Context:
all_metadata
Templates:
- billy/web/public/homepage.html | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/misc.py#L24-L35 |
openstates/billy | billy/web/public/views/misc.py | downloads | def downloads(request):
'''
Context:
- all_metadata
Templates:
- billy/web/public/downloads.html
'''
all_metadata = sorted(db.metadata.find(), key=lambda x: x['name'])
return render(request, 'billy/web/public/downloads.html',
{'all_metadata': all_metadata}) | python | def downloads(request):
'''
Context:
- all_metadata
Templates:
- billy/web/public/downloads.html
'''
all_metadata = sorted(db.metadata.find(), key=lambda x: x['name'])
return render(request, 'billy/web/public/downloads.html',
{'all_metadata': all_metadata}) | Context:
- all_metadata
Templates:
- billy/web/public/downloads.html | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/misc.py#L38-L48 |
openstates/billy | billy/web/public/views/misc.py | find_your_legislator | def find_your_legislator(request):
'''
Context:
- request
- lat
- long
- located
- legislators
Templates:
- billy/web/public/find_your_legislator_table.html
'''
# check if lat/lon are set
# if leg_search is set, they most likely don't have ECMAScript enabled.
# XXX: fallback behavior here for alpha.
get = request.GET
context = {}
template = 'find_your_legislator'
context['request'] = ""
if "q" in get:
context['request'] = get['q']
if "lat" in get and "lon" in get:
# We've got a passed lat/lon. Let's build off it.
lat = get['lat']
lon = get['lon']
context['lat'] = lat
context['lon'] = lon
context['located'] = True
qurl = "%slegislators/geo/?long=%s&lat=%s&apikey=%s" % (
billy_settings.API_BASE_URL,
lon,
lat,
getattr(billy_settings, 'API_KEY', '')
)
leg_resp = json.load(urllib2.urlopen(qurl, timeout=0.5))
# allow limiting lookup to region for region map views
if 'abbr' in get:
leg_resp = [leg for leg in leg_resp
if leg[billy_settings.LEVEL_FIELD] == get['abbr']]
context['abbr'] = get['abbr']
# Also, allow filtering by chamber
if 'chamber' in get:
leg_resp = [leg for leg in leg_resp
if leg['chamber'] == get['chamber']]
context['chamber'] = get['chamber']
if "boundary" in get:
return HttpResponse(json.dumps([]))
context['legislators'] = map(Legislator, leg_resp)
template = 'find_your_legislator_table'
return render(request, templatename(template), context) | python | def find_your_legislator(request):
'''
Context:
- request
- lat
- long
- located
- legislators
Templates:
- billy/web/public/find_your_legislator_table.html
'''
# check if lat/lon are set
# if leg_search is set, they most likely don't have ECMAScript enabled.
# XXX: fallback behavior here for alpha.
get = request.GET
context = {}
template = 'find_your_legislator'
context['request'] = ""
if "q" in get:
context['request'] = get['q']
if "lat" in get and "lon" in get:
# We've got a passed lat/lon. Let's build off it.
lat = get['lat']
lon = get['lon']
context['lat'] = lat
context['lon'] = lon
context['located'] = True
qurl = "%slegislators/geo/?long=%s&lat=%s&apikey=%s" % (
billy_settings.API_BASE_URL,
lon,
lat,
getattr(billy_settings, 'API_KEY', '')
)
leg_resp = json.load(urllib2.urlopen(qurl, timeout=0.5))
# allow limiting lookup to region for region map views
if 'abbr' in get:
leg_resp = [leg for leg in leg_resp
if leg[billy_settings.LEVEL_FIELD] == get['abbr']]
context['abbr'] = get['abbr']
# Also, allow filtering by chamber
if 'chamber' in get:
leg_resp = [leg for leg in leg_resp
if leg['chamber'] == get['chamber']]
context['chamber'] = get['chamber']
if "boundary" in get:
return HttpResponse(json.dumps([]))
context['legislators'] = map(Legislator, leg_resp)
template = 'find_your_legislator_table'
return render(request, templatename(template), context) | Context:
- request
- lat
- long
- located
- legislators
Templates:
- billy/web/public/find_your_legislator_table.html | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/misc.py#L51-L110 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.