sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
---|---|---|
def process_node(e):
    """
    Flatten a raw OSM JSON node element into a dict suitable for a
    Pandas DataFrame row.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
        Always contains 'id', 'lat', 'lon'; additionally any tags listed
        in ``config.settings.keep_osm_tags`` that are present on the node.
    """
    node = {'id': e['id'], 'lat': e['lat'], 'lon': e['lon']}
    if 'tags' in e:
        tags = e['tags']
        # downloaded data may carry NaN in place of a tag mapping
        if tags is not np.nan:
            for key, value in list(tags.items()):
                if key in config.settings.keep_osm_tags:
                    node[key] = value
    return node
def process_way(e):
    """
    Flatten a raw OSM JSON way element into a way-metadata dict plus a
    list of way/node membership dicts, both suitable for Pandas DataFrames.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
        'id' plus any tags listed in ``config.settings.keep_osm_tags``.
    waynodes : list of dict
        One ``{'way_id': ..., 'node_id': ...}`` entry per node in the way.
    """
    way = {'id': e['id']}
    if 'tags' in e:
        tags = e['tags']
        # downloaded data may carry NaN in place of a tag mapping
        if tags is not np.nan:
            for key, value in list(tags.items()):
                if key in config.settings.keep_osm_tags:
                    way[key] = value
    # record the ordered membership of nodes in this way
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of nodes, ways, and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query returned no elements at all.
    """
    elements = data['elements']
    if not elements:
        raise RuntimeError('OSM query results contain no data.')
    node_records = []
    way_records = []
    waynode_records = []
    for element in elements:
        if element['type'] == 'node':
            node_records.append(process_node(element))
        elif element['type'] == 'way':
            way, members = process_way(element)
            way_records.append(way)
            waynode_records.extend(members)
    return (pd.DataFrame.from_records(node_records, index='id'),
            pd.DataFrame.from_records(way_records, index='id'),
            pd.DataFrame.from_records(waynode_records, index='way_id'))
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Download OSM data inside a bounding box and parse it into DataFrames.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        'walk' includes roadways where pedestrians are allowed plus
        pedestrian pathways; 'drive' includes driveable roadways.
    timeout : int
        timeout interval for requests, also passed to the Overpass API
    memory : int
        server memory allocation size for the query, in bytes; if None the
        server uses its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the geometry's units:
        larger polygons are split into multiple Overpass API queries
        (default 50,000 * 50,000 units, i.e. 50km x 50km when in meters)
    custom_osm_filter : string, optional
        custom arguments for the way["highway"] Overpass query, e.g.
        '["highway"="service"]' for service roads

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    raw_osm_data = osm_net_download(
        lat_min=lat_min, lat_max=lat_max, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm_data)
def intersection_nodes(waynodes):
    """
    Return the set of node IDs that appear in two or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    occurrences = waynodes.node_id.value_counts()
    repeated = occurrences[occurrences > 1]
    return set(repeated.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.

    Raises
    ------
    Exception
        If no connected node pairs result from the query.
    """
    start_time = time.time()

    def _edge_record(from_node, to_node, distance, way_row):
        # Build one edge dict, carrying over any configured OSM tags that
        # are present on the parent way.
        record = {'from_id': from_node,
                  'to_id': to_node,
                  'distance': distance}
        for tag in config.settings.keep_osm_tags:
            try:
                record[tag] = way_row[tag]
            except KeyError:
                pass
        return record

    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for way_id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(way_id).node_id.values
        # only connect nodes that are shared with at least one other way
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        # walk consecutive (from, to) pairs along the way
        for from_node, to_node in zip(nodes_in_way, nodes_in_way[1:]):
            if from_node == to_node:
                continue
            fn = nodes.loc[from_node]
            tn = nodes.loc[to_node]
            distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
            pairs.append(_edge_record(from_node, to_node, distance, row))
            if not two_way:
                # one-way networks need an explicit reverse edge
                pairs.append(_edge_record(to_node, from_node, distance, row))
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                             pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Build a Pandana-ready street network graph (node and edge tables) from
    a bounding lat/lon box. Supply either the four scalar coordinates
    (lat_min, lng_min, lat_max, lng_max) or the ``bbox`` tuple, not both.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
        coordinates of the bounding box; must be None if ``bbox`` is given
    bbox : tuple
        4 element tuple (lng_max, lat_min, lng_min, lat_max), e.g.
        (-122.304611, 37.798933, -122.263412, 37.822802); a CSV-format
        bbox can be obtained from http://boundingbox.klokantech.com/.
        Must be None if the four scalar coordinates are given.
    network_type : {'walk', 'drive'}, optional
        'walk' includes roadways where pedestrians are allowed plus
        pedestrian pathways; 'drive' includes driveable roadways. For a
        custom definition see ``custom_osm_filter``. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs occur only once.
    timeout : int, optional
        timeout interval for requests, also passed to the Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes; if None the
        server uses its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the geometry's units;
        larger polygons are split into multiple Overpass API queries
    custom_osm_filter : string, optional
        custom arguments for the way["highway"] Overpass query, e.g.
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert all(coord is None
                   for coord in (lat_min, lng_min, lat_max, lng_max)), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack in the klokantech CSV ordering
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert all(isinstance(coord, float)
               for coord in (lat_min, lng_min, lat_max, lng_max)), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    raw_nodes, raw_ways, raw_waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(raw_nodes), len(raw_ways)))
    edgesfinal = node_pairs(raw_nodes, raw_ways, raw_waynodes,
                            two_way=two_way)
    # keep only the nodes that ended up participating in at least one edge
    used_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = raw_nodes.loc[used_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # Pandana expects x/y column names and an explicit id column
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'},
                      inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
def read_lines(in_file):
    """Return the lines of the given markdown file as a list of strings."""
    with open(in_file, 'r') as handle:
        return handle.read().split('\n')
def remove_lines(lines, remove=('[[back to top]', '<a class="mk-toclify"')):
    """
    Return a copy of ``lines`` without previously inserted
    [back to top] links and <a id> anchor tags (any line starting with
    one of the ``remove`` prefixes is dropped).
    """
    if not remove:
        return lines[:]
    return [line for line in lines if not line.startswith(remove)]
def slugify_headline(line, remove_dashes=False):
    """
    Split a Markdown headline into text, anchor slug, and level.

    Returns a 3-element list: the '#'-stripped headline text, a slug
    usable in <a id=''></a> anchor tags, and the headline level as an
    integer, e.g.

        slugify_headline('### some header lvl3')
        -> ['Some header lvl3', 'some-header-lvl3', 3]
    """
    no_trailing = line.rstrip('#')
    no_hashes = no_trailing.lstrip('#')
    # the level is the number of leading '#' characters
    level = len(no_trailing) - len(no_hashes)
    headline = no_hashes.strip()
    # drop dots and slashes entirely, then turn every other
    # non-valid character into a dash
    cleaned = headline.replace('.', '').replace('/', '')
    dashed = ''.join(c if c in VALIDS else '-' for c in cleaned)
    slug = dashed.lower()
    slug = re.sub(r'(-)\1+', r'\1', slug)  # collapse duplicate dashes
    slug = slug.strip('-')  # strip dashes from start and end
    # exception: GitHub renders '&' between dashes as a double dash
    slug = slug.replace('-&-', '--')
    if remove_dashes:
        slug = slug.replace('-', '')
    return [headline, slug, level]
def tag_and_collect(lines, id_tag=True, back_links=False, exclude_h=None, remove_dashes=False):
    """
    Find headlines in a Markdown document and create anchor tags for them.

    Keyword arguments:
        lines: list of strings, one per line of the Markdown document.
        id_tag: if True, inserts <a id> anchor tags (not needed on GitHub)
        back_links: if True, adds "back to top" links below each headline
        exclude_h: header levels to exclude, e.g. [2, 3] skips level 2
            and 3 headings.
        remove_dashes: passed through to slugify_headline.

    Returns a tuple of 2 lists:
        1st: the input lines with <a id="..."></a> anchor tags inserted
            above headline lines (when id_tag is True).
        2nd: 3-element sublists [headline_text, slug, level] for every
            collected headline, e.g.
            [['some header lvl3', 'some-header-lvl3', 3], ...]
    """
    out_contents = []
    headlines = []
    for line in lines:
        saw_headline = False
        orig_len = len(line)
        line = line.lstrip()
        if line.startswith(('# ', '## ', '### ', '#### ', '##### ', '###### ')):
            # comply with new markdown standards
            # not a headline if '#' not followed by whitespace '##no-header':
            if not line.lstrip('#').startswith(' '):
                continue
            # not a headline if more than 6 '#':
            if len(line) - len(line.lstrip('#')) > 6:
                continue
            # headers can be indented by at most 3 spaces:
            if orig_len - len(line) > 3:
                continue
            # ignore empty headers
            if not set(line) - {'#', ' '}:
                continue
            saw_headline = True
            slugified = slugify_headline(line, remove_dashes)
            if not exclude_h or not slugified[-1] in exclude_h:
                if id_tag:
                    # BUG FIX: this previously rebound the ``id_tag``
                    # parameter itself to the anchor string, shadowing the
                    # boolean flag (it only kept working because non-empty
                    # strings are truthy). Use a separate local instead.
                    anchor = '<a class="mk-toclify" id="%s"></a>'\
                        % (slugified[1])
                    out_contents.append(anchor)
                headlines.append(slugified)
        out_contents.append(line)
        if back_links and saw_headline:
            out_contents.append('[[back to top](#table-of-contents)]')
    return out_contents, headlines
def positioning_headlines(headlines):
    """
    Left-align the table of contents: when no level-1 headline exists,
    shift every headline up one level. Mutates and returns ``headlines``.
    """
    has_top_level = any(row[-1] == 1 for row in headlines)
    if not has_top_level:
        for row in headlines:
            row[-1] -= 1
    return headlines
def create_toc(headlines, hyperlink=True, top_link=False, no_toc_header=False):
    """
    Build the table-of-contents lines from the headline list produced by
    ``tag_and_collect``.

    Keyword Arguments:
        headlines: list of [text, slug, level] sublists,
            e.g. ['Some header lvl3', 'some-header-lvl3', 3]
        hyperlink: if True, emit Markdown hyperlinks,
            e.g. '- [Some header lvl1](#some-header-lvl1)'
        top_link: if True, prepend an id tag so back-to-top links can
            target the table of contents itself
        no_toc_header: if True, suppress the TOC header line

    Returns a list of TOC lines in Markdown format,
        e.g. ['    - [Some header lvl3](#some-header-lvl3)', ...]
    """
    processed = []
    if not no_toc_header:
        if top_link:
            processed.append('<a class="mk-toclify" id="table-of-contents"></a>\n')
        processed.append('# Table of Contents')
    for text, slug, level in headlines:
        indent = (level - 1) * '    '
        if hyperlink:
            processed.append('%s- [%s](#%s)' % (indent, text, slug))
        else:
            processed.append('%s- %s' % (indent, text))
    processed.append('\n')
    return processed
def build_markdown(toc_headlines, body, spacer=0, placeholder=None):
"""
Returns a string with the Markdown output contents incl.
the table of contents.
Keyword arguments:
toc_headlines: lines for the table of contents
as created by the create_toc function.
body: contents of the Markdown file including
ID-anchor tags as returned by the
tag_and_collect function.
spacer: Adds vertical space after the table
of contents. Height in pixels.
placeholder: If a placeholder string is provided, the placeholder
will be replaced by the TOC instead of inserting the TOC at
the top of the document
"""
if spacer:
spacer_line = ['\n<div style="height:%spx;"></div>\n' % (spacer)]
toc_markdown = "\n".join(toc_headlines + spacer_line)
else:
toc_markdown = "\n".join(toc_headlines)
body_markdown = "\n".join(body).strip()
if placeholder:
markdown = body_markdown.replace(placeholder, toc_markdown)
else:
markdown = toc_markdown + body_markdown
return markdown | Returns a string with the Markdown output contents incl.
the table of contents.
Keyword arguments:
toc_headlines: lines for the table of contents
as created by the create_toc function.
body: contents of the Markdown file including
ID-anchor tags as returned by the
tag_and_collect function.
spacer: Adds vertical space after the table
of contents. Height in pixels.
placeholder: If a placeholder string is provided, the placeholder
will be replaced by the TOC instead of inserting the TOC at
the top of the document | entailment |
def output_markdown(markdown_cont, output_file):
    """Write the Markdown string to ``output_file`` when a path is given."""
    if not output_file:
        return
    with open(output_file, 'w') as out:
        out.write(markdown_cont)
def markdown_toclify(input_file, output_file=None, github=False,
                     back_to_top=False, nolink=False,
                     no_toc_header=False, spacer=0, placeholder=None,
                     exclude_h=None, remove_dashes=False):
    """Add a table of contents to a markdown file.

    Parameters
    -----------
    input_file: str
        Path to the markdown input file.
    output_file: str (default: None)
        Path to the markdown output file; skipped when None.
    github: bool (default: False)
        Uses GitHub TOC syntax (no anchor tags) if True.
    back_to_top: bool (default: False)
        Inserts back-to-top links below headings if True.
    nolink: bool (default: False)
        Creates the table of contents without internal links if True.
    no_toc_header: bool (default: False)
        Suppresses the Table of Contents header if True.
    spacer: int (default: 0)
        Inserts horizontal space (in pixels) after the table of contents.
    placeholder: str (default: None)
        Inserts the TOC at the placeholder string instead of at the
        top of the document.
    exclude_h: list (default: None)
        Header levels to ignore in the TOC, e.g. [2, 3].
    remove_dashes: bool (default: False)
        Removes dashes from headline slugs.

    Returns
    -----------
    cont: str
        Markdown contents including the TOC.
    """
    raw = read_lines(input_file)
    # strip artifacts from any previous run before re-tagging
    cleaned = remove_lines(raw, remove=('[[back to top]',
                                        '<a class="mk-toclify"'))
    tagged_body, found_headlines = tag_and_collect(
        cleaned,
        id_tag=not github,
        back_links=back_to_top,
        exclude_h=exclude_h,
        remove_dashes=remove_dashes
    )
    aligned_headlines = positioning_headlines(found_headlines)
    toc_lines = create_toc(aligned_headlines,
                           hyperlink=not nolink,
                           top_link=not nolink and not github,
                           no_toc_header=no_toc_header)
    # without links the anchor tags are pointless -- use the plain body
    body = cleaned if nolink else tagged_body
    cont = build_markdown(toc_headlines=toc_lines,
                          body=body,
                          spacer=spacer,
                          placeholder=placeholder)
    if output_file:
        output_markdown(cont, output_file)
    return cont
def url_parse(name):
    """
    Normalize a GitHub URL with any supported prefix down to 'user/repo'.
    Exits via ``exception()`` when 'github.com' appears behind an
    unrecognized prefix; names without 'github.com' pass through with
    only a trailing slash stripped.
    """
    position = name.find("github.com")
    if position >= 0:
        if position != 0:
            # only the known www/http/https prefixes may precede the domain
            known_prefix = (name.find("www.github.com") == 0 or
                            name.find("http://github.com") == 0 or
                            name.find("https://github.com") == 0)
            if not known_prefix:
                exception()
                sys.exit(0)
        # skip past 'github.com/' to leave just 'user/repo...'
        name = name[position + 11:]
    return name[:-1] if name.endswith('/') else name
def get_req(url):
    """Perform an authenticated GET request; return the UTF-8 decoded body.

    Calls ``exception()`` and exits on an HTTP error response.
    """
    request = urllib.request.Request(url)
    request.add_header('Authorization', 'token %s' % API_TOKEN)
    try:
        body = urllib.request.urlopen(request).read()
    except urllib.error.HTTPError:
        exception()
        sys.exit(0)
    return body.decode('utf-8')
def geturl_req(url):
    """Perform an authenticated GET and return the final (redirected) URL.

    Used for endpoints that answer with a 302. Calls ``exception()`` and
    exits on an HTTP error response.
    """
    request = urllib.request.Request(url)
    request.add_header('Authorization', 'token %s' % API_TOKEN)
    try:
        final_url = urllib.request.urlopen(request).geturl()
    except urllib.error.HTTPError:
        exception()
        sys.exit(0)
    return final_url
def main():
    """
    Parse command-line arguments and dispatch the requested GitHub API
    query, printing the result (a table or raw text) to stdout.
    """
    parser = argparse.ArgumentParser(
        description='Github within the Command Line')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-n', '--url', type=str,
                       help="Get repos from the user profile's URL")
    group.add_argument('-r', '--recursive', type=str,
                       help="Get the file structure from the repo link")
    group.add_argument('-R', '--readme', type=str,
                       help="Get the raw version of the repo readme from repo link")
    group.add_argument('-re', '--releases', type=str,
                       help="Get the list of releases from repo link")
    group.add_argument('-dt', '--tarball', type=str,
                       help="Download the tarball of the given repo")
    group.add_argument('-dz', '--zipball', type=str,
                       help="Download the zipball of the given repo")
    group.add_argument('-op', '--openfile', type=str,
                       help="Show the contents of the given file in a repo")
    group.add_argument('-f', '--followers', type=str,
                       help="Get followers of the user")
    group.add_argument('-fo', '--following', type=str,
                       help="Get people following the user")
    group.add_argument('-c', '--contributors', type=str,
                       help="Get contributors of a repo")
    if len(sys.argv) == 1:
        # no arguments: show usage instead of failing
        parser.print_help()
        return
    args = parser.parse_args()
    # --- build the API URL for the requested action ---
    # URL
    if args.url:
        name = url_parse(args.url)
        url = GITHUB_API + 'users/' + name + '/repos'
    # TREE
    if args.recursive:
        name = url_parse(args.recursive)
        url = GITHUB_API + 'repos/' + name + '/branches/master'
        # first resolve the SHA of master's tree, then fetch it recursively
        response = get_req(url)
        jsondata = json.loads(response)
        sha = jsondata['commit']['commit']['tree']['sha']
        url = GITHUB_API + 'repos/' + name + '/git/trees/' + sha + '?recursive=1'
    # README
    if args.readme:
        name = url_parse(args.readme)
        url = GITHUB_API + 'repos/' + name + '/readme'
    # RELEASES
    if args.releases:
        name = url_parse(args.releases)
        url = GITHUB_API + 'repos/' + name + '/releases'
    # TARBALL/ZIPBALL
    if args.tarball or args.zipball:
        if args.tarball:
            key = '/tarball/'
            name = url_parse(args.tarball)
        if args.zipball:
            key = '/zipball/'
            name = url_parse(args.zipball)
        url = GITHUB_API + 'repos/' + name + key + 'master'
    # OPEN ONE FILE
    if args.openfile:
        # split 'user/repo/path...' into its three components
        name = url_parse(args.openfile)
        position = name.find('/')
        user = name[:position+1]
        rest = name[position+1:]
        position = rest.find('/')
        repo = rest[:position+1]
        rest = rest[position+1:]
        url = GITHUB_API + 'repos/' + user + repo + 'contents/' + rest
    # GET RESPONSES
    # TARBALL/ZIPBALL: the API answers with a redirect to the archive
    if args.tarball or args.zipball:
        response_url = geturl_req(url)
        position = name.find('/')
        name = name[position+1:]
        if args.tarball:
            name = name+'.tar.gz'
        if args.zipball:
            name = name+'.zip'
        print("\nDownloading " + name + '...\n')
        urllib.request.urlretrieve(response_url, name)
        print(name + ' has been saved\n')
        return
    # FOLLOWERS
    if args.followers:
        name = url_parse(args.followers)
        url = GITHUB_API + 'users/' + name + '/followers'
    # FOLLOWING
    if args.following:
        name = url_parse(args.following)
        url = GITHUB_API + 'users/' + name + '/following'
    # CONTRIBUTORS
    if args.contributors:
        name = url_parse(args.contributors)
        url = GITHUB_API + 'repos/' + name + '/contributors'
    # OTHER OPTIONS: fetch once, then render per action
    response = get_req(url)
    jsondata = json.loads(response)
    # USERNAME and URL
    if args.url:
        table = PrettyTable([" Repository ", "★ Star"])
        table.align[" Repository "] = "l"
        for i in jsondata:
            table.add_row([i['name'], i['stargazers_count']])
        print(table)
    # RECURSIVE TREE
    if args.recursive:
        table = PrettyTable([" File/Folder ", " Size (Bytes) "])
        table.align[" File/Folder "] = "l"
        for i in jsondata['tree']:
            size = '-'
            path = i['path']+'/'
            if i['type'] == 'blob':
                # blobs are files: show their size and drop the trailing '/'
                size = i['size']
                path = path[:-1]
            table.add_row([path, size])
        print(table)
    # README
    if args.readme:
        print(base64.b64decode(jsondata['content']).decode('utf-8'))
    # RELEASES
    if args.releases:
        table = PrettyTable([" Release name ", " Release Date ", " Release Time "])
        for i in jsondata:
            time = str(dateutil.parser.parse(i['published_at']))
            date = time[:10]
            time = time[11:]
            time = time[:5]
            time = time + ' UTC'
            table.add_row([i['tag_name'], date, time])
        print(table)
    # OPEN ONE FILE
    if args.openfile:
        try:
            print(base64.b64decode(jsondata['content']).decode('utf-8'))
            return
        # BUG FIX: was a bare ``except:`` that swallowed every error.
        # A directory URL yields a JSON array (TypeError on ['content'])
        # or a dict without that key (KeyError) -- catch only those.
        except (TypeError, KeyError):
            print("\nDirectory URL was given, hence its contents will be displayed\n")
            table = PrettyTable(["Folder Contents"])
            for i in jsondata:
                table.add_row([i['name']])
            print(table)
    # GET FOLLOWERS
    if args.followers:
        table = PrettyTable([" FOLLOWERS "])
        table.align[" FOLLOWERS "] = "l"
        for i in jsondata:
            table.add_row([i['login']])
        print("Number of followers:"+str(len(jsondata)))
        print(table)
    # GET FOLLOWING
    if args.following:
        table = PrettyTable([" FOLLOWING "])
        table.align[" FOLLOWING "] = "l"
        for i in jsondata:
            table.add_row([i['login']])
        print("Number of following:"+str(len(jsondata)))
        print(table)
    # GET CONTRIBUTORS
    if args.contributors:
        table = PrettyTable([" CONTRIBUTORS "])
        table.align[" CONTRIBUTORS "] = "l"
        for i in jsondata:
            table.add_row([i['login']])
        print("Number of contributors:"+str(len(jsondata)))
        print(table)
def do_capture(parser, token):
    """
    Capture the contents of a tag output.
    Usage:
    .. code-block:: html+django
        {% capture %}..{% endcapture %}                    # output in {{ capture }}
        {% capture silent %}..{% endcapture %}             # output in {{ capture }} only
        {% capture as varname %}..{% endcapture %}         # output in {{ varname }}
        {% capture as varname silent %}..{% endcapture %}  # output in {{ varname }} only
    For example:
    .. code-block:: html+django
        {# Allow templates to override the page title/description #}
        <meta name="description" content="{% capture as meta_description %}{% block meta-description %}{% endblock %}{% endcapture %}" />
        <title>{% capture as meta_title %}{% block meta-title %}Untitled{% endblock %}{% endcapture %}</title>
    """
    bits = token.split_contents()
    # Defaults double as sentinels: when a positional token is absent, the
    # keyword check below compares against these expected literal values.
    t_as = 'as'
    t_silent = 'silent'
    var = 'capture'
    silent = False
    num_bits = len(bits)
    # Fix: compare against num_bits consistently instead of re-calling
    # len(bits); also drop the dead 'else' that reassigned the defaults.
    if num_bits > 4:
        raise TemplateSyntaxError("'capture' node supports '[as variable] [silent]' parameters.")
    elif num_bits == 4:
        t_name, t_as, var, t_silent = bits
        silent = True
    elif num_bits == 3:
        t_name, t_as, var = bits
    elif num_bits == 2:
        t_name, t_silent = bits
        silent = True
    # Validate that the literal tokens matched the expected keywords.
    if t_silent != 'silent' or t_as != 'as':
        raise TemplateSyntaxError("'capture' node expects 'as variable' or 'silent' syntax.")
    nodelist = parser.parse(('endcapture',))
    parser.delete_first_token()
    return CaptureNode(nodelist, var, silent)
Usage:
.. code-block:: html+django
{% capture %}..{% endcapture %} # output in {{ capture }}
{% capture silent %}..{% endcapture %} # output in {{ capture }} only
{% capture as varname %}..{% endcapture %} # output in {{ varname }}
{% capture as varname silent %}..{% endcapture %} # output in {{ varname }} only
For example:
.. code-block:: html+django
{# Allow templates to override the page title/description #}
<meta name="description" content="{% capture as meta_description %}{% block meta-description %}{% endblock %}{% endcapture %}" />
<title>{% capture as meta_title %}{% block meta-title %}Untitled{% endblock %}{% endcapture %}</title>
{# copy the values to the Social Media meta tags #}
<meta property="og:description" content="{% block og-description %}{{ meta_description }}{% endblock %}" />
<meta name="twitter:title" content="{% block twitter-title %}{{ meta_title }}{% endblock %}" /> | entailment |
def _parse_special_fields(self, data):
"""
Helper method that parses special fields to Python objects
:param data: response from Monzo API request
:type data: dict
"""
self.created = parse_date(data.pop('created'))
if data.get('settled'): # Not always returned
self.settled = parse_date(data.pop('settled'))
# Merchant field can contain either merchant ID or the whole object
if (data.get('merchant') and
not isinstance(data['merchant'], six.text_type)):
self.merchant = MonzoMerchant(data=data.pop('merchant')) | Helper method that parses special fields to Python objects
:param data: response from Monzo API request
:type data: dict | entailment |
def _save_token_on_disk(self):
"""Helper function that saves the token on disk"""
token = self._token.copy()
# Client secret is needed for token refreshing and isn't returned
# as a pared of OAuth token by default
token.update(client_secret=self._client_secret)
with codecs.open(config.TOKEN_FILE_PATH, 'w', 'utf8') as f:
json.dump(
token, f,
ensure_ascii=False,
sort_keys=True,
indent=4,
) | Helper function that saves the token on disk | entailment |
def _get_oauth_token(self):
"""
Get Monzo access token via OAuth2 `authorization code` grant type.
Official docs:
https://monzo.com/docs/#acquire-an-access-token
:returns: OAuth 2 access token
:rtype: dict
"""
url = urljoin(self.api_url, '/oauth2/token')
oauth = OAuth2Session(
client_id=self._client_id,
redirect_uri=config.REDIRECT_URI,
)
token = oauth.fetch_token(
token_url=url,
code=self._auth_code,
client_secret=self._client_secret,
)
return token | Get Monzo access token via OAuth2 `authorization code` grant type.
Official docs:
https://monzo.com/docs/#acquire-an-access-token
:returns: OAuth 2 access token
:rtype: dict | entailment |
def _refresh_oath_token(self):
"""
Refresh Monzo OAuth 2 token.
Official docs:
https://monzo.com/docs/#refreshing-access
:raises UnableToRefreshTokenException: when token couldn't be refreshed
"""
url = urljoin(self.api_url, '/oauth2/token')
data = {
'grant_type': 'refresh_token',
'client_id': self._client_id,
'client_secret': self._client_secret,
'refresh_token': self._token['refresh_token'],
}
token_response = requests.post(url, data=data)
token = token_response.json()
# Not ideal, but that's how Monzo API returns errors
if 'error' in token:
raise CantRefreshTokenError(
"Unable to refresh the token: {}".format(token)
)
self._token = token
self._save_token_on_disk() | Refresh Monzo OAuth 2 token.
Official docs:
https://monzo.com/docs/#refreshing-access
:raises UnableToRefreshTokenException: when token couldn't be refreshed | entailment |
def _get_response(self, method, endpoint, params=None):
"""
Helper method to handle HTTP requests and catch API errors
:param method: valid HTTP method
:type method: str
:param endpoint: API endpoint
:type endpoint: str
:param params: extra parameters passed with the request
:type params: dict
:returns: API response
:rtype: Response
"""
url = urljoin(self.api_url, endpoint)
try:
response = getattr(self._session, method)(url, params=params)
# Check if Monzo API returned HTTP 401, which could mean that the
# token is expired
if response.status_code == 401:
raise TokenExpiredError
except TokenExpiredError:
# For some reason 'requests-oauthlib' automatic token refreshing
# doesn't work so we do it here semi-manually
self._refresh_oath_token()
self._session = OAuth2Session(
client_id=self._client_id,
token=self._token,
)
response = getattr(self._session, method)(url, params=params)
if response.status_code != requests.codes.ok:
raise MonzoAPIError(
"Something went wrong: {}".format(response.json())
)
return response | Helper method to handle HTTP requests and catch API errors
:param method: valid HTTP method
:type method: str
:param endpoint: API endpoint
:type endpoint: str
:param params: extra parameters passed with the request
:type params: dict
:returns: API response
:rtype: Response | entailment |
def whoami(self):
"""
Get information about the access token.
Official docs:
https://monzo.com/docs/#authenticating-requests
:returns: access token details
:rtype: dict
"""
endpoint = '/ping/whoami'
response = self._get_response(
method='get', endpoint=endpoint,
)
return response.json() | Get information about the access token.
Official docs:
https://monzo.com/docs/#authenticating-requests
:returns: access token details
:rtype: dict | entailment |
def accounts(self, refresh=False):
"""
Returns a list of accounts owned by the currently authorised user.
It's often used when deciding whether to require explicit account ID
or use the only available one, so we cache the response by default.
Official docs:
https://monzo.com/docs/#list-accounts
:param refresh: decides if the accounts information should be refreshed
:type refresh: bool
:returns: list of Monzo accounts
:rtype: list of MonzoAccount
"""
if not refresh and self._cached_accounts:
return self._cached_accounts
endpoint = '/accounts'
response = self._get_response(
method='get', endpoint=endpoint,
)
accounts_json = response.json()['accounts']
accounts = [MonzoAccount(data=account) for account in accounts_json]
self._cached_accounts = accounts
return accounts | Returns a list of accounts owned by the currently authorised user.
It's often used when deciding whether to require explicit account ID
or use the only available one, so we cache the response by default.
Official docs:
https://monzo.com/docs/#list-accounts
:param refresh: decides if the accounts information should be refreshed
:type refresh: bool
:returns: list of Monzo accounts
:rtype: list of MonzoAccount | entailment |
def balance(self, account_id=None):
"""
Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance
"""
if not account_id:
if len(self.accounts()) == 1:
account_id = self.accounts()[0].id
else:
raise ValueError("You need to pass account ID")
endpoint = '/balance'
response = self._get_response(
method='get', endpoint=endpoint,
params={
'account_id': account_id,
},
)
return MonzoBalance(data=response.json()) | Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance | entailment |
def pots(self, refresh=False):
"""
Returns a list of pots owned by the currently authorised user.
Official docs:
https://monzo.com/docs/#pots
:param refresh: decides if the pots information should be refreshed.
:type refresh: bool
:returns: list of Monzo pots
:rtype: list of MonzoPot
"""
if not refresh and self._cached_pots:
return self._cached_pots
endpoint = '/pots/listV1'
response = self._get_response(
method='get', endpoint=endpoint,
)
pots_json = response.json()['pots']
pots = [MonzoPot(data=pot) for pot in pots_json]
self._cached_pots = pots
return pots | Returns a list of pots owned by the currently authorised user.
Official docs:
https://monzo.com/docs/#pots
:param refresh: decides if the pots information should be refreshed.
:type refresh: bool
:returns: list of Monzo pots
:rtype: list of MonzoPot | entailment |
def transactions(self, account_id=None, reverse=True, limit=None):
"""
Returns a list of transactions on the user's account.
Official docs:
https://monzo.com/docs/#list-transactions
:param account_id: Monzo account ID
:type account_id: str
:param reverse: whether transactions should be in in descending order
:type reverse: bool
:param limit: how many transactions should be returned; None for all
:type limit: int
:returns: list of Monzo transactions
:rtype: list of MonzoTransaction
"""
if not account_id:
if len(self.accounts()) == 1:
account_id = self.accounts()[0].id
else:
raise ValueError("You need to pass account ID")
endpoint = '/transactions'
response = self._get_response(
method='get', endpoint=endpoint,
params={
'account_id': account_id,
},
)
# The API does not allow reversing the list or limiting it, so to do
# the basic query of 'get the latest transaction' we need to always get
# all transactions and do the reversing and slicing in Python
# I send Monzo an email, we'll se how they'll respond
transactions = response.json()['transactions']
if reverse:
transactions.reverse()
if limit:
transactions = transactions[:limit]
return [MonzoTransaction(data=t) for t in transactions] | Returns a list of transactions on the user's account.
Official docs:
https://monzo.com/docs/#list-transactions
:param account_id: Monzo account ID
:type account_id: str
:param reverse: whether transactions should be in in descending order
:type reverse: bool
:param limit: how many transactions should be returned; None for all
:type limit: int
:returns: list of Monzo transactions
:rtype: list of MonzoTransaction | entailment |
def transaction(self, transaction_id, expand_merchant=False):
"""
Returns an individual transaction, fetched by its id.
Official docs:
https://monzo.com/docs/#retrieve-transaction
:param transaction_id: Monzo transaction ID
:type transaction_id: str
:param expand_merchant: whether merchant data should be included
:type expand_merchant: bool
:returns: Monzo transaction details
:rtype: MonzoTransaction
"""
endpoint = '/transactions/{}'.format(transaction_id)
data = dict()
if expand_merchant:
data['expand[]'] = 'merchant'
response = self._get_response(
method='get', endpoint=endpoint, params=data,
)
return MonzoTransaction(data=response.json()['transaction']) | Returns an individual transaction, fetched by its id.
Official docs:
https://monzo.com/docs/#retrieve-transaction
:param transaction_id: Monzo transaction ID
:type transaction_id: str
:param expand_merchant: whether merchant data should be included
:type expand_merchant: bool
:returns: Monzo transaction details
:rtype: MonzoTransaction | entailment |
def launcher():
    """Parse command-line options and launch the SNMP agent simulator.

    Refuses to start while the real snmpd service is running, since both
    would compete for the SNMP port.  (Python 2 source; the ``print >>``
    syntax is intentional.)
    """
    parser = OptionParser()
    parser.add_option(
        '-f',
        '--file',
        dest='filename',
        default='agents.csv',
        help='snmposter configuration file'
    )
    options, args = parser.parse_args()
    factory = SNMPosterFactory()
    snmpd_status = subprocess.Popen(
        ["service", "snmpd", "status"],
        stdout=subprocess.PIPE
    ).communicate()[0]
    if "is running" in snmpd_status:
        # Fixed typo in the user-facing message: 'snmd' -> 'snmpd'.
        message = "snmpd service is running. Please stop it and try again."
        print >> sys.stderr, message
        sys.exit(1)
    try:
        factory.configure(options.filename)
    except IOError:
        print >> sys.stderr, "Error opening %s." % options.filename
        sys.exit(1)
    factory.start()
def get_auth_string(self):
"""Create auth string from credentials."""
auth_info = '{}:{}'.format(self.sauce_username, self.sauce_access_key)
return base64.b64encode(auth_info.encode('utf-8')).decode('utf-8') | Create auth string from credentials. | entailment |
def make_auth_headers(self, content_type):
"""Add authorization header."""
headers = self.make_headers(content_type)
headers['Authorization'] = 'Basic {}'.format(self.get_auth_string())
return headers | Add authorization header. | entailment |
def request(self, method, url, body=None, content_type='application/json'):
"""Send http request."""
headers = self.make_auth_headers(content_type)
connection = http_client.HTTPSConnection(self.apibase)
connection.request(method, url, body, headers=headers)
response = connection.getresponse()
data = response.read()
connection.close()
if response.status not in [200, 201]:
raise SauceException('{}: {}.\nSauce Status NOT OK'.format(
response.status, response.reason), response=response)
return json.loads(data.decode('utf-8')) | Send http request. | entailment |
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Access basic account information. | entailment |
def create_user(self, username, password, name, email):
"""Create a sub account."""
method = 'POST'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
body = json.dumps({'username': username, 'password': password,
'name': name, 'email': email, })
return self.client.request(method, endpoint, body) | Create a sub account. | entailment |
def get_concurrency(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/concurrency'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Check account concurrency limits. | entailment |
def get_subaccounts(self):
"""Get a list of sub accounts associated with a parent account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Get a list of sub accounts associated with a parent account. | entailment |
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Get a list of sibling accounts associated with provided account. | entailment |
def get_subaccount_info(self):
"""Get information about a sub account."""
method = 'GET'
endpoint = '/rest/v1/users/{}/subaccounts'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Get information about a sub account. | entailment |
def change_access_key(self):
"""Change access key of your account."""
method = 'POST'
endpoint = '/rest/v1/users/{}/accesskey/change'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | Change access key of your account. | entailment |
def get_activity(self):
"""Check account concurrency limits."""
method = 'GET'
endpoint = '/rest/v1/{}/activity'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Check account concurrency limits. | entailment |
def get_usage(self, start=None, end=None):
"""Access historical account usage data."""
method = 'GET'
endpoint = '/rest/v1/users/{}/usage'.format(self.client.sauce_username)
data = {}
if start:
data['start'] = start
if end:
data['end'] = end
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint) | Access historical account usage data. | entailment |
def get_platforms(self, automation_api='all'):
"""Get a list of objects describing all the OS and browser platforms
currently supported on Sauce Labs."""
method = 'GET'
endpoint = '/rest/v1/info/platforms/{}'.format(automation_api)
return self.client.request(method, endpoint) | Get a list of objects describing all the OS and browser platforms
currently supported on Sauce Labs. | entailment |
def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None,
output_format=None):
"""List jobs belonging to a specific user."""
method = 'GET'
endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username)
data = {}
if full is not None:
data['full'] = full
if limit is not None:
data['limit'] = limit
if skip is not None:
data['skip'] = skip
if start is not None:
data['from'] = start
if end is not None:
data['to'] = end
if output_format is not None:
data['format'] = output_format
if data:
endpoint = '?'.join([endpoint, urlencode(data)])
return self.client.request(method, endpoint) | List jobs belonging to a specific user. | entailment |
def update_job(self, job_id, build=None, custom_data=None,
name=None, passed=None, public=None, tags=None):
"""Edit an existing job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username,
job_id)
data = {}
if build is not None:
data['build'] = build
if custom_data is not None:
data['custom-data'] = custom_data
if name is not None:
data['name'] = name
if passed is not None:
data['passed'] = passed
if public is not None:
data['public'] = public
if tags is not None:
data['tags'] = tags
body = json.dumps(data)
return self.client.request(method, endpoint, body=body) | Edit an existing job. | entailment |
def stop_job(self, job_id):
"""Terminates a running job."""
method = 'PUT'
endpoint = '/rest/v1/{}/jobs/{}/stop'.format(
self.client.sauce_username, job_id)
return self.client.request(method, endpoint) | Terminates a running job. | entailment |
def get_job_asset_url(self, job_id, filename):
"""Get details about the static assets collected for a specific job."""
return 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'.format(
self.client.sauce_username, job_id, filename) | Get details about the static assets collected for a specific job. | entailment |
def get_auth_token(self, job_id, date_range=None):
"""Get an auth token to access protected job resources.
https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results
"""
key = '{}:{}'.format(self.client.sauce_username,
self.client.sauce_access_key)
if date_range:
key = '{}:{}'.format(key, date_range)
return hmac.new(key.encode('utf-8'), job_id.encode('utf-8'),
md5).hexdigest() | Get an auth token to access protected job resources.
https://wiki.saucelabs.com/display/DOCS/Building+Links+to+Test+Results | entailment |
def upload_file(self, filepath, overwrite=True):
"""Uploads a file to the temporary sauce storage."""
method = 'POST'
filename = os.path.split(filepath)[1]
endpoint = '/rest/v1/storage/{}/{}?overwrite={}'.format(
self.client.sauce_username, filename, "true" if overwrite else "false")
with open(filepath, 'rb') as filehandle:
body = filehandle.read()
return self.client.request(method, endpoint, body,
content_type='application/octet-stream') | Uploads a file to the temporary sauce storage. | entailment |
def get_stored_files(self):
"""Check which files are in your temporary storage."""
method = 'GET'
endpoint = '/rest/v1/storage/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Check which files are in your temporary storage. | entailment |
def get_tunnels(self):
"""Retrieves all running tunnels for a specific user."""
method = 'GET'
endpoint = '/rest/v1/{}/tunnels'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Retrieves all running tunnels for a specific user. | entailment |
def get_tunnel(self, tunnel_id):
"""Get information for a tunnel given its ID."""
method = 'GET'
endpoint = '/rest/v1/{}/tunnels/{}'.format(
self.client.sauce_username, tunnel_id)
return self.client.request(method, endpoint) | Get information for a tunnel given its ID. | entailment |
def apply(patch):
    """Apply a patch.
    The patch's :attr:`~Patch.obj` attribute is injected into the patch's
    :attr:`~Patch.destination` under the patch's :attr:`~Patch.name`.
    This is a wrapper around calling
    ``setattr(patch.destination, patch.name, patch.obj)``.
    Parameters
    ----------
    patch : gorilla.Patch
        Patch.
    Raises
    ------
    RuntimeError
        Overwriting an existing attribute is not allowed when the setting
        :attr:`Settings.allow_hit` is set to ``False``.
    Note
    ----
    If both the attributes :attr:`Settings.allow_hit` and
    :attr:`Settings.store_hit` are ``True`` but that the target attribute seems
    to have already been stored, then it won't be stored again to avoid losing
    the original attribute that was stored the first time around.
    """
    # Fall back to default settings when the patch does not carry its own.
    settings = Settings() if patch.settings is None else patch.settings
    # When a hit occurs due to an attribute at the destination already existing
    # with the patch's name, the existing attribute is referred to as 'target'.
    try:
        target = get_attribute(patch.destination, patch.name)
    except AttributeError:
        # No clash: the patch can be applied directly.
        pass
    else:
        if not settings.allow_hit:
            raise RuntimeError(
                "An attribute named '%s' already exists at the destination "
                "'%s'. Set a different name through the patch object to avoid "
                "a name clash or set the setting 'allow_hit' to True to "
                "overwrite the attribute. In the latter case, it is "
                "recommended to also set the 'store_hit' setting to True in "
                "order to store the original attribute under a different "
                "name so it can still be accessed."
                % (patch.name, patch.destination.__name__))
        if settings.store_hit:
            # Preserve the original attribute under a mangled name, but only
            # once — a second apply must not clobber the first stored copy.
            original_name = _ORIGINAL_NAME % (patch.name,)
            if not hasattr(patch.destination, original_name):
                setattr(patch.destination, original_name, target)
    setattr(patch.destination, patch.name, patch.obj)
The patch's :attr:`~Patch.obj` attribute is injected into the patch's
:attr:`~Patch.destination` under the patch's :attr:`~Patch.name`.
This is a wrapper around calling
``setattr(patch.destination, patch.name, patch.obj)``.
Parameters
----------
patch : gorilla.Patch
Patch.
Raises
------
RuntimeError
Overwriting an existing attribute is not allowed when the setting
:attr:`Settings.allow_hit` is set to ``True``.
Note
----
If both the attributes :attr:`Settings.allow_hit` and
:attr:`Settings.store_hit` are ``True`` but that the target attribute seems
to have already been stored, then it won't be stored again to avoid losing
the original attribute that was stored the first time around. | entailment |
def patch(destination, name=None, settings=None):
    """Decorator to create a patch.
    The object being decorated becomes the :attr:`~Patch.obj` attribute of the
    patch.
    Parameters
    ----------
    destination : object
        Patch destination.
    name : str
        Name of the attribute at the destination. Defaults to the decorated
        object's own name.
    settings : gorilla.Settings
        Settings.
    Returns
    -------
    object
        The decorated object.
    See Also
    --------
    :class:`Patch`.
    """
    def decorator(wrapped):
        base = _get_base(wrapped)
        attribute_name = base.__name__ if name is None else name
        new_patch = Patch(destination, attribute_name, wrapped,
                          settings=copy.deepcopy(settings))
        get_decorator_data(base, set_default=True).patches.append(new_patch)
        return wrapped
    return decorator
The object being decorated becomes the :attr:`~Patch.obj` attribute of the
patch.
Parameters
----------
destination : object
Patch destination.
name : str
Name of the attribute at the destination.
settings : gorilla.Settings
Settings.
Returns
-------
object
The decorated object.
See Also
--------
:class:`Patch`. | entailment |
def patches(destination, settings=None, traverse_bases=True,
            filter=default_filter, recursive=True, use_decorators=True):
    """Decorator to create a patch for each member of a module or a class.
    Parameters
    ----------
    destination : object
        Patch destination.
    settings : gorilla.Settings
        Settings.
    traverse_bases : bool
        If the object is a class, the base classes are also traversed.
    filter : function
        Attributes for which the function returns ``False`` are skipped. The
        function needs to define two parameters: ``name``, the attribute name,
        and ``obj``, the attribute value. If ``None``, no attribute is skipped.
    recursive : bool
        If ``True``, and a hit occurs due to an attribute at the destination
        already existing with the given name, and both the member and the
        target attributes are classes, then instead of creating a patch
        directly with the member attribute value as is, a patch for each of its
        own members is created with the target as new destination.
    use_decorators : bool
        Allows to take any modifier decorator into consideration to allow for
        more granular customizations.
    Returns
    -------
    object
        The decorated object.
    Note
    ----
    A 'target' differs from a 'destination' in that a target represents an
    existing attribute at the destination about to be hit by a patch.
    See Also
    --------
    :class:`Patch`, :func:`create_patches`.
    """
    def decorator(wrapped):
        created = create_patches(
            destination, wrapped, settings=copy.deepcopy(settings),
            traverse_bases=traverse_bases, filter=filter,
            recursive=recursive, use_decorators=use_decorators)
        data = get_decorator_data(_get_base(wrapped), set_default=True)
        data.patches.extend(created)
        return wrapped
    return decorator
Parameters
----------
destination : object
Patch destination.
settings : gorilla.Settings
Settings.
traverse_bases : bool
If the object is a class, the base classes are also traversed.
filter : function
Attributes for which the function returns ``False`` are skipped. The
function needs to define two parameters: ``name``, the attribute name,
and ``obj``, the attribute value. If ``None``, no attribute is skipped.
recursive : bool
If ``True``, and a hit occurs due to an attribute at the destination
already existing with the given name, and both the member and the
target attributes are classes, then instead of creating a patch
directly with the member attribute value as is, a patch for each of its
own members is created with the target as new destination.
use_decorators : bool
Allows to take any modifier decorator into consideration to allow for
more granular customizations.
Returns
-------
object
The decorated object.
Note
----
A 'target' differs from a 'destination' in that a target represents an
existing attribute at the destination about to be hit by a patch.
See Also
--------
:class:`Patch`, :func:`create_patches`. | entailment |
def destination(value):
    """Modifier decorator to update a patch's destination.
    This only modifies the behaviour of the :func:`create_patches` function
    and the :func:`patches` decorator, given that their parameter
    ``use_decorators`` is set to ``True``.
    Parameters
    ----------
    value : object
        Patch destination.
    Returns
    -------
    object
        The decorated object.
    """
    def decorator(wrapped):
        base = _get_base(wrapped)
        get_decorator_data(base, set_default=True).override['destination'] = value
        return wrapped
    return decorator
This only modifies the behaviour of the :func:`create_patches` function
and the :func:`patches` decorator, given that their parameter
``use_decorators`` is set to ``True``.
Parameters
----------
value : object
Patch destination.
Returns
-------
object
The decorated object. | entailment |
def settings(**kwargs):
    """Modifier decorator to update a patch's settings.
    This only modifies the behaviour of the :func:`create_patches` function
    and the :func:`patches` decorator, given that their parameter
    ``use_decorators`` is set to ``True``.
    Parameters
    ----------
    kwargs
        Settings to update. See :class:`Settings` for the list.
    Returns
    -------
    object
        The decorated object.
    """
    def decorator(wrapped):
        base = _get_base(wrapped)
        overrides = get_decorator_data(base, set_default=True).override
        overrides.setdefault('settings', {}).update(kwargs)
        return wrapped
    return decorator
This only modifies the behaviour of the :func:`create_patches` function
and the :func:`patches` decorator, given that their parameter
``use_decorators`` is set to ``True``.
Parameters
----------
kwargs
Settings to update. See :class:`Settings` for the list.
Returns
-------
object
The decorated object. | entailment |
def filter(value):
    """Modifier decorator to force the inclusion or exclusion of an attribute.
    This only modifies the behaviour of the :func:`create_patches` function
    and the :func:`patches` decorator, given that their parameter
    ``use_decorators`` is set to ``True``.
    Parameters
    ----------
    value : bool
        ``True`` to force inclusion, ``False`` to force exclusion, and ``None``
        to inherit from the behaviour defined by :func:`create_patches` or
        :func:`patches`.
    Returns
    -------
    object
        The decorated object.
    """
    def decorator(wrapped):
        base = _get_base(wrapped)
        get_decorator_data(base, set_default=True).filter = value
        return wrapped
    return decorator
This only modifies the behaviour of the :func:`create_patches` function
and the :func:`patches` decorator, given that their parameter
``use_decorators`` is set to ``True``.
Parameters
----------
value : bool
``True`` to force inclusion, ``False`` to force exclusion, and ``None``
to inherit from the behaviour defined by :func:`create_patches` or
:func:`patches`.
Returns
-------
object
The decorated object. | entailment |
def create_patches(destination, root, settings=None, traverse_bases=True,
                   filter=default_filter, recursive=True, use_decorators=True):
    """Create a patch for each member of a module or a class.
    Parameters
    ----------
    destination : object
        Patch destination.
    root : object
        Root object, either a module or a class.
    settings : gorilla.Settings
        Settings.
    traverse_bases : bool
        If the object is a class, the base classes are also traversed.
    filter : function
        Attributes for which the function returns ``False`` are skipped. The
        function needs to define two parameters: ``name``, the attribute name,
        and ``obj``, the attribute value. If ``None``, no attribute is skipped.
    recursive : bool
        If ``True``, and a hit occurs due to an attribute at the destination
        already existing with the given name, and both the member and the
        target attributes are classes, then instead of creating a patch
        directly with the member attribute value as is, a patch for each of its
        own members is created with the target as new destination.
    use_decorators : bool
        ``True`` to take any modifier decorator into consideration to allow for
        more granular customizations.
    Returns
    -------
    list of gorilla.Patch
        The patches.
    Note
    ----
    A 'target' differs from a 'destination' in that a target represents an
    existing attribute at the destination about to be hit by a patch.
    See Also
    --------
    :func:`patches`.
    """
    if filter is None:
        filter = _true
    out = []
    # Breadth-first traversal: each queued patch's 'obj' is a container
    # (module/class) whose members still need to be visited.
    root_patch = Patch(destination, '', root, settings=settings)
    stack = collections.deque((root_patch,))
    while stack:
        parent_patch = stack.popleft()
        members = _get_members(parent_patch.obj, traverse_bases=traverse_bases,
                               filter=None, recursive=False)
        for name, value in members:
            # Each child starts from a deep copy of the parent's settings so
            # decorator overrides cannot leak between siblings.
            patch = Patch(parent_patch.destination, name, value,
                          settings=copy.deepcopy(parent_patch.settings))
            if use_decorators:
                base = _get_base(value)
                decorator_data = get_decorator_data(base)
                # A per-member @filter decorator takes precedence over the
                # filter function passed to this call.
                filter_override = (None if decorator_data is None
                                   else decorator_data.filter)
                if ((filter_override is None and not filter(name, value))
                        or filter_override is False):
                    continue
                if decorator_data is not None:
                    patch._update(**decorator_data.override)
            elif not filter(name, value):
                continue
            if recursive and isinstance(value, _CLASS_TYPES):
                try:
                    target = get_attribute(patch.destination, patch.name)
                except AttributeError:
                    pass
                else:
                    # Class-on-class hit: recurse into the member with the
                    # existing target class as the new destination instead of
                    # replacing the whole class.
                    if isinstance(target, _CLASS_TYPES):
                        patch.destination = target
                        stack.append(patch)
                        continue
            out.append(patch)
    return out
Parameters
----------
destination : object
Patch destination.
root : object
Root object, either a module or a class.
settings : gorilla.Settings
Settings.
traverse_bases : bool
If the object is a class, the base classes are also traversed.
filter : function
Attributes for which the function returns ``False`` are skipped. The
function needs to define two parameters: ``name``, the attribute name,
and ``obj``, the attribute value. If ``None``, no attribute is skipped.
recursive : bool
If ``True``, and a hit occurs due to an attribute at the destination
already existing with the given name, and both the member and the
target attributes are classes, then instead of creating a patch
directly with the member attribute value as is, a patch for each of its
own members is created with the target as new destination.
use_decorators : bool
``True`` to take any modifier decorator into consideration to allow for
more granular customizations.
Returns
-------
list of gorilla.Patch
The patches.
Note
----
A 'target' differs from a 'destination' in that a target represents an
existing attribute at the destination about to be hit by a patch.
See Also
--------
:func:`patches`. | entailment |
def find_patches(modules, recursive=True):
"""Find all the patches created through decorators.
Parameters
----------
modules : list of module
Modules and/or packages to search the patches in.
recursive : bool
``True`` to search recursively in subpackages.
Returns
-------
list of gorilla.Patch
Patches found.
Raises
------
TypeError
The input is not a valid package or module.
See Also
--------
:func:`patch`, :func:`patches`.
"""
out = []
modules = (module
for package in modules
for module in _module_iterator(package, recursive=recursive))
for module in modules:
members = _get_members(module, filter=None)
for _, value in members:
base = _get_base(value)
decorator_data = get_decorator_data(base)
if decorator_data is None:
continue
out.extend(decorator_data.patches)
return out | Find all the patches created through decorators.
Parameters
----------
modules : list of module
Modules and/or packages to search the patches in.
recursive : bool
``True`` to search recursively in subpackages.
Returns
-------
list of gorilla.Patch
Patches found.
Raises
------
TypeError
The input is not a valid package or module.
See Also
--------
:func:`patch`, :func:`patches`. | entailment |
def get_attribute(obj, name):
"""Retrieve an attribute while bypassing the descriptor protocol.
As per the built-in |getattr()|_ function, if the input object is a class
then its base classes might also be searched until the attribute is found.
Parameters
----------
obj : object
Object to search the attribute in.
name : str
Name of the attribute.
Returns
-------
object
The attribute found.
Raises
------
AttributeError
The attribute couldn't be found.
.. |getattr()| replace:: ``getattr()``
.. _getattr(): https://docs.python.org/library/functions.html#getattr
"""
objs = inspect.getmro(obj) if isinstance(obj, _CLASS_TYPES) else [obj]
for obj_ in objs:
try:
return object.__getattribute__(obj_, name)
except AttributeError:
pass
raise AttributeError("'%s' object has no attribute '%s'"
% (type(obj), name)) | Retrieve an attribute while bypassing the descriptor protocol.
As per the built-in |getattr()|_ function, if the input object is a class
then its base classes might also be searched until the attribute is found.
Parameters
----------
obj : object
Object to search the attribute in.
name : str
Name of the attribute.
Returns
-------
object
The attribute found.
Raises
------
AttributeError
The attribute couldn't be found.
.. |getattr()| replace:: ``getattr()``
.. _getattr(): https://docs.python.org/library/functions.html#getattr | entailment |
def get_decorator_data(obj, set_default=False):
"""Retrieve any decorator data from an object.
Parameters
----------
obj : object
Object.
set_default : bool
If no data is found, a default one is set on the object and returned,
otherwise ``None`` is returned.
Returns
-------
gorilla.DecoratorData
The decorator data or ``None``.
"""
if isinstance(obj, _CLASS_TYPES):
datas = getattr(obj, _DECORATOR_DATA, {})
data = datas.setdefault(obj, None)
if data is None and set_default:
data = DecoratorData()
datas[obj] = data
setattr(obj, _DECORATOR_DATA, datas)
else:
data = getattr(obj, _DECORATOR_DATA, None)
if data is None and set_default:
data = DecoratorData()
setattr(obj, _DECORATOR_DATA, data)
return data | Retrieve any decorator data from an object.
Parameters
----------
obj : object
Object.
set_default : bool
If no data is found, a default one is set on the object and returned,
otherwise ``None`` is returned.
Returns
-------
gorilla.DecoratorData
The decorator data or ``None``. | entailment |
def _get_base(obj):
"""Unwrap decorators to retrieve the base object."""
if hasattr(obj, '__func__'):
obj = obj.__func__
elif isinstance(obj, property):
obj = obj.fget
elif isinstance(obj, (classmethod, staticmethod)):
# Fallback for Python < 2.7 back when no `__func__` attribute
# was defined for those descriptors.
obj = obj.__get__(None, object)
else:
return obj
return _get_base(obj) | Unwrap decorators to retrieve the base object. | entailment |
def _get_members(obj, traverse_bases=True, filter=default_filter,
recursive=True):
"""Retrieve the member attributes of a module or a class.
The descriptor protocol is bypassed."""
if filter is None:
filter = _true
out = []
stack = collections.deque((obj,))
while stack:
obj = stack.popleft()
if traverse_bases and isinstance(obj, _CLASS_TYPES):
roots = [base for base in inspect.getmro(obj)
if base not in (type, object)]
else:
roots = [obj]
members = []
seen = set()
for root in roots:
for name, value in _iteritems(getattr(root, '__dict__', {})):
if name not in seen and filter(name, value):
members.append((name, value))
seen.add(name)
members = sorted(members)
for _, value in members:
if recursive and isinstance(value, _CLASS_TYPES):
stack.append(value)
out.extend(members)
return out | Retrieve the member attributes of a module or a class.
The descriptor protocol is bypassed. | entailment |
def _module_iterator(root, recursive=True):
"""Iterate over modules."""
yield root
stack = collections.deque((root,))
while stack:
package = stack.popleft()
# The '__path__' attribute of a package might return a list of paths if
# the package is referenced as a namespace.
paths = getattr(package, '__path__', [])
for path in paths:
modules = pkgutil.iter_modules([path])
for finder, name, is_package in modules:
module_name = '%s.%s' % (package.__name__, name)
module = sys.modules.get(module_name, None)
if module is None:
# Import the module through the finder to support package
# namespaces.
module = _load_module(finder, module_name)
if is_package:
if recursive:
stack.append(module)
yield module
else:
yield module | Iterate over modules. | entailment |
def _update(self, **kwargs):
"""Update some attributes.
If a 'settings' attribute is passed as a dict, then it updates the
content of the settings, if any, instead of completely overwriting it.
"""
for key, value in _iteritems(kwargs):
if key == 'settings':
if isinstance(value, dict):
if self.settings is None:
self.settings = Settings(**value)
else:
self.settings._update(**value)
else:
self.settings = copy.deepcopy(value)
else:
setattr(self, key, value) | Update some attributes.
If a 'settings' attribute is passed as a dict, then it updates the
content of the settings, if any, instead of completely overwriting it. | entailment |
def request(self, path, action, data=''):
"""To make a request to the API."""
# Check if the path includes URL or not.
head = self.base_url
if path.startswith(head):
path = path[len(head):]
path = quote_plus(path, safe='/')
if not path.startswith(self.api):
path = self.api + path
log.debug('Using path %s' % path)
# If we have data, convert to JSON
if data:
data = json.dumps(data)
log.debug('Data to sent: %s' % data)
# In case of key authentication
if self.private_key and self.public_key:
timestamp = str(int(time.time()))
log.debug('Using timestamp: {}'.format(timestamp))
unhashed = path + timestamp + str(data)
log.debug('Using message: {}'.format(unhashed))
self.hash = hmac.new(str.encode(self.private_key),
msg=unhashed.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest()
log.debug('Authenticating with hash: %s' % self.hash)
self.headers['X-Public-Key'] = self.public_key
self.headers['X-Request-Hash'] = self.hash
self.headers['X-Request-Timestamp'] = timestamp
auth = False
# In case of user credentials authentication
elif self.username and self.password:
auth = requests.auth.HTTPBasicAuth(self.username, self.password)
# Set unlock reason
if self.unlock_reason:
self.headers['X-Unlock-Reason'] = self.unlock_reason
log.info('Unlock Reason: %s' % self.unlock_reason)
url = head + path
# Try API request and handle Exceptions
try:
if action == 'get':
log.debug('GET request %s' % url)
self.req = requests.get(url, headers=self.headers, auth=auth,
verify=False)
elif action == 'post':
log.debug('POST request %s' % url)
self.req = requests.post(url, headers=self.headers, auth=auth,
verify=False, data=data)
elif action == 'put':
log.debug('PUT request %s' % url)
self.req = requests.put(url, headers=self.headers,
auth=auth, verify=False,
data=data)
elif action == 'delete':
log.debug('DELETE request %s' % url)
self.req = requests.delete(url, headers=self.headers,
verify=False, auth=auth)
if self.req.content == b'':
result = None
log.debug('No result returned.')
else:
result = self.req.json()
if 'error' in result and result['error']:
raise TPMException(result['message'])
except requests.exceptions.RequestException as e:
log.critical("Connection error for " + str(e))
raise TPMException("Connection error for " + str(e))
except ValueError as e:
if self.req.status_code == 403:
log.warning(url + " forbidden")
raise TPMException(url + " forbidden")
elif self.req.status_code == 404:
log.warning(url + " forbidden")
raise TPMException(url + " not found")
else:
message = ('%s: %s %s' % (e, self.req.url, self.req.text))
log.debug(message)
raise ValueError(message)
return result | To make a request to the API. | entailment |
def get_collection(self, path):
"""To get pagewise data."""
while True:
items = self.get(path)
req = self.req
for item in items:
yield item
if req.links and req.links['next'] and\
req.links['next']['rel'] == 'next':
path = req.links['next']['url']
else:
break | To get pagewise data. | entailment |
def collection(self, path):
"""To return all items generated by get collection."""
data = []
for item in self.get_collection(path):
data.append(item)
return data | To return all items generated by get collection. | entailment |
def list_projects_search(self, searchstring):
"""List projects with searchstring."""
log.debug('List all projects with: %s' % searchstring)
return self.collection('projects/search/%s.json' %
quote_plus(searchstring)) | List projects with searchstring. | entailment |
def create_project(self, data):
"""Create a project."""
# http://teampasswordmanager.com/docs/api-projects/#create_project
log.info('Create project: %s' % data)
NewID = self.post('projects.json', data).get('id')
log.info('Project has been created with ID %s' % NewID)
return NewID | Create a project. | entailment |
def update_project(self, ID, data):
"""Update a project."""
# http://teampasswordmanager.com/docs/api-projects/#update_project
log.info('Update project %s with %s' % (ID, data))
self.put('projects/%s.json' % ID, data) | Update a project. | entailment |
def change_parent_of_project(self, ID, NewParrentID):
"""Change parent of project."""
# http://teampasswordmanager.com/docs/api-projects/#change_parent
log.info('Change parrent for project %s to %s' % (ID, NewParrentID))
data = {'parent_id': NewParrentID}
self.put('projects/%s/change_parent.json' % ID, data) | Change parent of project. | entailment |
def update_security_of_project(self, ID, data):
"""Update security of project."""
# http://teampasswordmanager.com/docs/api-projects/#update_project_security
log.info('Update project %s security %s' % (ID, data))
self.put('projects/%s/security.json' % ID, data) | Update security of project. | entailment |
def list_passwords_search(self, searchstring):
"""List passwords with searchstring."""
log.debug('List all passwords with: %s' % searchstring)
return self.collection('passwords/search/%s.json' %
quote_plus(searchstring)) | List passwords with searchstring. | entailment |
def create_password(self, data):
"""Create a password."""
# http://teampasswordmanager.com/docs/api-passwords/#create_password
log.info('Create new password %s' % data)
NewID = self.post('passwords.json', data).get('id')
log.info('Password has been created with ID %s' % NewID)
return NewID | Create a password. | entailment |
def update_password(self, ID, data):
"""Update a password."""
# http://teampasswordmanager.com/docs/api-passwords/#update_password
log.info('Update Password %s with %s' % (ID, data))
self.put('passwords/%s.json' % ID, data) | Update a password. | entailment |
def update_security_of_password(self, ID, data):
"""Update security of a password."""
# http://teampasswordmanager.com/docs/api-passwords/#update_security_password
log.info('Update security of password %s with %s' % (ID, data))
self.put('passwords/%s/security.json' % ID, data) | Update security of a password. | entailment |
def update_custom_fields_of_password(self, ID, data):
"""Update custom fields definitions of a password."""
# http://teampasswordmanager.com/docs/api-passwords/#update_cf_password
log.info('Update custom fields of password %s with %s' % (ID, data))
self.put('passwords/%s/custom_fields.json' % ID, data) | Update custom fields definitions of a password. | entailment |
def unlock_password(self, ID, reason):
"""Unlock a password."""
# http://teampasswordmanager.com/docs/api-passwords/#unlock_password
log.info('Unlock password %s, Reason: %s' % (ID, reason))
self.unlock_reason = reason
self.put('passwords/%s/unlock.json' % ID) | Unlock a password. | entailment |
def list_mypasswords_search(self, searchstring):
"""List my passwords with searchstring."""
# http://teampasswordmanager.com/docs/api-my-passwords/#list_passwords
log.debug('List MyPasswords with %s' % searchstring)
return self.collection('my_passwords/search/%s.json' %
quote_plus(searchstring)) | List my passwords with searchstring. | entailment |
def create_mypassword(self, data):
"""Create my password."""
# http://teampasswordmanager.com/docs/api-my-passwords/#create_password
log.info('Create MyPassword with %s' % data)
NewID = self.post('my_passwords.json', data).get('id')
log.info('MyPassword has been created with %s' % NewID)
return NewID | Create my password. | entailment |
def update_mypassword(self, ID, data):
"""Update my password."""
# http://teampasswordmanager.com/docs/api-my-passwords/#update_password
log.info('Update MyPassword %s with %s' % (ID, data))
self.put('my_passwords/%s.json' % ID, data) | Update my password. | entailment |
def create_user(self, data):
"""Create a User."""
# http://teampasswordmanager.com/docs/api-users/#create_user
log.info('Create user with %s' % data)
NewID = self.post('users.json', data).get('id')
log.info('User has been created with ID %s' % NewID)
return NewID | Create a User. | entailment |
def update_user(self, ID, data):
"""Update a User."""
# http://teampasswordmanager.com/docs/api-users/#update_user
log.info('Update user %s with %s' % (ID, data))
self.put('users/%s.json' % ID, data) | Update a User. | entailment |
def change_user_password(self, ID, data):
"""Change password of a User."""
# http://teampasswordmanager.com/docs/api-users/#change_password
log.info('Change user %s password' % ID)
self.put('users/%s/change_password.json' % ID, data) | Change password of a User. | entailment |
def convert_user_to_ldap(self, ID, DN):
"""Convert a normal user to a LDAP user."""
# http://teampasswordmanager.com/docs/api-users/#convert_to_ldap
data = {'login_dn': DN}
log.info('Convert User %s to LDAP DN %s' % (ID, DN))
self.put('users/%s/convert_to_ldap.json' % ID, data) | Convert a normal user to a LDAP user. | entailment |
def create_group(self, data):
"""Create a Group."""
# http://teampasswordmanager.com/docs/api-groups/#create_group
log.info('Create group with %s' % data)
NewID = self.post('groups.json', data).get('id')
log.info('Group has been created with ID %s' % NewID)
return NewID | Create a Group. | entailment |
def update_group(self, ID, data):
"""Update a Group."""
# http://teampasswordmanager.com/docs/api-groups/#update_group
log.info('Update group %s with %s' % (ID, data))
self.put('groups/%s.json' % ID, data) | Update a Group. | entailment |
def add_user_to_group(self, GroupID, UserID):
"""Add a user to a group."""
# http://teampasswordmanager.com/docs/api-groups/#add_user
log.info('Add User %s to Group %s' % (UserID, GroupID))
self.put('groups/%s/add_user/%s.json' % (GroupID, UserID)) | Add a user to a group. | entailment |
def delete_user_from_group(self, GroupID, UserID):
"""Delete a user from a group."""
# http://teampasswordmanager.com/docs/api-groups/#del_user
log.info('Delete user %s from group %s' % (UserID, GroupID))
self.put('groups/%s/delete_user/%s.json' % (GroupID, UserID)) | Delete a user from a group. | entailment |
def up_to_date(self):
"""Check if Team Password Manager is up to date."""
VersionInfo = self.get_latest_version()
CurrentVersion = VersionInfo.get('version')
LatestVersion = VersionInfo.get('latest_version')
if CurrentVersion == LatestVersion:
log.info('TeamPasswordManager is up-to-date!')
log.debug('Current Version: {} Latest Version: {}'.format(LatestVersion, LatestVersion))
return True
else:
log.warning('TeamPasswordManager is not up-to-date!')
log.debug('Current Version: {} Latest Version: {}'.format(LatestVersion, LatestVersion))
return False | Check if Team Password Manager is up to date. | entailment |
def convert_exception(from_exception, to_exception, *to_args, **to_kw):
"""
Decorator: Catch exception ``from_exception`` and instead raise ``to_exception(*to_args, **to_kw)``.
Useful when modules you're using in a method throw their own errors that you want to
convert to your own exceptions that you handle higher in the stack.
Example: ::
class FooError(Exception):
pass
class BarError(Exception):
def __init__(self, message):
self.message = message
@convert_exception(FooError, BarError, message='bar')
def throw_foo():
raise FooError('foo')
try:
throw_foo()
except BarError as e:
assert e.message == 'bar'
"""
def wrapper(fn):
def fn_new(*args, **kw):
try:
return fn(*args, **kw)
except from_exception:
new_exception = to_exception(*to_args, **to_kw)
traceback = sys.exc_info()[2]
if PY3:
value = new_exception
else:
value = None
reraise(new_exception, value, traceback)
fn_new.__doc__ = fn.__doc__
return fn_new
return wrapper | Decorator: Catch exception ``from_exception`` and instead raise ``to_exception(*to_args, **to_kw)``.
Useful when modules you're using in a method throw their own errors that you want to
convert to your own exceptions that you handle higher in the stack.
Example: ::
class FooError(Exception):
pass
class BarError(Exception):
def __init__(self, message):
self.message = message
@convert_exception(FooError, BarError, message='bar')
def throw_foo():
raise FooError('foo')
try:
throw_foo()
except BarError as e:
assert e.message == 'bar' | entailment |
def iterate_date_values(d, start_date=None, stop_date=None, default=0):
"""
Convert (date, value) sorted lists into contiguous value-per-day data sets. Great for sparklines.
Example::
[(datetime.date(2011, 1, 1), 1), (datetime.date(2011, 1, 4), 2)] -> [1, 0, 0, 2]
"""
dataiter = iter(d)
cur_day, cur_val = next(dataiter)
start_date = start_date or cur_day
while cur_day < start_date:
cur_day, cur_val = next(dataiter)
for d in iterate_date(start_date, stop_date):
if d != cur_day:
yield default
continue
yield cur_val
try:
cur_day, cur_val = next(dataiter)
except StopIteration:
if not stop_date:
raise | Convert (date, value) sorted lists into contiguous value-per-day data sets. Great for sparklines.
Example::
[(datetime.date(2011, 1, 1), 1), (datetime.date(2011, 1, 4), 2)] -> [1, 0, 0, 2] | entailment |
def truncate_datetime(t, resolution):
"""
Given a datetime ``t`` and a ``resolution``, flatten the precision beyond the given resolution.
``resolution`` can be one of: year, month, day, hour, minute, second, microsecond
Example::
>>> t = datetime.datetime(2000, 1, 2, 3, 4, 5, 6000) # Or, 2000-01-02 03:04:05.006000
>>> truncate_datetime(t, 'day')
datetime.datetime(2000, 1, 2, 0, 0)
>>> _.isoformat()
'2000-01-02T00:00:00'
>>> truncate_datetime(t, 'minute')
datetime.datetime(2000, 1, 2, 3, 4)
>>> _.isoformat()
'2000-01-02T03:04:00'
"""
resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
if resolution not in resolutions:
raise KeyError("Resolution is not valid: {0}".format(resolution))
args = []
for r in resolutions:
args += [getattr(t, r)]
if r == resolution:
break
return datetime.datetime(*args) | Given a datetime ``t`` and a ``resolution``, flatten the precision beyond the given resolution.
``resolution`` can be one of: year, month, day, hour, minute, second, microsecond
Example::
>>> t = datetime.datetime(2000, 1, 2, 3, 4, 5, 6000) # Or, 2000-01-02 03:04:05.006000
>>> truncate_datetime(t, 'day')
datetime.datetime(2000, 1, 2, 0, 0)
>>> _.isoformat()
'2000-01-02T00:00:00'
>>> truncate_datetime(t, 'minute')
datetime.datetime(2000, 1, 2, 3, 4)
>>> _.isoformat()
'2000-01-02T03:04:00' | entailment |
def to_timezone(dt, timezone):
"""
Return an aware datetime which is ``dt`` converted to ``timezone``.
If ``dt`` is naive, it is assumed to be UTC.
For example, if ``dt`` is "06:00 UTC+0000" and ``timezone`` is "EDT-0400",
then the result will be "02:00 EDT-0400".
This method follows the guidelines in http://pytz.sourceforge.net/
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=_UTC)
return timezone.normalize(dt.astimezone(timezone)) | Return an aware datetime which is ``dt`` converted to ``timezone``.
If ``dt`` is naive, it is assumed to be UTC.
For example, if ``dt`` is "06:00 UTC+0000" and ``timezone`` is "EDT-0400",
then the result will be "02:00 EDT-0400".
This method follows the guidelines in http://pytz.sourceforge.net/ | entailment |
def now(timezone=None):
"""
Return a naive datetime object for the given ``timezone``. A ``timezone``
is any pytz- like or datetime.tzinfo-like timezone object. If no timezone
is given, then UTC is assumed.
This method is best used with pytz installed::
pip install pytz
"""
d = datetime.datetime.utcnow()
if not timezone:
return d
return to_timezone(d, timezone).replace(tzinfo=None) | Return a naive datetime object for the given ``timezone``. A ``timezone``
is any pytz- like or datetime.tzinfo-like timezone object. If no timezone
is given, then UTC is assumed.
This method is best used with pytz installed::
pip install pytz | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.