repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url |
---|---|---|---|---|---|---|---|
icq-bot/python-icq-bot | example/util.py | random_choice | def random_choice(sequence):
""" Same as :meth:`random.choice`, but also supports :class:`set` type to be passed as sequence. """
return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence) | python | def random_choice(sequence):
""" Same as :meth:`random.choice`, but also supports :class:`set` type to be passed as sequence. """
return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence) | Same as :meth:`random.choice`, but also supports :class:`set` type to be passed as sequence. | https://github.com/icq-bot/python-icq-bot/blob/1d278cc91f8eba5481bb8d70f80fc74160a40c8b/example/util.py#L36-L38 |
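A quick usage sketch for the helper above; the function body is repeated verbatim so the snippet runs standalone, and the sample values are illustrative.

```python
import random

def random_choice(sequence):
    # Same behaviour as above: convert sets to tuples so random.choice can index them.
    return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence)

print(random_choice([1, 2, 3]))         # e.g. 2
print(random_choice({"red", "green"}))  # works; plain random.choice() on a set raises TypeError
```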
torchbox/wagtail-markdown | wagtailmarkdown/utils.py | render_markdown | def render_markdown(text, context=None):
"""
Turn markdown into HTML.
"""
if context is None or not isinstance(context, dict):
context = {}
markdown_html = _transform_markdown_into_html(text)
sanitised_markdown_html = _sanitise_markdown_html(markdown_html)
return mark_safe(sanitised_markdown_html) | python | def render_markdown(text, context=None):
"""
Turn markdown into HTML.
"""
if context is None or not isinstance(context, dict):
context = {}
markdown_html = _transform_markdown_into_html(text)
sanitised_markdown_html = _sanitise_markdown_html(markdown_html)
return mark_safe(sanitised_markdown_html) | Turn markdown into HTML. | https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/utils.py#L22-L30 |
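A hedged call sketch for `render_markdown`; it assumes a Django project where wagtail-markdown is installed and configured, and the comments only describe the intended transform-then-sanitise behaviour.

```python
from wagtailmarkdown.utils import render_markdown  # import path taken from the row above

html = render_markdown("**bold** text and <script>alert(1)</script>")
# The text is first converted from markdown to HTML and then sanitised,
# so disallowed tags such as <script> should be removed before the result
# is marked safe for template rendering.
print(html)
```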
torchbox/wagtail-markdown | wagtailmarkdown/utils.py | render | def render(text, context=None):
"""
Deprecated call to render_markdown().
"""
warning = (
"wagtailmarkdown.utils.render() is deprecated. Use "
"wagtailmarkdown.utils.render_markdown() instead."
)
warnings.warn(warning, WagtailMarkdownDeprecationWarning, stacklevel=2)
return render_markdown(text, context) | python | def render(text, context=None):
"""
Deprecated call to render_markdown().
"""
warning = (
"wagtailmarkdown.utils.render() is deprecated. Use "
"wagtailmarkdown.utils.render_markdown() instead."
)
warnings.warn(warning, WagtailMarkdownDeprecationWarning, stacklevel=2)
return render_markdown(text, context) | Deprecated call to render_markdown(). | https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/utils.py#L154-L163 |
torchbox/wagtail-markdown | wagtailmarkdown/mdx/tables/__init__.py | TableProcessor.run | def run(self, parent, blocks):
""" Parse a table block and build table. """
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
rows = block[2:]
# Get format type (bordered by pipes or not)
border = False
if header.startswith('|'):
border = True
# Get alignment of columns
align = []
for c in self._split_row(seperator, border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
# Build table
table = etree.SubElement(parent, 'table')
table.set('class', 'wftable')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row.strip(), tbody, align, border) | python | def run(self, parent, blocks):
""" Parse a table block and build table. """
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
rows = block[2:]
# Get format type (bordered by pipes or not)
border = False
if header.startswith('|'):
border = True
# Get alignment of columns
align = []
for c in self._split_row(seperator, border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
# Build table
table = etree.SubElement(parent, 'table')
table.set('class', 'wftable')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row.strip(), tbody, align, border) | Parse a table block and build table. | https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/mdx/tables/__init__.py#L33-L61 |
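The alignment detection in `run()` is self-contained enough to show on its own; below is a minimal standalone sketch of the same idea (whitespace handling is simplified relative to the original).

```python
def parse_alignments(separator_row, border=True):
    # ':---:' -> center, ':---' -> left, '---:' -> right, '---' -> no alignment
    row = separator_row.strip()
    if border:
        row = row.strip('|')
    aligns = []
    for cell in row.split('|'):
        cell = cell.strip()
        if cell.startswith(':') and cell.endswith(':'):
            aligns.append('center')
        elif cell.startswith(':'):
            aligns.append('left')
        elif cell.endswith(':'):
            aligns.append('right')
        else:
            aligns.append(None)
    return aligns

print(parse_alignments('|:---|:---:|---:|'))  # ['left', 'center', 'right']
```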
torchbox/wagtail-markdown | wagtailmarkdown/mdx/tables/__init__.py | TableProcessor._build_row | def _build_row(self, row, parent, align, border):
""" Given a row of text, build table cells. """
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
# We use align here rather than cells to ensure every row
# contains the same number of columns.
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
c.text = cells[i].strip()
except IndexError:
c.text = ""
if a:
c.set('align', a) | python | def _build_row(self, row, parent, align, border):
""" Given a row of text, build table cells. """
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
# We use align here rather than cells to ensure every row
# contains the same number of columns.
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
c.text = cells[i].strip()
except IndexError:
c.text = ""
if a:
c.set('align', a) | Given a row of text, build table cells. | https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/mdx/tables/__init__.py#L63-L79 |
torchbox/wagtail-markdown | wagtailmarkdown/mdx/tables/__init__.py | TableProcessor._split_row | def _split_row(self, row, border):
""" split a row of text into list of cells. """
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|') | python | def _split_row(self, row, border):
""" split a row of text into list of cells. """
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|') | split a row of text into list of cells. | https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/mdx/tables/__init__.py#L81-L88 |
torchbox/wagtail-markdown | wagtailmarkdown/mdx/tables/__init__.py | TableExtension.extendMarkdown | def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """
md.parser.blockprocessors.add('table',
TableProcessor(md.parser),
'<hashheader') | python | def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """
md.parser.blockprocessors.add('table',
TableProcessor(md.parser),
'<hashheader') | Add an instance of TableProcessor to BlockParser. | https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/mdx/tables/__init__.py#L94-L98 |
kottenator/django-compressor-toolkit | compressor_toolkit/precompilers.py | get_all_static | def get_all_static():
"""
Get all the static files directories found by ``STATICFILES_FINDERS``
:return: set of paths (top-level folders only)
"""
static_dirs = set()
for finder in settings.STATICFILES_FINDERS:
finder = finders.get_finder(finder)
if hasattr(finder, 'storages'):
for storage in finder.storages.values():
static_dirs.add(storage.location)
if hasattr(finder, 'storage'):
static_dirs.add(finder.storage.location)
return static_dirs | python | def get_all_static():
"""
Get all the static files directories found by ``STATICFILES_FINDERS``
:return: set of paths (top-level folders only)
"""
static_dirs = set()
for finder in settings.STATICFILES_FINDERS:
finder = finders.get_finder(finder)
if hasattr(finder, 'storages'):
for storage in finder.storages.values():
static_dirs.add(storage.location)
if hasattr(finder, 'storage'):
static_dirs.add(finder.storage.location)
return static_dirs | Get all the static files directories found by ``STATICFILES_FINDERS``
:return: set of paths (top-level folders only) | https://github.com/kottenator/django-compressor-toolkit/blob/e7bfdaa354e9c9189db0e4ba4fa049045adad91b/compressor_toolkit/precompilers.py#L13-L31 |
kottenator/django-compressor-toolkit | compressor_toolkit/precompilers.py | BaseCompiler.input | def input(self, **kwargs):
"""
Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469
"""
if self.infile is None and "{infile}" in self.command:
if self.filename is None:
self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
self.infile.write(self.content.encode(self.default_encoding))
self.infile.flush()
self.options += (
('infile', self.infile.name),
)
return super(BaseCompiler, self).input(**kwargs) | python | def input(self, **kwargs):
"""
Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469
"""
if self.infile is None and "{infile}" in self.command:
if self.filename is None:
self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
self.infile.write(self.content.encode(self.default_encoding))
self.infile.flush()
self.options += (
('infile', self.infile.name),
)
return super(BaseCompiler, self).input(**kwargs) | Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469 | https://github.com/kottenator/django-compressor-toolkit/blob/e7bfdaa354e9c9189db0e4ba4fa049045adad91b/compressor_toolkit/precompilers.py#L38-L53 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | graph_hash | def graph_hash(obj):
'''this hashes all types to a hash without collisions. python's hashing algorithms are not cross-type compatible but hashing tuples with the type as the first element seems to do the trick'''
obj_type = type(obj)
try:
# this works for hashables
return hash((obj_type, obj))
except:
# this works for object containers since graphdb
# wants to identify different containers
# instead of the sum of their current internals
return hash((obj_type, id(obj))) | python | def graph_hash(obj):
'''this hashes all types to a hash without collisions. python's hashing algorithms are not cross-type compatible but hashing tuples with the type as the first element seems to do the trick'''
obj_type = type(obj)
try:
# this works for hashables
return hash((obj_type, obj))
except:
# this works for object containers since graphdb
# wants to identify different containers
# instead of the sum of their current internals
return hash((obj_type, id(obj))) | this hashes all types to a hash without collisions. python's hashing algorithms are not cross-type compatible but hashing tuples with the type as the first element seems to do the trick | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L13-L23 |
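A short standalone check of the behaviour the docstring describes; the bare `except` is narrowed to `TypeError` here, and the sample objects are arbitrary.

```python
def graph_hash(obj):
    # Hash (type, value) for hashable objects; fall back to (type, id) for containers.
    obj_type = type(obj)
    try:
        return hash((obj_type, obj))
    except TypeError:
        return hash((obj_type, id(obj)))

assert graph_hash("x") == graph_hash("x")   # equal hashables of the same type agree
print(graph_hash(1) == graph_hash("1"))     # normally False: the type is part of the key
a, b = [1, 2], [1, 2]
print(graph_hash(a) == graph_hash(a),       # True: same object, same identity
      graph_hash(a) == graph_hash(b))       # False: distinct containers go by identity
```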
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.store_item | def store_item(self, item):
''' use this function to store a python object in the database '''
assert not isinstance(item, RamGraphDBNode)
item_hash = graph_hash(item)
if item_hash not in self.nodes:
self.nodes[item_hash] = RamGraphDBNode(item)
return self.nodes[item_hash] | python | def store_item(self, item):
''' use this function to store a python object in the database '''
assert not isinstance(item, RamGraphDBNode)
item_hash = graph_hash(item)
if item_hash not in self.nodes:
self.nodes[item_hash] = RamGraphDBNode(item)
return self.nodes[item_hash] | use this function to store a python object in the database | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L155-L161 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.store_relation | def store_relation(self, src, name, dst):
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src).link(name, self.store_item(dst)) | python | def store_relation(self, src, name, dst):
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src).link(name, self.store_item(dst)) | use this to store a relation between two objects | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L203-L208 |
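Taken together, `store_item`, `store_relation`, and the traversal helpers form the in-memory graph API; here is a hedged usage sketch, where the import path and the no-argument constructor are assumptions based on the repository layout shown in these rows.

```python
from graphdb.RamGraphDB import RamGraphDB  # assumed import path

db = RamGraphDB()                             # assumed no-argument constructor
db.store_relation("alice", "knows", "bob")    # stores both endpoints and links them
db.store_relation("alice", "knows", "carol")

# relation names only, or (name, object) pairs when include_object=True
print(list(db.relations_of("alice", include_object=True)))
# e.g. [('knows', 'bob'), ('knows', 'carol')]

db.delete_relation("alice", "knows", "bob")   # remove a single edge
```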
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.delete_relation | def delete_relation(self, src, relation, target):
''' can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src '''
self.__require_string__(relation)
if src in self and target in self:
self._get_item_node(src).unlink(relation, self._get_item_node(target)) | python | def delete_relation(self, src, relation, target):
''' can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src '''
self.__require_string__(relation)
if src in self and target in self:
self._get_item_node(src).unlink(relation, self._get_item_node(target)) | can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L216-L221 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.delete_item | def delete_item(self, item):
''' removes an item from the db '''
for relation, dst in self.relations_of(item, True):
self.delete_relation(item, relation, dst)
#print(item, relation, dst)
for src, relation in self.relations_to(item, True):
self.delete_relation(src, relation, item)
#print(src, relation, item)
h = self._item_hash(item)
if item in self:
#print('deleting item:', item)
self.nodes[h].clear()
del self.nodes[h] | python | def delete_item(self, item):
''' removes an item from the db '''
for relation, dst in self.relations_of(item, True):
self.delete_relation(item, relation, dst)
#print(item, relation, dst)
for src, relation in self.relations_to(item, True):
self.delete_relation(src, relation, item)
#print(src, relation, item)
h = self._item_hash(item)
if item in self:
#print('deleting item:', item)
self.nodes[h].clear()
del self.nodes[h] | removes an item from the db | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L223-L235 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.relations_of | def relations_of(self, target, include_object=False):
''' list all relations that originate from target '''
relations = (target if isinstance(target, RamGraphDBNode) else self._get_item_node(target)).outgoing
if include_object:
for k in relations:
for v in relations[k]:
if hasattr(v, 'obj'): # filter dead links
yield k, v.obj
else:
yield from relations | python | def relations_of(self, target, include_object=False):
''' list all relations that originate from target '''
relations = (target if isinstance(target, RamGraphDBNode) else self._get_item_node(target)).outgoing
if include_object:
for k in relations:
for v in relations[k]:
if hasattr(v, 'obj'): # filter dead links
yield k, v.obj
else:
yield from relations | list all relations that originate from target | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L241-L250 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.relations_to | def relations_to(self, target, include_object=False):
''' list all relations pointing at an object '''
relations = self._get_item_node(target).incoming
if include_object:
for k in relations:
for v in relations[k]:
if hasattr(v, 'obj'): # filter dead links
yield v.obj, k
else:
yield from relations | python | def relations_to(self, target, include_object=False):
''' list all relations pointing at an object '''
relations = self._get_item_node(target).incoming
if include_object:
for k in relations:
for v in relations[k]:
if hasattr(v, 'obj'): # filter dead links
yield v.obj, k
else:
yield from relations | list all relations pointing at an object | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L252-L261 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.show_objects | def show_objects(self):
''' display the entire set of objects with their (id, value, node) '''
for key in self.nodes:
node = self.nodes[key]
value = node.obj
print(key, '-', repr(value), '-', node) | python | def show_objects(self):
''' display the entire set of objects with their (id, value, node) '''
for key in self.nodes:
node = self.nodes[key]
value = node.obj
print(key, '-', repr(value), '-', node) | display the entire set of objects with their (id, value, node) | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L276-L281 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.list_relations | def list_relations(self):
''' list every relation in the database as (src, relation, dst) '''
for node in self.iter_nodes():
for relation, target in self.relations_of(node.obj, True):
yield node.obj, relation, target | python | def list_relations(self):
''' list every relation in the database as (src, relation, dst) '''
for node in self.iter_nodes():
for relation, target in self.relations_of(node.obj, True):
yield node.obj, relation, target | list every relation in the database as (src, relation, dst) | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L283-L287 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | RamGraphDB.show_relations | def show_relations(self):
''' display every relation in the database as (src, relation, dst) '''
for src_node in self.iter_nodes():
for relation in src_node.outgoing:
for dst_node in src_node.outgoing[relation]:
print(repr(src_node.obj), '-', relation, '-', repr(dst_node.obj)) | python | def show_relations(self):
''' display every relation in the database as (src, relation, dst) '''
for src_node in self.iter_nodes():
for relation in src_node.outgoing:
for dst_node in src_node.outgoing[relation]:
print(repr(src_node.obj), '-', relation, '-', repr(dst_node.obj)) | display every relation in the database as (src, relation, dst) | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L289-L294 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | VList.where | def where(self, relation, filter_fn):
''' use this to filter VLists, simply provide a filter function and what relation to apply it to '''
assert type(relation).__name__ in {'str','unicode'}, 'where needs the first arg to be a string'
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(i for i in self if relation in i._relations() and any(filter_fn(_()) for _ in i[relation])) | python | def where(self, relation, filter_fn):
''' use this to filter VLists, simply provide a filter function and what relation to apply it to '''
assert type(relation).__name__ in {'str','unicode'}, 'where needs the first arg to be a string'
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(i for i in self if relation in i._relations() and any(filter_fn(_()) for _ in i[relation])) | use this to filter VLists, simply provide a filter function and what relation to apply it to | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L349-L353 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | VList._where | def _where(self, filter_fn):
''' use this to filter VLists, simply provide a filter function to filter the current found objects '''
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(i for i in self if filter_fn(i())) | python | def _where(self, filter_fn):
''' use this to filter VLists, simply provide a filter function to filter the current found objects '''
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(i for i in self if filter_fn(i())) | use this to filter VLists, simply provide a filter function to filter the current found objects | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L355-L358 |
CodyKochmann/graphdb | graphdb/RamGraphDB.py | VList._where | def _where(self, **kwargs):
'''use this to filter VLists with kv pairs'''
out = self
for k,v in kwargs.items():
out = out.where(k, lambda i:i==v)
return out | python | def _where(self, **kwargs):
'''use this to filter VLists with kv pairs'''
out = self
for k,v in kwargs.items():
out = out.where(k, lambda i:i==v)
return out | use this to filter VLists with kv pairs | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L362-L367 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB._create_file | def _create_file(path=''):
''' creates a file at the given path and sets the permissions to user only read/write '''
from os.path import isfile
if not isfile(path): # only do the following if the file doesn't exist yet
from os import chmod
from stat import S_IRUSR, S_IWUSR
open(path, "a").close() # create the file
attempt(lambda: chmod(path, (S_IRUSR | S_IWUSR))) | python | def _create_file(path=''):
''' creates a file at the given path and sets the permissions to user only read/write '''
from os.path import isfile
if not isfile(path): # only do the following if the file doesn't exist yet
from os import chmod
from stat import S_IRUSR, S_IWUSR
open(path, "a").close() # create the file
attempt(lambda: chmod(path, (S_IRUSR | S_IWUSR))) | creates a file at the given path and sets the permissions to user only read/write | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L174-L182 |
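The create-then-chmod pattern above, as a standalone sketch without the project's `attempt` wrapper; the path is illustrative.

```python
import os
import stat

def create_private_file(path):
    """Create `path` if it does not exist and restrict it to user read/write (0600)."""
    if not os.path.isfile(path):
        open(path, "a").close()
        try:
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
        except OSError:
            pass  # e.g. filesystems that do not support chmod

create_private_file("/tmp/example-graph.db")
```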
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.store_item | def store_item(self, item):
''' use this function to store a python object in the database '''
#print('storing item', item)
item_id = self._id_of(item)
#print('item_id', item_id)
if item_id is None:
#print('storing item', item)
blob = self.serialize(item)
with self._write_lock:
self._execute(
'INSERT into objects (code) values (?);',
(blob,)
)
self.autocommit() | python | def store_item(self, item):
''' use this function to store a python object in the database '''
#print('storing item', item)
item_id = self._id_of(item)
#print('item_id', item_id)
if item_id is None:
#print('storing item', item)
blob = self.serialize(item)
with self._write_lock:
self._execute(
'INSERT into objects (code) values (?);',
(blob,)
)
self.autocommit() | use this function to store a python object in the database | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L197-L210 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.delete_item | def delete_item(self, item):
''' removes an item from the db '''
for relation in self.relations_of(item):
self.delete_relation(item, relation)
for origin, relation in self.relations_to(item, True):
self.delete_relation(origin, relation, item)
with self._write_lock:
self._execute('''
DELETE from objects where code=?
''', (self.serialize(item),))
self.autocommit() | python | def delete_item(self, item):
''' removes an item from the db '''
for relation in self.relations_of(item):
self.delete_relation(item, relation)
for origin, relation in self.relations_to(item, True):
self.delete_relation(origin, relation, item)
with self._write_lock:
self._execute('''
DELETE from objects where code=?
''', (self.serialize(item),))
self.autocommit() | removes an item from the db | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L212-L222 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.store_relation | def store_relation(self, src, name, dst):
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src)
self.store_item(dst)
with self._write_lock:
#print(locals())
# run the insertion
self._execute(
'insert into relations select ob1.id, ?, ob2.id from objects as ob1, objects as ob2 where ob1.code=? and ob2.code=?;',
(name, self.serialize(src), self.serialize(dst))
)
self.autocommit() | python | def store_relation(self, src, name, dst):
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src)
self.store_item(dst)
with self._write_lock:
#print(locals())
# run the insertion
self._execute(
'insert into relations select ob1.id, ?, ob2.id from objects as ob1, objects as ob2 where ob1.code=? and ob2.code=?;',
(name, self.serialize(src), self.serialize(dst))
)
self.autocommit() | use this to store a relation between two objects | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L270-L284 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB._delete_single_relation | def _delete_single_relation(self, src, relation, dst):
''' deletes a single relation between objects '''
self.__require_string__(relation)
src_id = self._id_of(src)
dst_id = self._id_of(dst)
with self._write_lock:
self._execute('''
DELETE from relations where src=? and name=? and dst=?
''', (src_id, relation, dst_id))
self.autocommit() | python | def _delete_single_relation(self, src, relation, dst):
''' deletes a single relation between objects '''
self.__require_string__(relation)
src_id = self._id_of(src)
dst_id = self._id_of(dst)
with self._write_lock:
self._execute('''
DELETE from relations where src=? and name=? and dst=?
''', (src_id, relation, dst_id))
self.autocommit() | deletes a single relation between objects | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L286-L295 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.delete_relation | def delete_relation(self, src, relation, *targets):
''' can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src '''
self.__require_string__(relation)
if len(targets):
for i in targets:
self._delete_single_relation(src, relation, i)
else:
# delete all connections of that relation from src
for i in list(self.find(src, relation)):
self._delete_single_relation(src, relation, i) | python | def delete_relation(self, src, relation, *targets):
''' can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src '''
self.__require_string__(relation)
if len(targets):
for i in targets:
self._delete_single_relation(src, relation, i)
else:
# delete all connections of that relation from src
for i in list(self.find(src, relation)):
self._delete_single_relation(src, relation, i) | can be both used as (src, relation, dest) for a single relation or
(src, relation) to delete all relations of that type from the src | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L297-L307 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.find | def find(self, target, relation):
''' returns back all elements the target has a relation to '''
query = 'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?' # src is id not source :/
for i in self._execute(query, (relation, self.serialize(target))):
yield self.deserialize(i[0]) | python | def find(self, target, relation):
''' returns back all elements the target has a relation to '''
query = 'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?' # src is id not source :/
for i in self._execute(query, (relation, self.serialize(target))):
yield self.deserialize(i[0]) | returns back all elements the target has a relation to | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L309-L313 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.relations_of | def relations_of(self, target, include_object=False):
''' list all relations that originate from target '''
if include_object:
_ = self._execute('''
select relations.name, ob2.code from relations, objects as ob1, objects as ob2 where relations.src=ob1.id and ob2.id=relations.dst and ob1.code=?
''', (self.serialize(target),))
for i in _:
yield i[0], self.deserialize(i[1])
else:
_ = self._execute('''
select distinct relations.name from relations, objects where relations.src=objects.id and objects.code=?
''', (self.serialize(target),))
for i in _:
yield i[0] | python | def relations_of(self, target, include_object=False):
''' list all relations that originate from target '''
if include_object:
_ = self._execute('''
select relations.name, ob2.code from relations, objects as ob1, objects as ob2 where relations.src=ob1.id and ob2.id=relations.dst and ob1.code=?
''', (self.serialize(target),))
for i in _:
yield i[0], self.deserialize(i[1])
else:
_ = self._execute('''
select distinct relations.name from relations, objects where relations.src=objects.id and objects.code=?
''', (self.serialize(target),))
for i in _:
yield i[0] | list all relations that originate from target | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L315-L329 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.relations_to | def relations_to(self, target, include_object=False):
''' list all relations pointing at an object '''
if include_object:
_ = self._execute('''
select name, (select code from objects where id=src) from relations where dst=?
''', (self._id_of(target),))
for i in _:
yield self.deserialize(i[1]), i[0]
else:
_ = self._execute('''
select distinct name from relations where dst=?
''', (self._id_of(target),))
for i in _:
yield i[0] | python | def relations_to(self, target, include_object=False):
''' list all relations pointing at an object '''
if include_object:
_ = self._execute('''
select name, (select code from objects where id=src) from relations where dst=?
''', (self._id_of(target),))
for i in _:
yield self.deserialize(i[1]), i[0]
else:
_ = self._execute('''
select distinct name from relations where dst=?
''', (self._id_of(target),))
for i in _:
yield i[0] | list all relations pointing at an object | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L331-L344 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.connections_of | def connections_of(self, target):
''' generate tuples containing (relation, object_that_applies) '''
return gen.chain( ((r,i) for i in self.find(target,r)) for r in self.relations_of(target) ) | python | def connections_of(self, target):
''' generate tuples containing (relation, object_that_applies) '''
return gen.chain( ((r,i) for i in self.find(target,r)) for r in self.relations_of(target) ) | generate tuples containing (relation, object_that_applies) | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L346-L348 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.list_objects | def list_objects(self):
''' list the entire table of objects with their (id, serialized_form, actual_value) '''
for i in self._execute('select * from objects'):
_id, code = i
yield _id, code, self.deserialize(code) | python | def list_objects(self):
''' list the entire table of objects with their (id, serialized_form, actual_value) '''
for i in self._execute('select * from objects'):
_id, code = i
yield _id, code, self.deserialize(code) | list the entire table of objects with their (id, serialized_form, actual_value) | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L350-L354 |
CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.list_relations | def list_relations(self):
''' list every relation in the database as (src, relation, dst) '''
_ = self._execute('select * from relations').fetchall()
for i in _:
#print(i)
src, name, dst = i
src = self.deserialize(
next(self._execute('select code from objects where id=?',(src,)))[0]
)
dst = self.deserialize(
next(self._execute('select code from objects where id=?',(dst,)))[0]
)
yield src, name, dst | python | def list_relations(self):
''' list every relation in the database as (src, relation, dst) '''
_ = self._execute('select * from relations').fetchall()
for i in _:
#print(i)
src, name, dst = i
src = self.deserialize(
next(self._execute('select code from objects where id=?',(src,)))[0]
)
dst = self.deserialize(
next(self._execute('select code from objects where id=?',(dst,)))[0]
)
yield src, name, dst | list every relation in the database as (src, relation, dst) | https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L366-L378 |
keredson/peewee-db-evolve | peeweedbevolve.py | calc_changes | def calc_changes(db, ignore_tables=None):
migrator = None # expose eventually?
if migrator is None:
migrator = auto_detect_migrator(db)
existing_tables = [unicode(t) for t in db.get_tables()]
existing_indexes = {table:get_indexes_by_table(db, table) for table in existing_tables}
existing_columns_by_table = get_columns_by_table(db)
foreign_keys_by_table = get_foreign_keys_by_table(db)
table_names_to_models = {_table_name(cls): cls for cls in all_models.keys()}
to_run = []
table_adds, add_fks, table_deletes, table_renames = calc_table_changes(existing_tables, ignore_tables=ignore_tables)
table_renamed_from = {v: k for k, v in table_renames.items()}
for tbl in table_adds:
to_run += create_table(table_names_to_models[tbl])
for field in add_fks:
if hasattr(field, '__pwdbev__not_deferred') and field.__pwdbev__not_deferred:
field.deferred = False
to_run += create_foreign_key(field)
for k, v in table_renames.items():
to_run += rename_table(migrator, k, v)
rename_cols_by_table = {}
deleted_cols_by_table = {}
for etn, ecols in existing_columns_by_table.items():
if etn in table_deletes: continue
ntn = table_renames.get(etn, etn)
model = table_names_to_models.get(ntn)
if not model: continue
defined_fields = model._meta.sorted_fields
defined_column_name_to_field = {unicode(_column_name(f)):f for f in defined_fields}
existing_fks_by_column = {fk.column:fk for fk in foreign_keys_by_table[etn]}
adds, deletes, renames, alter_statements = calc_column_changes(db, migrator, etn, ntn, ecols, defined_fields, existing_fks_by_column)
for column_name in adds:
field = defined_column_name_to_field[column_name]
to_run += alter_add_column(db, migrator, ntn, column_name, field)
if not field.null:
# alter_add_column strips null constraints
# add them back after setting any defaults
if field.default is not None:
to_run += set_default(db, migrator, ntn, column_name, field)
else:
to_run.append(('-- adding a not null column without a default will fail if the table is not empty',[]))
to_run += add_not_null(db, migrator, ntn, column_name, field)
for column_name in deletes:
fk = existing_fks_by_column.get(column_name)
if fk:
to_run += drop_foreign_key(db, migrator, ntn, fk.name)
to_run += drop_column(db, migrator, ntn, column_name)
for ocn, ncn in renames.items():
field = defined_column_name_to_field[ncn]
to_run += rename_column(db, migrator, ntn, ocn, ncn, field)
to_run += alter_statements
rename_cols_by_table[ntn] = renames
deleted_cols_by_table[ntn] = deletes
for ntn, model in table_names_to_models.items():
etn = table_renamed_from.get(ntn, ntn)
deletes = deleted_cols_by_table.get(ntn,set())
existing_indexes_for_table = [i for i in existing_indexes.get(etn, []) if not any([(c in deletes) for c in i.columns])]
to_run += calc_index_changes(db, migrator, existing_indexes_for_table, model, rename_cols_by_table.get(ntn, {}))
'''
to_run += calc_perms_changes($schema_tables, noop) unless $check_perms_for.empty?
'''
for tbl in table_deletes:
to_run += drop_table(migrator, tbl)
return to_run | python | def calc_changes(db, ignore_tables=None):
migrator = None # expose eventually?
if migrator is None:
migrator = auto_detect_migrator(db)
existing_tables = [unicode(t) for t in db.get_tables()]
existing_indexes = {table:get_indexes_by_table(db, table) for table in existing_tables}
existing_columns_by_table = get_columns_by_table(db)
foreign_keys_by_table = get_foreign_keys_by_table(db)
table_names_to_models = {_table_name(cls): cls for cls in all_models.keys()}
to_run = []
table_adds, add_fks, table_deletes, table_renames = calc_table_changes(existing_tables, ignore_tables=ignore_tables)
table_renamed_from = {v: k for k, v in table_renames.items()}
for tbl in table_adds:
to_run += create_table(table_names_to_models[tbl])
for field in add_fks:
if hasattr(field, '__pwdbev__not_deferred') and field.__pwdbev__not_deferred:
field.deferred = False
to_run += create_foreign_key(field)
for k, v in table_renames.items():
to_run += rename_table(migrator, k, v)
rename_cols_by_table = {}
deleted_cols_by_table = {}
for etn, ecols in existing_columns_by_table.items():
if etn in table_deletes: continue
ntn = table_renames.get(etn, etn)
model = table_names_to_models.get(ntn)
if not model: continue
defined_fields = model._meta.sorted_fields
defined_column_name_to_field = {unicode(_column_name(f)):f for f in defined_fields}
existing_fks_by_column = {fk.column:fk for fk in foreign_keys_by_table[etn]}
adds, deletes, renames, alter_statements = calc_column_changes(db, migrator, etn, ntn, ecols, defined_fields, existing_fks_by_column)
for column_name in adds:
field = defined_column_name_to_field[column_name]
to_run += alter_add_column(db, migrator, ntn, column_name, field)
if not field.null:
# alter_add_column strips null constraints
# add them back after setting any defaults
if field.default is not None:
to_run += set_default(db, migrator, ntn, column_name, field)
else:
to_run.append(('-- adding a not null column without a default will fail if the table is not empty',[]))
to_run += add_not_null(db, migrator, ntn, column_name, field)
for column_name in deletes:
fk = existing_fks_by_column.get(column_name)
if fk:
to_run += drop_foreign_key(db, migrator, ntn, fk.name)
to_run += drop_column(db, migrator, ntn, column_name)
for ocn, ncn in renames.items():
field = defined_column_name_to_field[ncn]
to_run += rename_column(db, migrator, ntn, ocn, ncn, field)
to_run += alter_statements
rename_cols_by_table[ntn] = renames
deleted_cols_by_table[ntn] = deletes
for ntn, model in table_names_to_models.items():
etn = table_renamed_from.get(ntn, ntn)
deletes = deleted_cols_by_table.get(ntn,set())
existing_indexes_for_table = [i for i in existing_indexes.get(etn, []) if not any([(c in deletes) for c in i.columns])]
to_run += calc_index_changes(db, migrator, existing_indexes_for_table, model, rename_cols_by_table.get(ntn, {}))
'''
to_run += calc_perms_changes($schema_tables, noop) unless $check_perms_for.empty?
'''
for tbl in table_deletes:
to_run += drop_table(migrator, tbl)
return to_run | to_run += calc_perms_changes($schema_tables, noop) unless $check_perms_for.empty? | https://github.com/keredson/peewee-db-evolve/blob/0cc4dc33935f01e2c2b4b8778a4a0ee10de22023/peeweedbevolve.py#L604-L676 |
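Most of `calc_changes` is driven by comparing what the database reports against what the models define; here is a toy sketch of the table-level add/delete split (the real `calc_table_changes` also detects renames and honours `ignore_tables`).

```python
def table_changes(existing_tables, defined_tables):
    # Toy version of the add/delete split used above.
    existing, defined = set(existing_tables), set(defined_tables)
    adds = defined - existing      # defined by a model but missing from the DB
    deletes = existing - defined   # present in the DB but no longer defined
    return adds, deletes

print(table_changes(["user", "old_log"], ["user", "invoice"]))
# ({'invoice'}, {'old_log'})
```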
StorjOld/pyp2p | pyp2p/sock.py | Sock.set_keep_alive | def set_keep_alive(self, sock, after_idle_sec=5, interval_sec=60,
max_fails=5):
"""
This function instructs the TCP socket to send a heartbeat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after after_idle_sec seconds of idleness, then sends a
keepalive ping once every interval_sec seconds, and closes the
connection after max_fails failed pings.
"""
# OSX
if platform.system() == "Darwin":
# scraped from /usr/include, not exported by python's socket module
TCP_KEEPALIVE = 0x10
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)
if platform.system() == "Windows":
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000))
if platform.system() == "Linux":
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
after_idle_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
interval_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) | python | def set_keep_alive(self, sock, after_idle_sec=5, interval_sec=60,
max_fails=5):
"""
This function instructs the TCP socket to send a heartbeat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after after_idle_sec seconds of idleness, then sends a
keepalive ping once every interval_sec seconds, and closes the
connection after max_fails failed pings.
"""
# OSX
if platform.system() == "Darwin":
# scraped from /usr/include, not exported by python's socket module
TCP_KEEPALIVE = 0x10
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)
if platform.system() == "Windows":
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000))
if platform.system() == "Linux":
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
after_idle_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
interval_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) | This function instructs the TCP socket to send a heartbeat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after after_idle_sec seconds of idleness, then sends a
keepalive ping once every interval_sec seconds, and closes the
connection after max_fails failed pings. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/sock.py#L99-L128 |
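A standalone sketch of the Linux branch above; `SO_KEEPALIVE`, `TCP_KEEPIDLE`, `TCP_KEEPINTVL`, and `TCP_KEEPCNT` are standard socket options, and the timing values are illustrative defaults.

```python
import socket

def enable_keepalive(sock, after_idle_sec=5, interval_sec=60, max_fails=5):
    # Linux-only: start probing after `after_idle_sec` idle seconds, probe every
    # `interval_sec` seconds, and drop the connection after `max_fails` missed probes.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
enable_keepalive(s)
```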
StorjOld/pyp2p | pyp2p/sock.py | Sock.parse_buf | def parse_buf(self, encoding="unicode"):
"""
Since TCP is a stream-oriented protocol, responses aren't guaranteed
to be complete when they arrive. The buffer stores all the data and
this function splits the data into replies based on the new line
delimiter.
"""
buf_len = len(self.buf)
replies = []
reply = b""
chop = 0
skip = 0
i = 0
buf_len = len(self.buf)
for i in range(0, buf_len):
ch = self.buf[i:i + 1]
if skip:
skip -= 1
i += 1
continue
nxt = i + 1
if nxt < buf_len:
if ch == b"\r" and self.buf[nxt:nxt + 1] == b"\n":
# Append new reply.
if reply != b"":
if encoding == "unicode":
replies.append(encode_str(reply, encoding))
else:
replies.append(reply)
reply = b""
# Truncate the whole buf if chop is out of bounds.
chop = nxt + 1
skip = 1
i += 1
continue
reply += ch
i += 1
# Truncate buf.
if chop:
self.buf = self.buf[chop:]
return replies | python | def parse_buf(self, encoding="unicode"):
"""
Since TCP is a stream-oriented protocol, responses aren't guaranteed
to be complete when they arrive. The buffer stores all the data and
this function splits the data into replies based on the new line
delimiter.
"""
buf_len = len(self.buf)
replies = []
reply = b""
chop = 0
skip = 0
i = 0
buf_len = len(self.buf)
for i in range(0, buf_len):
ch = self.buf[i:i + 1]
if skip:
skip -= 1
i += 1
continue
nxt = i + 1
if nxt < buf_len:
if ch == b"\r" and self.buf[nxt:nxt + 1] == b"\n":
# Append new reply.
if reply != b"":
if encoding == "unicode":
replies.append(encode_str(reply, encoding))
else:
replies.append(reply)
reply = b""
# Truncate the whole buf if chop is out of bounds.
chop = nxt + 1
skip = 1
i += 1
continue
reply += ch
i += 1
# Truncate buf.
if chop:
self.buf = self.buf[chop:]
return replies | Since TCP is a stream-oriented protocol, responses aren't guaranteed
to be complete when they arrive. The buffer stores all the data and
this function splits the data into replies based on the new line
delimiter. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/sock.py#L221-L267 |
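The core idea in `parse_buf` -- accumulate bytes, emit only complete CRLF-terminated replies, and keep the partial tail for the next read -- as a compact standalone sketch.

```python
def split_replies(buf):
    """Return (complete_replies, remaining_partial_bytes) for a CRLF-delimited buffer."""
    *lines, rest = buf.split(b"\r\n")
    # Skip empty fragments from consecutive delimiters, as parse_buf does.
    return [ln for ln in lines if ln], rest

replies, leftover = split_replies(b"PING\r\nPONG\r\nPARTI")
print(replies)   # [b'PING', b'PONG']
print(leftover)  # b'PARTI' -- held until the rest of the line arrives
```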
StorjOld/pyp2p | pyp2p/sock.py | Sock.get_chunks | def get_chunks(self, fixed_limit=None, encoding="unicode"):
"""
This is the function which handles retrieving new data chunks. Its
main logic is avoiding a recv call blocking forever and halting
the program flow. To do this, it manages errors and keeps an eye
on the buffer to avoid overflows and DoS attacks.
http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r
http://stackoverflow.com/questions/3187565/select-and-ssl-in-python
"""
# Socket is disconnected.
if not self.connected:
return
# Recv chunks until network buffer is empty.
repeat = 1
wait = 0.2
chunk_no = 0
max_buf = self.max_buf
max_chunks = self.max_chunks
if fixed_limit is not None:
max_buf = fixed_limit
max_chunks = fixed_limit
while repeat:
chunk_size = self.chunk_size
while True:
# Don't exceed buffer size.
buf_len = len(self.buf)
if buf_len >= max_buf:
break
remaining = max_buf - buf_len
if remaining < chunk_size:
chunk_size = remaining
# Don't allow non-blocking sockets to be
# DoSed by multiple small replies.
if chunk_no >= max_chunks and not self.blocking:
break
try:
chunk = self.s.recv(chunk_size)
except socket.timeout as e:
self.debug_print("Get chunks timed out.")
self.debug_print(e)
# Timeout on blocking sockets.
err = e.args[0]
self.debug_print(err)
if err == "timed out":
repeat = 0
break
except ssl.SSLError as e:
# Will block on non-blocking SSL sockets.
if e.errno == ssl.SSL_ERROR_WANT_READ:
self.debug_print("SSL_ERROR_WANT_READ")
break
else:
self.debug_print("Get chunks ssl error")
self.close()
return
except socket.error as e:
# Will block on nonblocking non-SSL sockets.
err = e.args[0]
if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
break
else:
# Connection closed or other problem.
self.debug_print("get chunks other closing")
self.close()
return
else:
if chunk == b"":
self.close()
return
# Avoid decoding errors.
self.buf += chunk
# Otherwise the loop will be endless.
if self.blocking:
break
# Used to avoid DoS of small packets.
chunk_no += 1
# Repeat is already set -- manual skip.
if not repeat:
break
else:
repeat = 0
# Block until there's a full reply or there's a timeout.
if self.blocking:
if fixed_limit is None:
# Partial response.
if self.delimiter not in self.buf:
repeat = 1
time.sleep(wait) | python | def get_chunks(self, fixed_limit=None, encoding="unicode"):
"""
This is the function which handles retrieving new data chunks. Its
main logic is avoiding a recv call blocking forever and halting
the program flow. To do this, it manages errors and keeps an eye
on the buffer to avoid overflows and DoS attacks.
http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r
http://stackoverflow.com/questions/3187565/select-and-ssl-in-python
"""
# Socket is disconnected.
if not self.connected:
return
# Recv chunks until network buffer is empty.
repeat = 1
wait = 0.2
chunk_no = 0
max_buf = self.max_buf
max_chunks = self.max_chunks
if fixed_limit is not None:
max_buf = fixed_limit
max_chunks = fixed_limit
while repeat:
chunk_size = self.chunk_size
while True:
# Don't exceed buffer size.
buf_len = len(self.buf)
if buf_len >= max_buf:
break
remaining = max_buf - buf_len
if remaining < chunk_size:
chunk_size = remaining
# Don't allow non-blocking sockets to be
# DoSed by multiple small replies.
if chunk_no >= max_chunks and not self.blocking:
break
try:
chunk = self.s.recv(chunk_size)
except socket.timeout as e:
self.debug_print("Get chunks timed out.")
self.debug_print(e)
# Timeout on blocking sockets.
err = e.args[0]
self.debug_print(err)
if err == "timed out":
repeat = 0
break
except ssl.SSLError as e:
# Will block on non-blocking SSL sockets.
if e.errno == ssl.SSL_ERROR_WANT_READ:
self.debug_print("SSL_ERROR_WANT_READ")
break
else:
self.debug_print("Get chunks ssl error")
self.close()
return
except socket.error as e:
# Will block on nonblocking non-SSL sockets.
err = e.args[0]
if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
break
else:
# Connection closed or other problem.
self.debug_print("get chunks other closing")
self.close()
return
else:
if chunk == b"":
self.close()
return
# Avoid decoding errors.
self.buf += chunk
# Otherwise the loop will be endless.
if self.blocking:
break
# Used to avoid DoS of small packets.
chunk_no += 1
# Repeat is already set -- manual skip.
if not repeat:
break
else:
repeat = 0
# Block until there's a full reply or there's a timeout.
if self.blocking:
if fixed_limit is None:
# Partial response.
if self.delimiter not in self.buf:
repeat = 1
time.sleep(wait) | This is the function which handles retrieving new data chunks. It's
main logic is avoiding a recv call blocking forever and halting
the program flow. To do this, it manages errors and keeps an eye
on the buffer to avoid overflows and DoS attacks.
http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r
http://stackoverflow.com/questions/3187565/select-and-ssl-in-python | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/sock.py#L270-L369 |
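The error handling in `get_chunks` boils down to treating EAGAIN/EWOULDBLOCK (and `SSL_ERROR_WANT_READ` for TLS sockets) as "no data yet" rather than as a failure; below is a minimal non-SSL sketch of that pattern.

```python
import errno
import socket

def drain_nonblocking(sock, chunk_size=1024, max_buf=64 * 1024):
    """Read whatever is currently available from a non-blocking socket."""
    buf = b""
    while len(buf) < max_buf:
        try:
            chunk = sock.recv(chunk_size)
        except socket.error as e:
            if e.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                break      # nothing more to read right now
            raise          # genuine error: let the caller close the connection
        if chunk == b"":
            raise ConnectionError("peer closed the connection")
        buf += chunk
    return buf
```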
StorjOld/pyp2p | pyp2p/net.py | Net.validate_node | def validate_node(self, node_ip, node_port=None, same_nodes=1):
self.debug_print("Validating: " + node_ip)
# Is this a valid IP?
if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
self.debug_print("Invalid node ip in validate node")
return 0
# Is this a valid port?
if node_port != 0 and node_port is not None:
if not is_valid_port(node_port):
self.debug_print("Invalid node port in validate port")
return 0
"""
Don't accept connections from self to passive server
or connections to already connected nodes.
"""
if not self.enable_duplicate_ip_cons:
# Don't connect to ourself.
if (node_ip == "127.0.0.1" or
node_ip == get_lan_ip(self.interface) or
node_ip == self.wan_ip):
self.debug_print("Cannot connect to ourself.")
return 0
# No, really: don't connect to ourself.
if node_ip == self.passive_bind and node_port == self.passive_port:
self.debug_print("Error connecting to same listen server.")
return 0
# Don't connect to same nodes.
if same_nodes:
for node in self.outbound + self.inbound:
try:
addr, port = node["con"].s.getpeername()
if node_ip == addr:
self.debug_print("Already connected to this node.")
return 0
except Exception as e:
print(e)
return 0
return 1 | python | def validate_node(self, node_ip, node_port=None, same_nodes=1):
self.debug_print("Validating: " + node_ip)
# Is this a valid IP?
if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
self.debug_print("Invalid node ip in validate node")
return 0
# Is this a valid port?
if node_port != 0 and node_port is not None:
if not is_valid_port(node_port):
self.debug_print("Invalid node port in validate port")
return 0
"""
Don't accept connections from self to passive server
or connections to already connected nodes.
"""
if not self.enable_duplicate_ip_cons:
# Don't connect to ourself.
if (node_ip == "127.0.0.1" or
node_ip == get_lan_ip(self.interface) or
node_ip == self.wan_ip):
self.debug_print("Cannot connect to ourself.")
return 0
# No, really: don't connect to ourself.
if node_ip == self.passive_bind and node_port == self.passive_port:
self.debug_print("Error connecting to same listen server.")
return 0
# Don't connect to same nodes.
if same_nodes:
for node in self.outbound + self.inbound:
try:
addr, port = node["con"].s.getpeername()
if node_ip == addr:
self.debug_print("Already connected to this node.")
return 0
except Exception as e:
print(e)
return 0
return 1 | Don't accept connections from self to passive server
or connections to already connected nodes. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L330-L373 |
StorjOld/pyp2p | pyp2p/net.py | Net.bootstrap | def bootstrap(self):
"""
When the software is first started, it needs to retrieve
a list of nodes to connect to in order to join the network. This function
asks the server for N nodes, which consist of at least N
passive nodes and N simultaneous nodes. The simultaneous
nodes are prioritized if the node_type for the machine
running this software is simultaneous, with passive nodes
being used as a fallback. Otherwise, the node exclusively
uses passive nodes to bootstrap.
This algorithm is designed to preserve passive nodes'
inbound connection slots.
"""
# Disable bootstrap.
if not self.enable_bootstrap:
return None
# Avoid raping the rendezvous server.
t = time.time()
if self.last_bootstrap is not None:
if t - self.last_bootstrap <= rendezvous_interval:
self.debug_print("Bootstrapped recently")
return None
self.last_bootstrap = t
self.debug_print("Searching for nodes to connect to.")
try:
connection_slots = self.max_outbound - (len(self.outbound))
if connection_slots > 0:
# Connect to rendezvous server.
rendezvous_con = self.rendezvous.server_connect()
# Retrieve random nodes to bootstrap with.
rendezvous_con.send_line("BOOTSTRAP " +
str(self.max_outbound * 2))
choices = rendezvous_con.recv_line(timeout=2)
if choices == "NODES EMPTY":
rendezvous_con.close()
self.debug_print("Node list is empty.")
return self
else:
self.debug_print("Found node list.")
# Parse node list.
choices = re.findall("(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices)
rendezvous_con.s.close()
# Attempt to make active simultaneous connections.
passive_nodes = []
for node in choices:
# Out of connection slots.
if not connection_slots:
break
# Add to list of passive nodes.
node_type, node_ip, node_port = node
self.debug_print(str(node))
if node_type == "p":
passive_nodes.append(node)
# Use passive to make up the remaining cons.
i = 0
while i < len(passive_nodes) and connection_slots > 0:
node_type, node_ip, node_port = passive_nodes[i]
con = self.add_node(node_ip, node_port, "passive")
if con is not None:
connection_slots -= 1
self.debug_print("Con successful.")
else:
self.debug_print("Con failed.")
i += 1
except Exception as e:
self.debug_print("Unknown error in bootstrap()")
error = parse_exception(e)
log_exception(self.error_log_path, error)
return self | python | def bootstrap(self):
"""
When the software is first started, it needs to retrieve
a list of nodes to connect to in order to join the network. This function
asks the server for N nodes, which consist of at least N
passive nodes and N simultaneous nodes. The simultaneous
nodes are prioritized if the node_type for the machine
running this software is simultaneous, with passive nodes
being used as a fallback. Otherwise, the node exclusively
uses passive nodes to bootstrap.
This algorithm is designed to preserve passive nodes'
inbound connection slots.
"""
# Disable bootstrap.
if not self.enable_bootstrap:
return None
# Avoid raping the rendezvous server.
t = time.time()
if self.last_bootstrap is not None:
if t - self.last_bootstrap <= rendezvous_interval:
self.debug_print("Bootstrapped recently")
return None
self.last_bootstrap = t
self.debug_print("Searching for nodes to connect to.")
try:
connection_slots = self.max_outbound - (len(self.outbound))
if connection_slots > 0:
# Connect to rendezvous server.
rendezvous_con = self.rendezvous.server_connect()
# Retrieve random nodes to bootstrap with.
rendezvous_con.send_line("BOOTSTRAP " +
str(self.max_outbound * 2))
choices = rendezvous_con.recv_line(timeout=2)
if choices == "NODES EMPTY":
rendezvous_con.close()
self.debug_print("Node list is empty.")
return self
else:
self.debug_print("Found node list.")
# Parse node list.
choices = re.findall("(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices)
rendezvous_con.s.close()
# Attempt to make active simultaneous connections.
passive_nodes = []
for node in choices:
# Out of connection slots.
if not connection_slots:
break
# Add to list of passive nodes.
node_type, node_ip, node_port = node
self.debug_print(str(node))
if node_type == "p":
passive_nodes.append(node)
# Use passive to make up the remaining cons.
i = 0
while i < len(passive_nodes) and connection_slots > 0:
node_type, node_ip, node_port = passive_nodes[i]
con = self.add_node(node_ip, node_port, "passive")
if con is not None:
connection_slots -= 1
self.debug_print("Con successful.")
else:
self.debug_print("Con failed.")
i += 1
except Exception as e:
self.debug_print("Unknown error in bootstrap()")
error = parse_exception(e)
log_exception(self.error_log_path, error)
return self | When the software is first started, it needs to retrieve
a list of nodes to connect to the network to. This function
asks the server for N nodes which consists of at least N
passive nodes and N simultaneous nodes. The simultaneous
nodes are prioritized if the node_type for the machine
running this software is simultaneous, with passive nodes
being used as a fallback. Otherwise, the node exclusively
uses passive nodes to bootstrap.
This algorithm is designed to preserve passive nodes'
inbound connection slots. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L481-L560 |
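The node list returned by the rendezvous server is parsed with the regular expression shown in bootstrap() above. A minimal standalone sketch of that parsing step (the reply string below is illustrative):

import re

# Illustrative reply: one passive ("p") and one simultaneous ("s") node.
reply = "p:203.0.113.7:44444 s:198.51.100.9:50505"

# Same pattern as bootstrap(), written as a raw string.
pattern = r"(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?"
for node_type, node_ip, node_port in re.findall(pattern, reply):
    print(node_type, node_ip, node_port)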
StorjOld/pyp2p | pyp2p/net.py | Net.advertise | def advertise(self):
"""
This function tells the rendezvous server that our node is ready to
accept connections from other nodes on the P2P network that run the
bootstrap function. It's only used when net_type == p2p
"""
# Advertise is disabled.
if not self.enable_advertise:
self.debug_print("Advertise is disbled!")
return None
# Direct net server is reserved for direct connections only.
if self.net_type == "direct" and self.node_type == "passive":
return None
# Net isn't started!.
if not self.is_net_started:
raise Exception("Please call start() before you call advertise()")
# Avoid hammering the rendezvous server with excessive requests.
t = time.time()
if self.last_advertise is not None:
if t - self.last_advertise <= advertise_interval:
return None
if len(self.inbound) >= self.min_connected:
return None
self.last_advertise = t
# Tell rendezvous server to list us.
try:
# We're a passive node.
if self.node_type == "passive" and\
self.passive_port is not None and\
self.enable_advertise:
self.rendezvous.passive_listen(self.passive_port,
self.max_inbound)
"""
Simultaneous open is only used as a fail-safe for connections to
nodes on the direct_net and only direct_net can list itself as
simultaneous, so it's safe to leave this enabled.
"""
if self.node_type == "simultaneous":
self.rendezvous.simultaneous_listen()
except Exception as e:
error = parse_exception(e)
log_exception(self.error_log_path, error)
return self | python | def advertise(self):
"""
This function tells the rendezvous server that our node is ready to
accept connections from other nodes on the P2P network that run the
bootstrap function. It's only used when net_type == p2p
"""
# Advertise is disabled.
if not self.enable_advertise:
self.debug_print("Advertise is disbled!")
return None
# Direct net server is reserved for direct connections only.
if self.net_type == "direct" and self.node_type == "passive":
return None
# Net isn't started!.
if not self.is_net_started:
raise Exception("Please call start() before you call advertise()")
# Avoid hammering the rendezvous server with excessive requests.
t = time.time()
if self.last_advertise is not None:
if t - self.last_advertise <= advertise_interval:
return None
if len(self.inbound) >= self.min_connected:
return None
self.last_advertise = t
# Tell rendezvous server to list us.
try:
# We're a passive node.
if self.node_type == "passive" and\
self.passive_port is not None and\
self.enable_advertise:
self.rendezvous.passive_listen(self.passive_port,
self.max_inbound)
"""
Simultaneous open is only used as a fail-safe for connections to
nodes on the direct_net and only direct_net can list itself as
simultaneous, so it's safe to leave this enabled.
"""
if self.node_type == "simultaneous":
self.rendezvous.simultaneous_listen()
except Exception as e:
error = parse_exception(e)
log_exception(self.error_log_path, error)
return self | This function tells the rendezvous server that our node is ready to
accept connections from other nodes on the P2P network that run the
bootstrap function. It's only used when net_type == p2p | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L562-L613 |
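bootstrap() and advertise() are both internally rate-limited, so they can be driven from a simple maintenance loop. A hedged sketch (node is assumed to be an already-started Net instance and handle_reply is a placeholder for application logic):

import time

def run_maintenance(node, handle_reply):
    while True:
        node.bootstrap()    # top up outbound slots (rate-limited by rendezvous_interval)
        node.advertise()    # refresh our listing (rate-limited by advertise_interval)
        for con in node:    # iterating keeps connections serviced
            for reply in con:
                handle_reply(con, reply)
        time.sleep(1)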
StorjOld/pyp2p | pyp2p/net.py | Net.determine_node | def determine_node(self):
"""
Determines the type of node based on a combination of forwarding
reachability and NAT type.
"""
# Manually set node_type as simultaneous.
if self.node_type == "simultaneous":
if self.nat_type != "unknown":
return "simultaneous"
# Get IP of binding interface.
unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
if self.passive_bind in unspecific_bind:
lan_ip = get_lan_ip(self.interface)
else:
lan_ip = self.passive_bind
# Passive node checks.
if lan_ip is not None \
and self.passive_port is not None and self.enable_forwarding:
self.debug_print("Checking if port is forwarded.")
# Check port isn't already forwarded.
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
msg = "Port already forwarded. Skipping NAT traversal."
self.debug_print(msg)
self.forwarding_type = "forwarded"
return "passive"
else:
self.debug_print("Port is not already forwarded.")
# Most routers.
try:
self.debug_print("Trying UPnP")
UPnP(self.interface).forward_port("TCP", self.passive_port,
lan_ip)
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
self.forwarding_type = "UPnP"
self.debug_print("Forwarded port with UPnP.")
else:
self.debug_print("UPnP failed to forward port.")
except Exception as e:
# Log exception.
error = parse_exception(e)
log_exception(self.error_log_path, error)
self.debug_print("UPnP failed to forward port.")
# Apple devices.
try:
self.debug_print("Trying NATPMP.")
NatPMP(self.interface).forward_port("TCP",
self.passive_port,
lan_ip)
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
self.forwarding_type = "NATPMP"
self.debug_print("Port forwarded with NATPMP.")
else:
self.debug_print("Failed to forward port with NATPMP.")
self.debug_print("Falling back on TCP hole punching or"
" proxying.")
except Exception as e:
# Log exception
error = parse_exception(e)
log_exception(self.error_log_path, error)
self.debug_print("Failed to forward port with NATPMP.")
# Check it worked.
if self.forwarding_type != "manual":
return "passive"
# Fail-safe node types.
if self.nat_type != "unknown":
return "simultaneous"
else:
return "active" | python | def determine_node(self):
"""
Determines the type of node based on a combination of forwarding
reachability and NAT type.
"""
# Manually set node_type as simultaneous.
if self.node_type == "simultaneous":
if self.nat_type != "unknown":
return "simultaneous"
# Get IP of binding interface.
unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
if self.passive_bind in unspecific_bind:
lan_ip = get_lan_ip(self.interface)
else:
lan_ip = self.passive_bind
# Passive node checks.
if lan_ip is not None \
and self.passive_port is not None and self.enable_forwarding:
self.debug_print("Checking if port is forwarded.")
# Check port isn't already forwarded.
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
msg = "Port already forwarded. Skipping NAT traversal."
self.debug_print(msg)
self.forwarding_type = "forwarded"
return "passive"
else:
self.debug_print("Port is not already forwarded.")
# Most routers.
try:
self.debug_print("Trying UPnP")
UPnP(self.interface).forward_port("TCP", self.passive_port,
lan_ip)
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
self.forwarding_type = "UPnP"
self.debug_print("Forwarded port with UPnP.")
else:
self.debug_print("UPnP failed to forward port.")
except Exception as e:
# Log exception.
error = parse_exception(e)
log_exception(self.error_log_path, error)
self.debug_print("UPnP failed to forward port.")
# Apple devices.
try:
self.debug_print("Trying NATPMP.")
NatPMP(self.interface).forward_port("TCP",
self.passive_port,
lan_ip)
if is_port_forwarded(lan_ip, self.passive_port, "TCP",
self.forwarding_servers):
self.forwarding_type = "NATPMP"
self.debug_print("Port forwarded with NATPMP.")
else:
self.debug_print("Failed to forward port with NATPMP.")
self.debug_print("Falling back on TCP hole punching or"
" proxying.")
except Exception as e:
# Log exception
error = parse_exception(e)
log_exception(self.error_log_path, error)
self.debug_print("Failed to forward port with NATPMP.")
# Check it worked.
if self.forwarding_type != "manual":
return "passive"
# Fail-safe node types.
if self.nat_type != "unknown":
return "simultaneous"
else:
return "active" | Determines the type of node based on a combination of forwarding
reachability and NAT type. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L615-L697 |
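A simplified restatement of the decision order in determine_node() (illustrative only; the real method also performs the UPnP/NATPMP forwarding attempts whose results it checks):

def classify_node(port_forwarded, nat_type):
    # 1. Reachable via a forwarded port (manual, UPnP or NATPMP) -> passive.
    if port_forwarded:
        return "passive"
    # 2. Predictable NAT -> TCP hole punching is possible -> simultaneous.
    if nat_type != "unknown":
        return "simultaneous"
    # 3. Otherwise the node can only make outbound connections -> active.
    return "active"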
StorjOld/pyp2p | pyp2p/net.py | Net.start | def start(self):
"""
This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class.
"""
self.debug_print("Starting networking.")
self.debug_print("Make sure to iterate over replies if you need"
" connection alive management!")
# Register a cnt + c handler
signal.signal(signal.SIGINT, self.stop)
# Save WAN IP.
self.debug_print("WAN IP = " + str(self.wan_ip))
# Check rendezvous server is up.
try:
rendezvous_con = self.rendezvous.server_connect()
rendezvous_con.close()
except:
raise Exception("Unable to connect to rendezvous server.")
# Started no matter what
# since LAN connections are always possible.
self.start_passive_server()
# Determine NAT type.
if self.nat_type == "unknown":
self.debug_print("Determining NAT type.")
nat_type = self.rendezvous.determine_nat()
if nat_type is not None and nat_type != "unknown":
self.nat_type = nat_type
self.rendezvous.nat_type = nat_type
self.debug_print("NAT type = " + nat_type)
else:
self.debug_print("Unable to determine NAT type.")
# Check NAT type if node is simultaneous
# is manually specified.
if self.node_type == "simultaneous":
if self.nat_type not in self.rendezvous.predictable_nats:
self.debug_print("Manual setting of simultanous specified but"
" ignored since NAT does not support it.")
self.node_type = "active"
else:
# Determine node type.
self.debug_print("Determining node type.")
# No checks for manually specifying passive
# (there probably should be.)
if self.node_type == "unknown":
self.node_type = self.determine_node()
# Prevent P2P nodes from running as simultaneous.
if self.net_type == "p2p":
"""
TCP hole punching is reserved specifically for direct networks
(a net object reserved for receiving direct connections);
p2p is for connecting to the main network. The reason for this
is you can't do multiple TCP hole punches at the same time, so
it's reserved for the direct network where it's most needed.
"""
if self.node_type == "simultaneous":
self.debug_print("Simultaneous is not allowed for P2P")
self.node_type = "active"
self.disable_simultaneous()
self.debug_print("Node type = " + self.node_type)
# Close stray cons from determine_node() tests.
self.close_cons()
# Set net started status.
self.is_net_started = 1
# Initialise our UNL details.
self.unl = UNL(
net=self,
dht_node=self.dht_node,
wan_ip=self.wan_ip
)
# Nestled calls.
return self | python | def start(self):
"""
This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class.
"""
self.debug_print("Starting networking.")
self.debug_print("Make sure to iterate over replies if you need"
" connection alive management!")
# Register a cnt + c handler
signal.signal(signal.SIGINT, self.stop)
# Save WAN IP.
self.debug_print("WAN IP = " + str(self.wan_ip))
# Check rendezvous server is up.
try:
rendezvous_con = self.rendezvous.server_connect()
rendezvous_con.close()
except:
raise Exception("Unable to connect to rendezvous server.")
# Started no matter what
# since LAN connections are always possible.
self.start_passive_server()
# Determine NAT type.
if self.nat_type == "unknown":
self.debug_print("Determining NAT type.")
nat_type = self.rendezvous.determine_nat()
if nat_type is not None and nat_type != "unknown":
self.nat_type = nat_type
self.rendezvous.nat_type = nat_type
self.debug_print("NAT type = " + nat_type)
else:
self.debug_print("Unable to determine NAT type.")
# Check NAT type if node is simultaneous
# is manually specified.
if self.node_type == "simultaneous":
if self.nat_type not in self.rendezvous.predictable_nats:
self.debug_print("Manual setting of simultanous specified but"
" ignored since NAT does not support it.")
self.node_type = "active"
else:
# Determine node type.
self.debug_print("Determining node type.")
# No checks for manually specifying passive
# (there probably should be.)
if self.node_type == "unknown":
self.node_type = self.determine_node()
# Prevent P2P nodes from running as simultaneous.
if self.net_type == "p2p":
"""
TCP hole punching is reserved specifically for direct networks
(a net object reserved for receiving direct connections);
p2p is for connecting to the main network. The reason for this
is you can't do multiple TCP hole punches at the same time, so
it's reserved for the direct network where it's most needed.
"""
if self.node_type == "simultaneous":
self.debug_print("Simultaneous is not allowed for P2P")
self.node_type = "active"
self.disable_simultaneous()
self.debug_print("Node type = " + self.node_type)
# Close stray cons from determine_node() tests.
self.close_cons()
# Set net started status.
self.is_net_started = 1
# Initialise our UNL details.
self.unl = UNL(
net=self,
dht_node=self.dht_node,
wan_ip=self.wan_ip
)
# Nestled calls.
return self | This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L710-L795 |
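A minimal start-up sketch, assuming the Net keyword arguments shown here (they are illustrative and may not match the constructor exactly):

import time
from pyp2p.net import Net

node = Net(node_type="passive", passive_port=44444, debug=1)  # illustrative args
node.start()       # NAT/node type detection + passive server
node.advertise()   # list ourselves on the rendezvous server
try:
    while True:
        for con in node:   # keep connections serviced
            pass
        time.sleep(0.5)
finally:
    node.stop()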
StorjOld/pyp2p | pyp2p/net.py | Net.stop | def stop(self, signum=None, frame=None):
self.debug_print("Stopping networking.")
if self.passive is not None:
try:
self.passive.shutdown(1)
except:
pass
self.passive.close()
self.passive = None
if self.last_advertise is not None:
self.rendezvous.leave_fight()
"""
Just let the threads timeout by themselves.
Otherwise mutex deadlocks could occur.
for unl_thread in self.unl.unl_threads:
unl_thread.exit()
"""
for con in self:
con.close()
if signum is not None:
raise Exception("Process was interrupted.") | python | def stop(self, signum=None, frame=None):
self.debug_print("Stopping networking.")
if self.passive is not None:
try:
self.passive.shutdown(1)
except:
pass
self.passive.close()
self.passive = None
if self.last_advertise is not None:
self.rendezvous.leave_fight()
"""
Just let the threads timeout by themselves.
Otherwise mutex deadlocks could occur.
for unl_thread in self.unl.unl_threads:
unl_thread.exit()
"""
for con in self:
con.close()
if signum is not None:
raise Exception("Process was interrupted.") | Just let the threads timeout by themselves.
Otherwise mutex deadlocks could occur.
for unl_thread in self.unl.unl_threads:
unl_thread.exit() | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L797-L822 |
StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.send_remote_port | def send_remote_port(self):
"""
Sends the remote port mapped for the connection.
This port is surprisingly often the same as the locally
bound port for an endpoint because a lot of NAT types
preserve the port.
"""
msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port))
self.send_line(msg) | python | def send_remote_port(self):
"""
Sends the remote port mapped for the connection.
This port is surprisingly often the same as the locally
bound port for an endpoint because a lot of NAT types
preserve the port.
"""
msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port))
self.send_line(msg) | Sends the remote port mapped for the connection.
This port is surprisingly often the same as the locally
bound port for an endpoint because a lot of NAT types
preserve the port. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L76-L84 |
StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.cleanup_candidates | def cleanup_candidates(self, node_ip):
"""
Removes old TCP hole punching candidates for a
designated node if a certain amount of time has passed
since they last connected.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
elapsed = int(time.time() - candidate["time"])
if elapsed > self.challege_timeout:
old_candidates.append(candidate)
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate) | python | def cleanup_candidates(self, node_ip):
"""
Removes old TCP hole punching candidates for a
designated node if a certain amount of time has passed
since they last connected.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
elapsed = int(time.time() - candidate["time"])
if elapsed > self.challege_timeout:
old_candidates.append(candidate)
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate) | Removes old TCP hole punching candidates for a
designated node if a certain amount of time has passed
since they last connected. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L108-L122 |
StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.propogate_candidates | def propogate_candidates(self, node_ip):
"""
Used to propagate new candidates to passive simultaneous
nodes.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Not connected.
if not candidate["con"].connected:
continue
# Already sent -- updated when they accept this challenge.
if candidate["propogated"]:
continue
# Notify node of challenge from client.
msg = "CHALLENGE %s %s %s" % (
candidate["ip_addr"],
" ".join(map(str, candidate["predictions"])),
candidate["proto"])
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
old_candidates.append(candidate) | python | def propogate_candidates(self, node_ip):
"""
Used to progate new candidates to passive simultaneous
nodes.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Not connected.
if not candidate["con"].connected:
continue
# Already sent -- updated when they accept this challenge.
if candidate["propogated"]:
continue
# Notify node of challenge from client.
msg = "CHALLENGE %s %s %s" % (
candidate["ip_addr"],
" ".join(map(str, candidate["predictions"])),
candidate["proto"])
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
old_candidates.append(candidate) | Used to progate new candidates to passive simultaneous
nodes. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L124-L149 |
StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.synchronize_simultaneous | def synchronize_simultaneous(self, node_ip):
"""
Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt.
"""
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] -\
self.factory.nodes["simultaneous"][node_ip]["time"] >\
self.challege_timeout:
msg = "RECONNECT"
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
return
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip) | python | def synchronize_simultaneous(self, node_ip):
"""
Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt.
"""
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] -\
self.factory.nodes["simultaneous"][node_ip]["time"] >\
self.challege_timeout:
msg = "RECONNECT"
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
return
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip) | Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L151-L177 |
StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.connectionLost | def connectionLost(self, reason):
"""
Mostly handles clean-up of node + candidate structures.
Avoids memory exhaustion for a large number of connections.
"""
try:
self.connected = False
if debug:
print(self.log_entry("CLOSED =", "none"))
# Every five minutes: cleanup
t = time.time()
if time.time() - self.factory.last_cleanup >= self.cleanup:
self.factory.last_cleanup = t
# Delete old passive nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["passive"]):
passive_node = self.factory.nodes["passive"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - passive_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["passive"][node_ip]
# Delete old simultaneous nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["simultaneous"]):
simultaneous_node =\
self.factory.nodes["simultaneous"][node_ip]
# Gives enough time for simultaneous nodes to receive clients.
if t - simultaneous_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["simultaneous"][node_ip]
# Delete old candidates and candidate structs.
old_node_ips = []
for node_ip in list(self.factory.candidates):
# Record old candidates.
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Hole punching is ms time sensitive.
# For candidates older than this it's safe to assume
# they're not needed.
if node_ip not in self.factory.nodes["simultaneous"] \
and t - candidate["time"] >= self.challenge_timeout * 5:
old_candidates.append(candidate)
# Remove old candidates.
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate)
# Record old node IPs.
if not len(self.factory.candidates[node_ip]) and \
node_ip not in self.factory.nodes["simultaneous"]:
old_node_ips.append(node_ip)
# Remove old node IPs.
for node_ip in old_node_ips:
del self.factory.candidates[node_ip]
except Exception as e:
error = parse_exception(e)
log_exception(error_log_path, error)
print(self.log_entry("ERROR =", error)) | python | def connectionLost(self, reason):
"""
Mostly handles clean-up of node + candidate structures.
Avoids memory exhaustion for a large number of connections.
"""
try:
self.connected = False
if debug:
print(self.log_entry("CLOSED =", "none"))
# Every five minutes: cleanup
t = time.time()
if time.time() - self.factory.last_cleanup >= self.cleanup:
self.factory.last_cleanup = t
# Delete old passive nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["passive"]):
passive_node = self.factory.nodes["passive"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - passive_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["passive"][node_ip]
# Delete old simultaneous nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["simultaneous"]):
simultaneous_node =\
self.factory.nodes["simultaneous"][node_ip]
# Gives enough time for simultaneous nodes to receive clients.
if t - simultaneous_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["simultaneous"][node_ip]
# Delete old candidates and candidate structs.
old_node_ips = []
for node_ip in list(self.factory.candidates):
# Record old candidates.
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Hole punching is ms time sensitive.
# For candidates older than this it's safe to assume
# they're not needed.
if node_ip not in self.factory.nodes["simultaneous"] \
and t - candidate["time"] >= self.challenge_timeout * 5:
old_candidates.append(candidate)
# Remove old candidates.
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate)
# Record old node IPs.
if not len(self.factory.candidates[node_ip]) and \
node_ip not in self.factory.nodes["simultaneous"]:
old_node_ips.append(node_ip)
# Remove old node IPs.
for node_ip in old_node_ips:
del self.factory.candidates[node_ip]
except Exception as e:
error = parse_exception(e)
log_exception(error_log_path, error)
print(self.log_entry("ERROR =", error)) | Mostly handles clean-up of node + candidate structures.
Avoids memory exhaustion for a large number of connections. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L197-L261 |
StorjOld/pyp2p | pyp2p/ipgetter.py | IPgetter.get_external_ip | def get_external_ip(self):
"""
This function gets your IP from a random server
"""
random.shuffle(self.server_list)
myip = ''
for server in self.server_list[:3]:
myip = self.fetch(server)
if myip != '':
return myip
else:
continue
return '' | python | def get_external_ip(self):
"""
This function gets your IP from a random server
"""
random.shuffle(self.server_list)
myip = ''
for server in self.server_list[:3]:
myip = self.fetch(server)
if myip != '':
return myip
else:
continue
return '' | This function gets your IP from a random server | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/ipgetter.py#L109-L122 |
StorjOld/pyp2p | pyp2p/ipgetter.py | IPgetter.fetch | def fetch(self, server):
"""
This function gets your IP from a specific server
"""
t = None
socket_default_timeout = socket.getdefaulttimeout()
opener = urllib.build_opener()
opener.addheaders = [('User-agent',
"Mozilla/5.0 (X11; Linux x86_64; rv:24.0)"
" Gecko/20100101 Firefox/24.0")]
try:
# Close url resource if fetching not finished within timeout.
t = Timer(self.timeout, self.handle_timeout, [self.url])
t.start()
# Open URL.
if version_info[0:2] == (2, 5):
# Support for Python 2.5.* using socket hack
# (Changes global socket timeout.)
socket.setdefaulttimeout(self.timeout)
self.url = opener.open(server)
else:
self.url = opener.open(server, timeout=self.timeout)
# Read response.
content = self.url.read()
# Didn't want to import chardet. Preferred to stick to stdlib.
if PY3K:
try:
content = content.decode('UTF-8')
except UnicodeDecodeError:
content = content.decode('ISO-8859-1')
p = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.('
p += '25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|['
p += '01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
m = re.search(
p,
content)
myip = m.group(0)
if len(myip) > 0:
return myip
else:
return ''
except Exception as e:
print(e)
return ''
finally:
if self.url is not None:
self.url.close()
self.url = None
if t is not None:
t.cancel()
# Reset default socket timeout.
if socket.getdefaulttimeout() != socket_default_timeout:
socket.setdefaulttimeout(socket_default_timeout) | python | def fetch(self, server):
"""
This function gets your IP from a specific server
"""
t = None
socket_default_timeout = socket.getdefaulttimeout()
opener = urllib.build_opener()
opener.addheaders = [('User-agent',
"Mozilla/5.0 (X11; Linux x86_64; rv:24.0)"
" Gecko/20100101 Firefox/24.0")]
try:
# Close url resource if fetching not finished within timeout.
t = Timer(self.timeout, self.handle_timeout, [self.url])
t.start()
# Open URL.
if version_info[0:2] == (2, 5):
# Support for Python 2.5.* using socket hack
# (Changes global socket timeout.)
socket.setdefaulttimeout(self.timeout)
self.url = opener.open(server)
else:
self.url = opener.open(server, timeout=self.timeout)
# Read response.
content = self.url.read()
# Didn't want to import chardet. Preferred to stick to stdlib.
if PY3K:
try:
content = content.decode('UTF-8')
except UnicodeDecodeError:
content = content.decode('ISO-8859-1')
p = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.('
p += '25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|['
p += '01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
m = re.search(
p,
content)
myip = m.group(0)
if len(myip) > 0:
return myip
else:
return ''
except Exception as e:
print(e)
return ''
finally:
if self.url is not None:
self.url.close()
self.url = None
if t is not None:
t.cancel()
# Reset default socket timeout.
if socket.getdefaulttimeout() != socket_default_timeout:
socket.setdefaulttimeout(socket_default_timeout) | This function gets your IP from a specific server | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/ipgetter.py#L129-L187 |
StorjOld/pyp2p | pyp2p/unl.py | UNL.connect_handler | def connect_handler(self, their_unl, events, force_master, hairpin, nonce):
# Figure out who should make the connection.
our_unl = self.value.encode("ascii")
their_unl = their_unl.encode("ascii")
master = self.is_master(their_unl)
"""
Master defines who connects if either side can. It's used to
eliminate having multiple connections with the same host.
"""
if force_master:
master = 1
# Deconstruct binary UNLs into dicts.
our_unl = self.deconstruct(our_unl)
their_unl = self.deconstruct(their_unl)
if our_unl is None:
raise Exception("Unable to deconstruct our UNL.")
if their_unl is None:
raise Exception("Unable to deconstruct their UNL.")
# This means the nodes are behind the same router.
if our_unl["wan_ip"] == their_unl["wan_ip"]:
# Connect to LAN IP.
our_unl["wan_ip"] = our_unl["lan_ip"]
their_unl["wan_ip"] = their_unl["lan_ip"]
# Already behind NAT so no forwarding needed.
if hairpin:
our_unl["node_type"] = "passive"
their_unl["node_type"] = "passive"
# Generate con ID.
if nonce != "0" * 64:
# Convert nonce to bytes.
if sys.version_info >= (3, 0, 0):
if type(nonce) == str:
nonce.encode("ascii")
else:
if type(nonce) == unicode:
nonce = str(nonce)
# Check nonce length.
assert(len(nonce) == 64)
# Create con ID.
con_id = self.net.generate_con_id(
nonce,
our_unl["wan_ip"],
their_unl["wan_ip"]
)
else:
con_id = None
# Acquire mutex.
self.mutex.acquire()
# Wait for other UNLs to finish.
end_time = time.time()
end_time += len(self.pending_unls) * 60
self.debug_print("Waiting for other unls to finish")
while their_unl in self.pending_unls and time.time() < end_time:
# This is an undifferentiated duplicate.
if events is None:
self.mutex.release()
return
time.sleep(1)
self.debug_print("Other unl finished")
is_exception = 0
try:
# Wait for any other hole punches to finish.
if (their_unl["node_type"] == "simultaneous" and
our_unl["node_type"] != "passive"):
self.pending_sim_open.append(their_unl["value"])
end_time = time.time()
end_time += len(self.pending_unls) * 60
self.debug_print("wait for other hole punches to finish")
while len(self.pending_sim_open) and time.time() < end_time:
if self.pending_sim_open[0] == their_unl["value"]:
break
time.sleep(1)
self.debug_print("other hole punches finished")
# Set pending UNL.
self.pending_unls.append(their_unl)
# Release mutex.
self.mutex.release()
# Get connection.
con = self.get_connection(
our_unl,
their_unl,
master,
nonce,
force_master,
con_id
)
except Exception as e:
is_exception = 1
print(e)
print("EXCEPTION IN UNL.GET_CONNECTION")
log_exception("error.log", parse_exception(e))
finally:
# Release mutex.
if self.mutex.locked() and is_exception:
self.mutex.release()
# Undo pending connect state.
if their_unl in self.pending_unls:
self.pending_unls.remove(their_unl)
# Undo pending sim open.
if len(self.pending_sim_open):
if self.pending_sim_open[0] == their_unl["value"]:
self.pending_sim_open = self.pending_sim_open[1:]
# Only execute events if this function was called manually.
if events is not None:
# Success.
if con is not None:
if "success" in events:
events["success"](con)
# Failure.
if con is None:
if "failure" in events:
events["failure"](con) | python | def connect_handler(self, their_unl, events, force_master, hairpin, nonce):
# Figure out who should make the connection.
our_unl = self.value.encode("ascii")
their_unl = their_unl.encode("ascii")
master = self.is_master(their_unl)
"""
Master defines who connects if either side can. It's used to
eliminate having multiple connections with the same host.
"""
if force_master:
master = 1
# Deconstruct binary UNLs into dicts.
our_unl = self.deconstruct(our_unl)
their_unl = self.deconstruct(their_unl)
if our_unl is None:
raise Exception("Unable to deconstruct our UNL.")
if their_unl is None:
raise Exception("Unable to deconstruct their UNL.")
# This means the nodes are behind the same router.
if our_unl["wan_ip"] == their_unl["wan_ip"]:
# Connect to LAN IP.
our_unl["wan_ip"] = our_unl["lan_ip"]
their_unl["wan_ip"] = their_unl["lan_ip"]
# Already behind NAT so no forwarding needed.
if hairpin:
our_unl["node_type"] = "passive"
their_unl["node_type"] = "passive"
# Generate con ID.
if nonce != "0" * 64:
# Convert nonce to bytes.
if sys.version_info >= (3, 0, 0):
if type(nonce) == str:
nonce.encode("ascii")
else:
if type(nonce) == unicode:
nonce = str(nonce)
# Check nonce length.
assert(len(nonce) == 64)
# Create con ID.
con_id = self.net.generate_con_id(
nonce,
our_unl["wan_ip"],
their_unl["wan_ip"]
)
else:
con_id = None
# Acquire mutex.
self.mutex.acquire()
# Wait for other UNLs to finish.
end_time = time.time()
end_time += len(self.pending_unls) * 60
self.debug_print("Waiting for other unls to finish")
while their_unl in self.pending_unls and time.time() < end_time:
# This is an undifferentiated duplicate.
if events is None:
self.mutex.release()
return
time.sleep(1)
self.debug_print("Other unl finished")
is_exception = 0
try:
# Wait for any other hole punches to finish.
if (their_unl["node_type"] == "simultaneous" and
our_unl["node_type"] != "passive"):
self.pending_sim_open.append(their_unl["value"])
end_time = time.time()
end_time += len(self.pending_unls) * 60
self.debug_print("wait for other hole punches to finish")
while len(self.pending_sim_open) and time.time() < end_time:
if self.pending_sim_open[0] == their_unl["value"]:
break
time.sleep(1)
self.debug_print("other hole punches finished")
# Set pending UNL.
self.pending_unls.append(their_unl)
# Release mutex.
self.mutex.release()
# Get connection.
con = self.get_connection(
our_unl,
their_unl,
master,
nonce,
force_master,
con_id
)
except Exception as e:
is_exception = 1
print(e)
print("EXCEPTION IN UNL.GET_CONNECTION")
log_exception("error.log", parse_exception(e))
finally:
# Release mutex.
if self.mutex.locked() and is_exception:
self.mutex.release()
# Undo pending connect state.
if their_unl in self.pending_unls:
self.pending_unls.remove(their_unl)
# Undo pending sim open.
if len(self.pending_sim_open):
if self.pending_sim_open[0] == their_unl["value"]:
self.pending_sim_open = self.pending_sim_open[1:]
# Only execute events if this function was called manually.
if events is not None:
# Success.
if con is not None:
if "success" in events:
events["success"](con)
# Failure.
if con is None:
if "failure" in events:
events["failure"](con) | Master defines who connects if either side can. It's used to
eliminate having multiple connections with the same host. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/unl.py#L239-L373 |
StorjOld/pyp2p | pyp2p/unl.py | UNL.connect | def connect(self, their_unl, events, force_master=1, hairpin=1,
nonce="0" * 64):
"""
A new thread is spawned because many of the connection techniques
rely on sleep to determine connection outcome or to synchronise hole
punching techniques. If the sleep is in its own thread it won't
block main execution.
"""
parms = (their_unl, events, force_master, hairpin, nonce)
t = Thread(target=self.connect_handler, args=parms)
t.start()
self.unl_threads.append(t) | python | def connect(self, their_unl, events, force_master=1, hairpin=1,
nonce="0" * 64):
"""
A new thread is spawned because many of the connection techniques
rely on sleep to determine connection outcome or to synchronise hole
punching techniques. If the sleep is in its own thread it won't
block main execution.
"""
parms = (their_unl, events, force_master, hairpin, nonce)
t = Thread(target=self.connect_handler, args=parms)
t.start()
self.unl_threads.append(t) | A new thread is spawned because many of the connection techniques
rely on sleep to determine connection outcome or to synchronise hole
punching techniques. If the sleep is in its own thread it won't
block main execution. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/unl.py#L375-L386 |
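A hedged sketch of driving connect() with the events dict that connect_handler() consumes above (alice and bob are assumed to be started Net instances, so their unl attributes are set):

def link_nodes(alice, bob):
    def on_success(con):
        con.send_line("HELLO")        # con is the established Sock
    def on_failure(con):
        print("UNL connect failed")   # con is None in the failure case
    alice.unl.connect(bob.unl.value,
                      {"success": on_success, "failure": on_failure})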
StorjOld/pyp2p | pyp2p/sys_clock.py | SysClock.calculate_clock_skew | def calculate_clock_skew(self):
"""
Compute the average and standard deviation
using all the data points.
"""
n = self.statx_n(self.data_points)
"""
Required to be able to compute the standard
deviation.
"""
if n < 1:
return Decimal("0")
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
"""
Incrementally remove aberration points.
"""
for k in range(0, self.clean_steps):
"""
Remove aberration points: keep only
the sigma range around the average.
"""
min_val = avg - sdev
max_val = avg + sdev
cleaned_data_points = []
for i in range(0, n):
v = self.data_points[i]
if v < min_val or v > max_val:
continue
cleaned_data_points.append(v)
self.data_points = cleaned_data_points[:]
"""
Recompute the new average using the
"sound" points we kept.
"""
n = self.statx_n(self.data_points)
"""
Not enough data to compute standard
deviation.
"""
if n < 2:
break
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
if sdev <= self.max_sdev or n < self.min_data:
break
"""
If standard deviation is too large still, we
cannot update our clock. Collect more points.
If we don't have a minimum amount of data,
don't attempt the update yet, continue collecting.
"""
if sdev > self.max_sdev or n < self.min_data:
return Decimal("0")
return avg | python | def calculate_clock_skew(self):
"""
Compute the average and standard deviation
using all the data points.
"""
n = self.statx_n(self.data_points)
"""
Required to be able to compute the standard
deviation.
"""
if n < 1:
return Decimal("0")
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
"""
Incrementally remove aberration points.
"""
for k in range(0, self.clean_steps):
"""
Remove aberration points: keep only
the sigma range around the average.
"""
min_val = avg - sdev
max_val = avg + sdev
cleaned_data_points = []
for i in range(0, n):
v = self.data_points[i]
if v < min_val or v > max_val:
continue
cleaned_data_points.append(v)
self.data_points = cleaned_data_points[:]
"""
Recompute the new average using the
"sound" points we kept.
"""
n = self.statx_n(self.data_points)
"""
Not enough data to compute standard
deviation.
"""
if n < 2:
break
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
if sdev <= self.max_sdev or n < self.min_data:
break
"""
If standard deviation is too large still, we
cannot update our clock. Collect more points.
If we don't have a minimum amount of data,
don't attempt the update yet, continue collecting.
"""
if sdev > self.max_sdev or n < self.min_data:
return Decimal("0")
return avg | Computer average and standard deviation
using all the data points. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/sys_clock.py#L66-L132 |
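A small numeric sketch of the trimming step in calculate_clock_skew(): points outside one standard deviation of the mean are discarded before the mean is recomputed (stdlib statistics used purely for illustration):

from statistics import mean, pstdev

offsets = [0.12, 0.15, 0.13, 2.90, 0.14]          # seconds; 2.90 is an aberration
avg, sdev = mean(offsets), pstdev(offsets)
kept = [v for v in offsets if avg - sdev <= v <= avg + sdev]
print(round(mean(kept), 3))                        # 0.135 once the outlier is dropped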
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.attend_fight | def attend_fight(self, mappings, node_ip, predictions, ntp):
"""
This function is for starting and managing a fight
once the details are known. It also handles the
task of returning any valid connections (if any) that
may be returned from threads in the simultaneous_fight function.
"""
# Bind listen server socket.
mappings = self.add_listen_sock(mappings)
log.debug(mappings)
# Walk to fight.
self.simultaneous_cons = []
predictions = predictions.split(" ")
self.simultaneous_fight(mappings, node_ip, predictions, ntp)
# Return hole made in opponent.
if len(self.simultaneous_cons):
"""
There may be a problem here. I noticed that when these lines
were removed during testing that connections tended to
succeed more. There may be a lack of synchronization between
the timing for connections to succeed so that a close on
one side of the fight ends up ruining valid connections on
this side. Will need to test more.
Notes: the UNL synchronization code could actually fix
this (potential) problem as a cool unintended side-effect.
"""
# Close unneeded holes.
"""
for i in range(1, len(self.simultaneous_cons)):
try:
print("Closing unneeded hole")
#self.simultaneous_cons[i].s.close()
except:
pass
"""
try:
# Return open hole.
return self.simultaneous_cons[0]
except:
# Try accept a connection.
log.debug("No holes found")
for mapping in mappings:
# Check if there's a new con.
s = mapping["listen"]
r, w, e = select.select(
[s],
[],
[],
0
)
# Find socket.
for found_sock in r:
# Not us.
if found_sock != s:
continue
# Accept a new con from the listen queue.
log.debug("Accept logic works!")
client, address = s.accept()
con = Sock(blocking=0)
con.set_sock(client)
return con
return None | python | def attend_fight(self, mappings, node_ip, predictions, ntp):
"""
This function is for starting and managing a fight
once the details are known. It also handles the
task of returning any valid connections (if any) that
may be returned from threads in the simultaneous_fight function.
"""
# Bind listen server socket.
mappings = self.add_listen_sock(mappings)
log.debug(mappings)
# Walk to fight.
self.simultaneous_cons = []
predictions = predictions.split(" ")
self.simultaneous_fight(mappings, node_ip, predictions, ntp)
# Return hole made in opponent.
if len(self.simultaneous_cons):
"""
There may be a problem here. I noticed that when these lines
were removed during testing that connections tended to
succeed more. There may be a lack of synchronization between
the timing for connections to succeed so that a close on
one side of the fight ends up ruining valid connections on
this side. Will need to test more.
Notes: the UNL synchronization code could actually fix
this (potential) problem as a cool unintended side-effect.
"""
# Close unneeded holes.
"""
for i in range(1, len(self.simultaneous_cons)):
try:
print("Closing unneeded hole")
#self.simultaneous_cons[i].s.close()
except:
pass
"""
try:
# Return open hole.
return self.simultaneous_cons[0]
except:
# Try accept a connection.
log.debug("No holes found")
for mapping in mappings:
# Check if there's a new con.
s = mapping["listen"]
r, w, e = select.select(
[s],
[],
[],
0
)
# Find socket.
for found_sock in r:
# Not us.
if found_sock != s:
continue
# Accept a new con from the listen queue.
log.debug("Accept logic works!")
client, address = s.accept()
con = Sock(blocking=0)
con.set_sock(client)
return con
return None | This function is for starting and managing a fight
once the details are known. It also handles the
task of returning any valid connections (if any) that
may be returned from threads in the simultaneous_fight function. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L139-L209 |
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.sequential_connect | def sequential_connect(self):
"""
Sequential connect is designed to return a connection to the
Rendezvous Server but it does so in a way that the local port
ranges (both for the server and used for subsequent hole
punching) are allocated sequentially and predictably. This is
because Delta+1 type NATs only preserve the delta value when
the source ports increase by one.
"""
# Connect to rendezvous server.
try:
mappings = sequential_bind(self.mapping_no + 1, self.interface)
con = self.server_connect(mappings[0]["sock"])
except Exception as e:
log.debug(e)
log.debug("this err")
return None
# First mapping is used to talk to server.
mappings.remove(mappings[0])
# Receive port mapping.
msg = "SOURCE TCP %s" % (str(mappings[0]["source"]))
con.send_line(msg)
reply = con.recv_line(timeout=2)
remote_port = self.parse_remote_port(reply)
if not remote_port:
return None
# Generate port predictions.
predictions = ""
if self.nat_type != "random":
mappings = self.predict_mappings(mappings)
for mapping in mappings:
predictions += str(mapping["remote"]) + " "
predictions = predictions.rstrip()
else:
predictions = "1337"
return [con, mappings, predictions] | python | def sequential_connect(self):
"""
Sequential connect is designed to return a connection to the
Rendezvous Server but it does so in a way that the local port
ranges (both for the server and used for subsequent hole
punching) are allocated sequentially and predictably. This is
because Delta+1 type NATs only preserve the delta value when
the source ports increase by one.
"""
# Connect to rendezvous server.
try:
mappings = sequential_bind(self.mapping_no + 1, self.interface)
con = self.server_connect(mappings[0]["sock"])
except Exception as e:
log.debug(e)
log.debug("this err")
return None
# First mapping is used to talk to server.
mappings.remove(mappings[0])
# Receive port mapping.
msg = "SOURCE TCP %s" % (str(mappings[0]["source"]))
con.send_line(msg)
reply = con.recv_line(timeout=2)
remote_port = self.parse_remote_port(reply)
if not remote_port:
return None
# Generate port predictions.
predictions = ""
if self.nat_type != "random":
mappings = self.predict_mappings(mappings)
for mapping in mappings:
predictions += str(mapping["remote"]) + " "
predictions = predictions.rstrip()
else:
predictions = "1337"
return [con, mappings, predictions] | Sequential connect is designed to return a connection to the
Rendezvous Server but it does so in a way that the local port
ranges (both for the server and used for subsequent hole
punching) are allocated sequentially and predictably. This is
because Delta+1 type NATs only preserve the delta value when
the source ports increase by one. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L211-L251 |
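The sequential allocation that sequential_connect() relies on can be illustrated with plain sockets; a hedged sketch (the real sequential_bind() also handles interfaces, reuse flags and retries):

import socket

def bind_sequential(n, base=40000):
    socks = []
    try:
        for port in range(base, base + n):         # consecutive source ports
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(("", port))
            socks.append(s)
        return socks
    except OSError:                                # a port was taken; give up on this base
        for s in socks:
            s.close()
        return []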
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.simultaneous_listen | def simultaneous_listen(self):
"""
This function is called by passive simultaneous nodes who
wish to establish themselves as such. It sets up a connection
to the Rendezvous Server to monitor for new hole punching requests.
"""
# Close socket.
if self.server_con is not None:
self.server_con.s.close()
self.server_con = None
# Reset predictions + mappings.
self.mappings = None
self.predictions = None
# Connect to rendezvous server.
parts = self.sequential_connect()
if parts is None:
return 0
con, mappings, predictions = parts
con.blocking = 0
con.timeout = 0
con.s.settimeout(0)
self.server_con = con
self.mappings = mappings
self.predictions = predictions
# Register simultaneous node with server.
msg = "SIMULTANEOUS READY 0 0"
ret = self.server_con.send_line(msg)
if not ret:
return 0
return 1 | python | def simultaneous_listen(self):
"""
This function is called by passive simultaneous nodes who
wish to establish themselves as such. It sets up a connection
to the Rendezvous Server to monitor for new hole punching requests.
"""
# Close socket.
if self.server_con is not None:
self.server_con.s.close()
self.server_con = None
# Reset predictions + mappings.
self.mappings = None
self.predictions = None
# Connect to rendezvous server.
parts = self.sequential_connect()
if parts is None:
return 0
con, mappings, predictions = parts
con.blocking = 0
con.timeout = 0
con.s.settimeout(0)
self.server_con = con
self.mappings = mappings
self.predictions = predictions
# Register simultaneous node with server.
msg = "SIMULTANEOUS READY 0 0"
ret = self.server_con.send_line(msg)
if not ret:
return 0
return 1 | This function is called by passive simultaneous nodes who
wish to establish themselves as such. It sets up a connection
to the Rendezvous Server to monitor for new hole punching requests. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L253-L286 |
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.predict_mappings | def predict_mappings(self, mappings):
"""
This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. It currently supports preserving and
delta type mapping behaviour.
"""
if self.nat_type not in self.predictable_nats:
msg = "Can't predict mappings for non-predictable NAT type."
raise Exception(msg)
for mapping in mappings:
mapping["bound"] = mapping["sock"].getsockname()[1]
if self.nat_type == "preserving":
mapping["remote"] = mapping["source"]
if self.nat_type == "delta":
max_port = 65535
mapping["remote"] = int(mapping["source"]) + self.delta
# Overflow or underflow = wrap port around.
if mapping["remote"] > max_port:
mapping["remote"] -= max_port
if mapping["remote"] < 0:
mapping["remote"] = max_port - -mapping["remote"]
# Unknown error.
if mapping["remote"] < 1 or mapping["remote"] > max_port:
mapping["remote"] = 1
mapping["remote"] = str(mapping["remote"])
return mappings | python | def predict_mappings(self, mappings):
"""
This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. It currently supports preserving and
delta type mapping behaviour.
"""
if self.nat_type not in self.predictable_nats:
msg = "Can't predict mappings for non-predictable NAT type."
raise Exception(msg)
for mapping in mappings:
mapping["bound"] = mapping["sock"].getsockname()[1]
if self.nat_type == "preserving":
mapping["remote"] = mapping["source"]
if self.nat_type == "delta":
max_port = 65535
mapping["remote"] = int(mapping["source"]) + self.delta
# Overflow or underflow = wrap port around.
if mapping["remote"] > max_port:
mapping["remote"] -= max_port
if mapping["remote"] < 0:
mapping["remote"] = max_port - -mapping["remote"]
# Unknown error.
if mapping["remote"] < 1 or mapping["remote"] > max_port:
mapping["remote"] = 1
mapping["remote"] = str(mapping["remote"])
return mappings | This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. It currently supports preserving and
delta type mapping behaviour. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L298-L330 |
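A worked example of the delta-type prediction, including the wrap-around handling above (the delta value is illustrative):

MAX_PORT = 65535

def predict_remote(source_port, delta):
    remote = source_port + delta
    if remote > MAX_PORT:            # overflow wraps around
        remote -= MAX_PORT
    if remote < 0:                   # underflow wraps the other way
        remote = MAX_PORT - -remote
    return remote

print(predict_remote(50000, 3))      # 50003
print(predict_remote(65534, 5))      # 4, wrapped past 65535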
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.throw_punch | def throw_punch(self, args, tries=1):
"""
Attempt to open a hole by TCP hole punching. This
function is called by the simultaneous fight function
and it's the code that handles doing the actual hole
punching / connecting.
"""
# Parse arguments.
if len(args) != 3:
return 0
sock, node_ip, remote_port = args
if sock is None or node_ip is None or remote_port is None:
return 0
# Generous timeout.
con = Sock(blocking=1, interface=self.interface)
con.set_sock(sock)
local = 0
if is_ip_private(node_ip):
"""
When simulating nodes on the same computer a delay needs to be set
for the loop back interface to simulate the delays that occur over
a WAN link. This requirement may also be needed for nodes on a LAN.
sudo tc qdisc replace dev lo root handle 1:0 netem delay 0.5sec
Speculation: The simulation problem may be to do with CPU cores.
If the program is run on the same core then the connects will always
be out of sync. If that's the case -- tries will need to be set to
~1000 which was what it was before. Perhaps a delay could be
simulated by sleeping for random periods if its a local connection?
That could help punch through at least once and then just set the
tries to >= 1000.
"""
tries = 20 # 20
local = 1
source_port = sock.getsockname()[1]
error = 0
log.debug("Throwing punch")
for i in range(0, tries):
# Attempt to connect.
try:
con.connect(node_ip, remote_port)
log.debug("Sim open success!")
# FATALITY.
# Atomic operation so mutex not required.
# Record hole made.
con.set_blocking(blocking=0, timeout=5)
self.simultaneous_cons.append(con)
return 1
except Exception as e:
# Punch was blocked, opponent is strong.
e = str(parse_exception(e))
log.debug(e)
error = 1
continue
if error:
sock.close()
return 0 | python | def throw_punch(self, args, tries=1):
"""
Attempt to open a hole by TCP hole punching. This
function is called by the simultaneous fight function
and it's the code that handles doing the actual hole
punching / connecting.
"""
# Parse arguments.
if len(args) != 3:
return 0
sock, node_ip, remote_port = args
if sock is None or node_ip is None or remote_port is None:
return 0
# Generous timeout.
con = Sock(blocking=1, interface=self.interface)
con.set_sock(sock)
local = 0
if is_ip_private(node_ip):
"""
When simulating nodes on the same computer a delay needs to be set
for the loop back interface to simulate the delays that occur over
a WAN link. This requirement may also be needed for nodes on a LAN.
sudo tc qdisc replace dev lo root handle 1:0 netem delay 0.5sec
Speculation: The simulation problem may be to do with CPU cores.
If the program is run on the same core then the connects will always
be out of sync. If that's the case -- tries will need to be set to
~1000 which was what it was before. Perhaps a delay could be
simulated by sleeping for random periods if its a local connection?
That could help punch through at least once and then just set the
tries to >= 1000.
"""
tries = 20 # 20
local = 1
source_port = sock.getsockname()[1]
error = 0
log.debug("Throwing punch")
for i in range(0, tries):
# Attempt to connect.
try:
con.connect(node_ip, remote_port)
log.debug("Sim open success!")
# FATALITY.
# Atomic operation so mutex not required.
# Record hole made.
con.set_blocking(blocking=0, timeout=5)
self.simultaneous_cons.append(con)
return 1
except Exception as e:
# Punch was blocked, opponent is strong.
e = str(parse_exception(e))
log.debug(e)
error = 1
continue
if error:
sock.close()
return 0 | Attempt to open a hole by TCP hole punching. This
function is called by the simultaneous fight function
and it's the code that handles doing the actual hole
punching / connecting. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L332-L396 |
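A stripped-down restatement of the core of throw_punch(): keep firing connect() at the peer's predicted mapping and return the first attempt whose SYN crosses the NAT (illustrative only; the real method wraps the socket in Sock and tunes the retry count for local tests):

def punch_once(bound_sock, peer_ip, peer_port, tries=3):
    for _ in range(tries):
        try:
            bound_sock.connect((peer_ip, int(peer_port)))
            return bound_sock          # hole opened
        except OSError:
            continue                   # blocked this round; peer's SYN not through yet
    return None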
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.simultaneous_fight | def simultaneous_fight(self, my_mappings, node_ip, predictions, origin_ntp):
"""
TCP hole punching algorithm. It uses network time servers to
synchronize two nodes to connect to each other on their
predicted remote ports at the exact same time.
One thing to note is how sensitive TCP hole punching is to
timing. To open a successful connection both sides need to
have their SYN packets cross the NAT before the other side's
SYN arrives. Round-trip time for connections is 0 - 1000ms
depending on proximity. That's a very small margin of error
for hole punching, hence using NTP.
See "TCP Hole Punching" http://www.ietf.org/rfc/rfc5128.txt
and http://en.wikipedia.org/wiki/TCP_hole_punching
for more details.
"""
# Get current network time accurate to
# ~50 ms over WAN (apparently.)
p = request_priority_execution()
log.debug("Getting NTP")
if self.sys_clock is not None:
our_ntp = self.sys_clock.time()
else:
our_ntp = get_ntp()
log.debug("Our ntp = " + str(our_ntp))
if our_ntp is None:
return 0
# Synchronize code execution to occur at their NTP time + delay.
current = float(our_ntp)
future = float(origin_ntp) + float(self.ntp_delay)
sleep_time = future - current
# Check sleep time:
log.debug("Waiting for fight")
if sleep_time < 0:
log.debug("We missed the meeting! It happened " + str(-sleep_time) +
"seconds ago!")
return 0
if sleep_time >= 300:
log.debug("Future sleep time is too great!")
return 0
busy_wait(sleep_time)
release_priority_execution(p)
log.debug("At fight")
"""
Time.sleep isn't guaranteed to sleep for the time specified
which could cause synchronisation to be off between nodes
and different OS' as per the discretion of the task scheduler.
A busy wait is used to increase the accuracy of sleep.
http://stackoverflow.com/questions/17499837/python-time-sleep-vs-busy-wait-accuracy
http://stackoverflow.com/questions/1133857/how-accurate-is-pythons-time-sleep
"""
# Can you dodge my special?
"""
Making this algorithm "multi-threaded" has the potential to
ruin predicted mappings for delta type NATs and NATs that
have no care for source ports and assign incremental
ports no matter what.
"""
threads = []
log.debug("Mapping len " + str(len(my_mappings)))
for mapping in my_mappings:
# Tried all predictions.
prediction_len = len(predictions)
if not prediction_len:
break
# Throw punch.
prediction = predictions[0]
if self.nat_type == "delta":
self.throw_punch([mapping["sock"], node_ip, prediction])
else:
# Thread params.
args = ([
mapping["sock"],
node_ip,
prediction
], 20)
# Start thread.
t = Thread(
target=self.throw_punch,
args=args
)
threads.append(t)
t.start()
predictions.remove(prediction)
# Wait for threads to finish.
for t in threads:
t.join()
return 1 | python | def simultaneous_fight(self, my_mappings, node_ip, predictions, origin_ntp):
"""
TCP hole punching algorithm. It uses network time servers to
synchronize two nodes to connect to each other on their
predicted remote ports at the exact same time.
One thing to note is how sensitive TCP hole punching is to
timing. To open a successful connection both sides need to
have their SYN packets cross the NAT before the other side's
SYN arrives. Round-trip time for connections is 0 - 1000ms
depending on proximity. That's a very small margin of error
for hole punching, hence using NTP.
See "TCP Hole Punching" http://www.ietf.org/rfc/rfc5128.txt
and http://en.wikipedia.org/wiki/TCP_hole_punching
for more details.
"""
# Get current network time accurate to
# ~50 ms over WAN (apparently.)
p = request_priority_execution()
log.debug("Getting NTP")
if self.sys_clock is not None:
our_ntp = self.sys_clock.time()
else:
our_ntp = get_ntp()
log.debug("Our ntp = " + str(our_ntp))
if our_ntp is None:
return 0
# Synchronize code execution to occur at their NTP time + delay.
current = float(our_ntp)
future = float(origin_ntp) + float(self.ntp_delay)
sleep_time = future - current
# Check sleep time:
log.debug("Waiting for fight")
if sleep_time < 0:
log.debug("We missed the meeting! It happened " + str(-sleep_time) +
"seconds ago!")
return 0
if sleep_time >= 300:
log.debug("Future sleep time is too great!")
return 0
busy_wait(sleep_time)
release_priority_execution(p)
log.debug("At fight")
"""
Time.sleep isn't guaranteed to sleep for the time specified
which could cause synchronisation to be off between nodes
and different OS' as per the discretion of the task scheduler.
A busy wait is used to increase the accuracy of sleep.
http://stackoverflow.com/questions/17499837/python-time-sleep-vs-busy-wait-accuracy
http://stackoverflow.com/questions/1133857/how-accurate-is-pythons-time-sleep
"""
# Can you dodge my special?
"""
Making this algorithm "multi-threaded" has the potential to
ruin predicted mappings for delta type NATs and NATs that
have no care for source ports and assign incremental
ports no matter what.
"""
threads = []
log.debug("Mapping len " + str(len(my_mappings)))
for mapping in my_mappings:
# Tried all predictions.
prediction_len = len(predictions)
if not prediction_len:
break
# Throw punch.
prediction = predictions[0]
if self.nat_type == "delta":
self.throw_punch([mapping["sock"], node_ip, prediction])
else:
# Thread params.
args = ([
mapping["sock"],
node_ip,
prediction
], 20)
# Start thread.
t = Thread(
target=self.throw_punch,
args=args
)
threads.append(t)
t.start()
predictions.remove(prediction)
# Wait for threads to finish.
for t in threads:
t.join()
return 1 | TCP hole punching algorithm. It uses network time servers to
synchronize two nodes to connect to each other on their
predicted remote ports at the exact same time.
One thing to note is how sensitive TCP hole punching is to
timing. To open a successful connection both sides need to
have their SYN packets cross the NAT before the other side's
SYN arrives. Round-trip time for connections is 0 - 1000ms
depending on proximity. That's a very small margin of error
for hole punching, hence using NTP.
See "TCP Hole Punching" http://www.ietf.org/rfc/rfc5128.txt
and http://en.wikipedia.org/wiki/TCP_hole_punching
for more details. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L398-L498 |
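
The synchronisation step above reduces to a small calculation; this is a hedged sketch of that arithmetic with made-up NTP readings and a plain busy wait, not the library's busy_wait helper.

import time

def wait_for_meeting(origin_ntp, our_ntp, ntp_delay=2.0):
    # Both peers aim for the same wall-clock instant: the initiator's
    # NTP reading plus an agreed delay.
    sleep_time = (float(origin_ntp) + ntp_delay) - float(our_ntp)
    if sleep_time < 0 or sleep_time >= 300:
        return False  # meeting already passed or is unreasonably far away
    deadline = time.time() + sleep_time
    # Busy wait: usually more accurate than a single time.sleep() call.
    while time.time() < deadline:
        pass
    return True
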
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.simultaneous_challenge | def simultaneous_challenge(self, node_ip, node_port, proto):
"""
Used by active simultaneous nodes to attempt to initiate
a simultaneous open to a compatible node after retrieving
its details from bootstrapping. The function advertises
itself as a potential candidate to the server for the
designated node_ip. It also waits for a response from the
node (if any) and attends any arranged fights.
"""
parts = self.sequential_connect()
if parts is None:
log.debug("Sequential connect failed")
return None
con, mappings, predictions = parts
# Tell server to list ourselves as a candidate for node.
msg = "CANDIDATE %s %s %s" % (node_ip, str(proto), predictions)
con.send_line(msg)
reply = con.recv_line(timeout=10)
log.debug(reply)
if "PREDICTION SET" not in reply:
log.debug("Prediction set failed")
return None
# Wait for node to accept and give us fight time.
# FIGHT 192.168.0.1 4552 345 34235 TCP 123123123.1\
reply = con.recv_line(timeout=10)
log.debug(reply)
con.s.close()
p = "^FIGHT ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) ((?:[0-9]+\s?)+)"
p += " (TCP|UDP) ([0-9]+(?:[.][0-9]+)?)$"
parts = re.findall(p, reply)
if not len(parts):
log.debug("Invalid parts length")
return None
node_ip, predictions, proto, ntp = parts[0]
log.debug("Received fight details")
log.debug(str(parts[0]))
log.debug("Attending fight now")
return self.attend_fight(mappings, node_ip, predictions, ntp) | python | def simultaneous_challenge(self, node_ip, node_port, proto):
"""
Used by active simultaneous nodes to attempt to initiate
a simultaneous open to a compatible node after retrieving
its details from bootstrapping. The function advertises
itself as a potential candidate to the server for the
designated node_ip. It also waits for a response from the
node (if any) and attends any arranged fights.
"""
parts = self.sequential_connect()
if parts is None:
log.debug("Sequential connect failed")
return None
con, mappings, predictions = parts
# Tell server to list ourselves as a candidate for node.
msg = "CANDIDATE %s %s %s" % (node_ip, str(proto), predictions)
con.send_line(msg)
reply = con.recv_line(timeout=10)
log.debug(reply)
if "PREDICTION SET" not in reply:
log.debug("Prediction set failed")
return None
# Wait for node to accept and give us fight time.
# FIGHT 192.168.0.1 4552 345 34235 TCP 123123123.1\
reply = con.recv_line(timeout=10)
log.debug(reply)
con.s.close()
p = "^FIGHT ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) ((?:[0-9]+\s?)+)"
p += " (TCP|UDP) ([0-9]+(?:[.][0-9]+)?)$"
parts = re.findall(p, reply)
if not len(parts):
log.debug("Invalid parts length")
return None
node_ip, predictions, proto, ntp = parts[0]
log.debug("Received fight details")
log.debug(str(parts[0]))
log.debug("Attending fight now")
return self.attend_fight(mappings, node_ip, predictions, ntp) | Used by active simultaneous nodes to attempt to initiate
a simultaneous open to a compatible node after retrieving
its details from bootstrapping. The function advertises
itself as a potential candidate to the server for the
designated node_ip. It also waits for a response from the
node (if any) and attends any arranged fights. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L501-L542 |
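
For reference, the FIGHT reply parsing can be exercised on its own; the sample reply below is the one shown in the comment above and the pattern is the same, written here as raw strings.

import re

p = r"^FIGHT ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) ((?:[0-9]+\s?)+)"
p += r" (TCP|UDP) ([0-9]+(?:[.][0-9]+)?)$"

reply = "FIGHT 192.168.0.1 4552 345 34235 TCP 123123123.1"
parts = re.findall(p, reply)
if parts:
    node_ip, predictions, proto, ntp = parts[0]
    # predictions is a space separated list of predicted remote ports.
    predicted_ports = [int(x) for x in predictions.split()]
    print(node_ip, predicted_ports, proto, ntp)
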
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.parse_remote_port | def parse_remote_port(self, reply):
"""
Parses a remote port from a Rendezvous Server's
response.
"""
remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
if not len(remote_port):
remote_port = 0
else:
remote_port = int(remote_port[0][1])
if remote_port < 1 or remote_port > 65535:
remote_port = 0
return remote_port | python | def parse_remote_port(self, reply):
"""
Parses a remote port from a Rendezvous Server's
response.
"""
remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
if not len(remote_port):
remote_port = 0
else:
remote_port = int(remote_port[0][1])
if remote_port < 1 or remote_port > 65535:
remote_port = 0
return remote_port | Parses a remote port from a Rendezvous Server's
response. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L544-L557 |
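
A quick usage sketch of the REMOTE reply format handled above; the reply strings are invented examples of the expected wire format.

import re

def parse_remote_port_sketch(reply):
    # Expected format: "REMOTE TCP 35000" or "REMOTE UDP 35000".
    found = re.findall(r"^REMOTE (TCP|UDP) ([0-9]+)$", reply)
    if not found:
        return 0
    port = int(found[0][1])
    return port if 1 <= port <= 65535 else 0

assert parse_remote_port_sketch("REMOTE TCP 35000") == 35000
assert parse_remote_port_sketch("garbage") == 0
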
StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.determine_nat | def determine_nat(self, return_instantly=1):
"""
This function can predict 4 types of NATs.
(Not adequately tested yet.)
1. Preserving.
Source port == remote port
2. Delta.
Remote port == source port + delta.
3. Delta+1
Same as delta but delta is only preserved when
the source port increments by 1 (my understanding I
may have misunderstood.)
- This case is handled by manually using incremental,
sequential ports for punching operations.
4. Reuse.
Same source port + addr == previous mapped remote port
for that connection.
Good NAT characteristic references and definitions:
[0] http://nutss.gforge.cis.cornell.edu/pub/imc05-tcpnat.pdf
[1] http://doc.cacaoweb.org/misc/cacaoweb-and-nats/nat-behavioral-specifications-for-p2p-applications/#tcpholepun
[2] http://www.deusty.com/2007/07/nat-traversal-port-prediction-part-2-of.html
http://www.researchgate.net/publication/239801764_Implementing_NAT_Traversal_on_BitTorrent
[3] http://en.wikipedia.org/wiki/TCP_hole_punching
"""
# Already set.
if self.nat_type != "unknown":
return self.nat_type
nat_type = "random"
# Check collision ratio.
if self.port_collisions * 5 > self.nat_tests:
msg = "Port collision number is too high compared to nat tests."
msg += " Collisions must be in ratio 1 : 5 to avoid ambiguity"
msg += " in test results."
raise Exception(msg)
# Load mappings for reuse test.
"""
Notes: This reuse test needs to ideally be performed against
bootstrapping nodes on at least two different addresses and
ports to each other because there are NAT types which
allocate new mappings based on changes to these variables.
"""
def custom_server_con(port=None, servers=None):
# Get connection to rendezvous server with random
# source port specified
servers = servers or self.rendezvous_servers
con = None
while con is None:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = port or get_unused_port(None)
sock.bind(('', port))
source_port = sock.getsockname()[1]
index = random.randrange(0, len(servers))
log.debug("Trying index: " + str(index))
con = self.server_connect(sock, index, servers)
except:
time.sleep(1)
sock.close()
# Record which server we're connected to.
server = list(con.s.getpeername())[:]
server = {
"addr": server[0],
"port": server[1]
}
# Get the port mappings and instruct remote host to disconnect
# This gives them the timewait state (we also connect to another
# server anyway so as to avoid using the exact same con tuple.)
con.send_line("SOURCE TCP " + str(source_port))
remote_port = con.recv_line(timeout=2)
remote_port = self.parse_remote_port(remote_port)
con.send_line("QUIT")
return source_port, remote_port, server
log.debug("Starting initial mappings for preserving + reuse tests")
mappings = []
for i in range(0, self.nat_tests):
src, remote, server = custom_server_con()
mappings.append({
"source": src,
"remote": int(remote),
"server": server
})
log.debug(mappings)
log.debug(len(mappings))
log.debug(self.nat_tests)
log.debug("Finished mappings")
# Preserving test.
preserving = 0
for mapping in mappings:
if mapping["source"] == mapping["remote"]:
preserving += 1
if preserving >= (self.nat_tests - self.port_collisions):
nat_type = "preserving"
if return_instantly:
return nat_type
"""
# Test reuse.
log.debug("Testing reuse")
reuse = 0
for mapping in mappings:
addr = ("www.example.com", 80)
servers = self.rendezvous_servers[:]
servers.remove(mapping["server"])
log.debug("servers = " + str(servers))
src, remote, junk = custom_server_con(mapping["source"], servers)
if remote == mapping["remote"]:
reuse += 1
# Check reuse results.
if reuse >= (self.nat_tests - self.port_collisions):
nat_type = "reuse"
if return_instantly:
return nat_type
# Load mappings for delta tests.
mappings = sequential_bind(self.nat_tests, self.interface)
for i in range(0, self.nat_tests):
con = self.server_connect(mappings[i]["sock"])
con.send_line("SOURCE TCP " + str(mappings[i]["source"]))
remote_port = self.parse_remote_port(con.recv_line(timeout=2))
mappings[i]["remote"] = int(remote_port)
con.s.close()
"""
# Delta test.
delta_ret = self.delta_test(mappings)
if delta_ret["nat_type"] != "random":
# Save delta value.
self.delta = delta_ret["delta"]
nat_type = "delta"
if return_instantly:
return nat_type
return nat_type | python | def determine_nat(self, return_instantly=1):
"""
This function can predict 4 types of NATs.
(Not adequately tested yet.)
1. Preserving.
Source port == remote port
2. Delta.
Remote port == source port + delta.
3. Delta+1
Same as delta but delta is only preserved when
the source port increments by 1 (my understanding I
may have misunderstood.)
- This case is handled by manually using incremental,
sequential ports for punching operations.
4. Reuse.
Same source port + addr == previous mapped remote port
for that connection.
Good NAT characteristic references and definitions:
[0] http://nutss.gforge.cis.cornell.edu/pub/imc05-tcpnat.pdf
[1] http://doc.cacaoweb.org/misc/cacaoweb-and-nats/nat-behavioral-specifications-for-p2p-applications/#tcpholepun
[2] http://www.deusty.com/2007/07/nat-traversal-port-prediction-part-2-of.html
http://www.researchgate.net/publication/239801764_Implementing_NAT_Traversal_on_BitTorrent
[3] http://en.wikipedia.org/wiki/TCP_hole_punching
"""
# Already set.
if self.nat_type != "unknown":
return self.nat_type
nat_type = "random"
# Check collision ratio.
if self.port_collisions * 5 > self.nat_tests:
msg = "Port collision number is too high compared to nat tests."
msg += " Collisions must be in ratio 1 : 5 to avoid ambiguity"
msg += " in test results."
raise Exception(msg)
# Load mappings for reuse test.
"""
Notes: This reuse test needs to ideally be performed against
bootstrapping nodes on at least two different addresses and
ports to each other because there are NAT types which
allocate new mappings based on changes to these variables.
"""
def custom_server_con(port=None, servers=None):
# Get connection to rendezvous server with random
# source port specified
servers = servers or self.rendezvous_servers
con = None
while con is None:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = port or get_unused_port(None)
sock.bind(('', port))
source_port = sock.getsockname()[1]
index = random.randrange(0, len(servers))
log.debug("Trying index: " + str(index))
con = self.server_connect(sock, index, servers)
except:
time.sleep(1)
sock.close()
# Record which server we're connected to.
server = list(con.s.getpeername())[:]
server = {
"addr": server[0],
"port": server[1]
}
# Get the port mappings and instruct remote host to disconnect
# This gives them the timewait state (we also connect to another
# server anyway so as to avoid using the exact same con tuple.)
con.send_line("SOURCE TCP " + str(source_port))
remote_port = con.recv_line(timeout=2)
remote_port = self.parse_remote_port(remote_port)
con.send_line("QUIT")
return source_port, remote_port, server
log.debug("Starting initial mappings for preserving + reuse tests")
mappings = []
for i in range(0, self.nat_tests):
src, remote, server = custom_server_con()
mappings.append({
"source": src,
"remote": int(remote),
"server": server
})
log.debug(mappings)
log.debug(len(mappings))
log.debug(self.nat_tests)
log.debug("Finished mappings")
# Preserving test.
preserving = 0
for mapping in mappings:
if mapping["source"] == mapping["remote"]:
preserving += 1
if preserving >= (self.nat_tests - self.port_collisions):
nat_type = "preserving"
if return_instantly:
return nat_type
"""
# Test reuse.
log.debug("Testing reuse")
reuse = 0
for mapping in mappings:
addr = ("www.example.com", 80)
servers = self.rendezvous_servers[:]
servers.remove(mapping["server"])
log.debug("servers = " + str(servers))
src, remote, junk = custom_server_con(mapping["source"], servers)
if remote == mapping["remote"]:
reuse += 1
# Check reuse results.
if reuse >= (self.nat_tests - self.port_collisions):
nat_type = "reuse"
if return_instantly:
return nat_type
# Load mappings for delta tests.
mappings = sequential_bind(self.nat_tests, self.interface)
for i in range(0, self.nat_tests):
con = self.server_connect(mappings[i]["sock"])
con.send_line("SOURCE TCP " + str(mappings[i]["source"]))
remote_port = self.parse_remote_port(con.recv_line(timeout=2))
mappings[i]["remote"] = int(remote_port)
con.s.close()
"""
# Delta test.
delta_ret = self.delta_test(mappings)
if delta_ret["nat_type"] != "random":
# Save delta value.
self.delta = delta_ret["delta"]
nat_type = "delta"
if return_instantly:
return nat_type
return nat_type | This function can predict 4 types of NATs.
(Not adequately tested yet.)
1. Preserving.
Source port == remote port
2. Delta.
Remote port == source port + delta.
3. Delta+1
Same as delta but delta is only preserved when
the source port increments by 1 (my understanding I
may have misunderstood.)
- This case is handled by manually using incremental,
sequential ports for punching operations.
4. Reuse.
Same source port + addr == previous mapped remote port
for that connection.
Good NAT characteristic references and definitions:
[0] http://nutss.gforge.cis.cornell.edu/pub/imc05-tcpnat.pdf
[1] http://doc.cacaoweb.org/misc/cacaoweb-and-nats/nat-behavioral-specifications-for-p2p-applications/#tcpholepun
[2] http://www.deusty.com/2007/07/nat-traversal-port-prediction-part-2-of.html
http://www.researchgate.net/publication/239801764_Implementing_NAT_Traversal_on_BitTorrent
[3] http://en.wikipedia.org/wiki/TCP_hole_punching | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L643-L786 |
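
The classification logic boils down to comparing source/remote port pairs; below is a hedged, self-contained sketch of the preserving and delta checks over made-up mappings (the collision tolerance is simplified relative to the library's delta_test).

def classify_nat_sketch(mappings, max_collisions=1):
    # mappings: list of {"source": int, "remote": int} samples.
    n = len(mappings)
    preserving = sum(1 for m in mappings if m["source"] == m["remote"])
    if preserving >= n - max_collisions:
        return "preserving"
    # Delta test: remote port differs from source port by a constant.
    deltas = {m["remote"] - m["source"] for m in mappings}
    if len(deltas) <= 1 + max_collisions:
        return "delta"
    return "random"

samples = [{"source": p, "remote": p + 3} for p in (40000, 40010, 40020)]
assert classify_nat_sketch(samples) == "delta"
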
StorjOld/pyp2p | pyp2p/lib.py | get_unused_port | def get_unused_port(port=None):
"""Checks if port is already in use."""
if port is None or port < 1024 or port > 65535:
port = random.randint(1024, 65535)
assert(1024 <= port <= 65535)
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', port)) # Try to open port
except socket.error as e:
if e.errno in (98, 10048): # 98, 10048 means address already bound
return get_unused_port(None)
raise e
s.close()
return port | python | def get_unused_port(port=None):
"""Checks if port is already in use."""
if port is None or port < 1024 or port > 65535:
port = random.randint(1024, 65535)
assert(1024 <= port <= 65535)
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', port)) # Try to open port
except socket.error as e:
if e.errno in (98, 10048): # 98, 10048 means address already bound
return get_unused_port(None)
raise e
s.close()
return port | Return an unused port, falling back to a random port if the given one is already in use. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/lib.py#L27-L41 |
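
Usage is straightforward; a small sketch of grabbing a free port and binding a listener to it, assuming the function above is importable as pyp2p.lib.get_unused_port.

import socket
from pyp2p.lib import get_unused_port  # the function shown above

port = get_unused_port()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("", port))
server.listen(5)
print("listening on", port)
server.close()
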
StorjOld/pyp2p | pyp2p/lib.py | get_lan_ip | def get_lan_ip(interface="default"):
if sys.version_info < (3, 0, 0):
if type(interface) == str:
interface = unicode(interface)
else:
if type(interface) == bytes:
interface = interface.decode("utf-8")
# Get ID of interface that handles WAN stuff.
default_gateway = get_default_gateway(interface)
gateways = netifaces.gateways()
wan_id = None
if netifaces.AF_INET in gateways:
gw_list = gateways[netifaces.AF_INET]
for gw_info in gw_list:
if gw_info[0] == default_gateway:
wan_id = gw_info[1]
break
# Find LAN IP of interface for WAN stuff.
interfaces = netifaces.interfaces()
if wan_id in interfaces:
families = netifaces.ifaddresses(wan_id)
if netifaces.AF_INET in families:
if_info_list = families[netifaces.AF_INET]
for if_info in if_info_list:
if "addr" in if_info:
return if_info["addr"]
"""
Execution may reach here if the host is using
virtual interfaces on Linux and there are no gateways
which suggests the host is a VPS or server. In this
case, fall back to the routing table and use the
preferred source IP for a route to 8.8.8.8.
"""
if platform.system() == "Linux":
if ip is not None:
return ip.routes["8.8.8.8"]["prefsrc"]
return None | python | def get_lan_ip(interface="default"):
if sys.version_info < (3, 0, 0):
if type(interface) == str:
interface = unicode(interface)
else:
if type(interface) == bytes:
interface = interface.decode("utf-8")
# Get ID of interface that handles WAN stuff.
default_gateway = get_default_gateway(interface)
gateways = netifaces.gateways()
wan_id = None
if netifaces.AF_INET in gateways:
gw_list = gateways[netifaces.AF_INET]
for gw_info in gw_list:
if gw_info[0] == default_gateway:
wan_id = gw_info[1]
break
# Find LAN IP of interface for WAN stuff.
interfaces = netifaces.interfaces()
if wan_id in interfaces:
families = netifaces.ifaddresses(wan_id)
if netifaces.AF_INET in families:
if_info_list = families[netifaces.AF_INET]
for if_info in if_info_list:
if "addr" in if_info:
return if_info["addr"]
"""
Execution may reach here if the host is using
virtual interfaces on Linux and there are no gateways
which suggests the host is a VPS or server. In this
case, fall back to the routing table and use the
preferred source IP for a route to 8.8.8.8.
"""
if platform.system() == "Linux":
if ip is not None:
return ip.routes["8.8.8.8"]["prefsrc"]
return None | Execution may reach here if the host is using
virtual interfaces on Linux and there are no gateways
which suggests the host is a VPS or server. In this
case, fall back to the routing table and use the preferred source IP for a route to 8.8.8.8. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/lib.py#L179-L218 |
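
The netifaces calls that get_lan_ip relies on can also be explored directly; a short hedged sketch that prints the IPv4 address of whichever interface owns the default gateway (assumes a default gateway exists).

import netifaces

gateways = netifaces.gateways()
# "default" maps an address family to (gateway_ip, interface_name).
gw_ip, iface = gateways["default"][netifaces.AF_INET]
for info in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
    if "addr" in info:
        print("LAN IP:", info["addr"])
        break
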
StorjOld/pyp2p | pyp2p/lib.py | get_wan_ip | def get_wan_ip(n=0):
"""
That IP module sucks. Occasionally it returns an IP address behind
cloudflare which probably happens when cloudflare tries to proxy your web
request because it thinks you're trying to DoS. It's better if we just run
our own infrastructure.
"""
if n == 2:
try:
ip = myip()
ip = extract_ip(ip)
if is_ip_valid(ip):
return ip
except Exception as e:
print(str(e))
return None
# Fail-safe: use centralized server for IP lookup.
from pyp2p.net import forwarding_servers
for forwarding_server in forwarding_servers:
url = "http://" + forwarding_server["addr"] + ":"
url += str(forwarding_server["port"])
url += forwarding_server["url"]
url += "?action=get_wan_ip"
try:
r = urlopen(url, timeout=5)
response = r.read().decode("utf-8")
response = extract_ip(response)
if is_ip_valid(response):
return response
except Exception as e:
print(str(e))
continue
time.sleep(1)
return get_wan_ip(n + 1) | python | def get_wan_ip(n=0):
"""
That IP module sucks. Occasionally it returns an IP address behind
cloudflare which probably happens when cloudflare tries to proxy your web
request because it thinks you're trying to DoS. It's better if we just run
our own infrastructure.
"""
if n == 2:
try:
ip = myip()
ip = extract_ip(ip)
if is_ip_valid(ip):
return ip
except Exception as e:
print(str(e))
return None
# Fail-safe: use centralized server for IP lookup.
from pyp2p.net import forwarding_servers
for forwarding_server in forwarding_servers:
url = "http://" + forwarding_server["addr"] + ":"
url += str(forwarding_server["port"])
url += forwarding_server["url"]
url += "?action=get_wan_ip"
try:
r = urlopen(url, timeout=5)
response = r.read().decode("utf-8")
response = extract_ip(response)
if is_ip_valid(response):
return response
except Exception as e:
print(str(e))
continue
time.sleep(1)
return get_wan_ip(n + 1) | That IP module sucks. Occasionally it returns an IP address behind
cloudflare which probably happens when cloudflare tries to proxy your web
request because it thinks you're trying to DoS. It's better if we just run
our own infrastructure. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/lib.py#L378-L414 |
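
A hedged sketch of the fallback lookup: query an HTTP endpoint that echoes the caller's address and validate the result. The URL is a placeholder, not one of pyp2p's forwarding servers.

import re
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

def lookup_wan_ip(url="http://example.com/ip"):  # placeholder endpoint
    body = urlopen(url, timeout=5).read().decode("utf-8")
    match = re.search(r"[0-9]{1,3}(?:\.[0-9]{1,3}){3}", body)
    return match.group(0) if match else None
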
StorjOld/pyp2p | pyp2p/nat_pmp.py | get_gateway_addr | def get_gateway_addr():
"""Use netifaces to get the gateway address, if we can't import it then
fall back to a hack to obtain the current gateway automatically, since
Python has no interface to sysctl().
This may or may not be the gateway we should be contacting.
It does not guarantee correct results.
This function requires the presence of netstat on the path on POSIX
and NT.
"""
try:
import netifaces
return netifaces.gateways()["default"][netifaces.AF_INET][0]
except ImportError:
shell_command = 'netstat -rn'
if os.name == "posix":
pattern = \
re.compile('(?:default|0\.0\.0\.0|::/0)\s+([\w\.:]+)\s+.*UG')
elif os.name == "nt":
if platform.version().startswith("6.1"):
pattern = re.compile(".*?0.0.0.0[ ]+0.0.0.0[ ]+(.*?)[ ]+?.*?\n")
else:
pattern = re.compile(".*?Default Gateway:[ ]+(.*?)\n")
system_out = os.popen(shell_command, 'r').read()
if not system_out:
raise NATPMPNetworkError(NATPMP_GATEWAY_CANNOT_FIND,
error_str(NATPMP_GATEWAY_CANNOT_FIND))
match = pattern.search(system_out)
if not match:
raise NATPMPNetworkError(NATPMP_GATEWAY_CANNOT_FIND,
error_str(NATPMP_GATEWAY_CANNOT_FIND))
addr = match.groups()[0].strip()
return addr | python | def get_gateway_addr():
"""Use netifaces to get the gateway address, if we can't import it then
fall back to a hack to obtain the current gateway automatically, since
Python has no interface to sysctl().
This may or may not be the gateway we should be contacting.
It does not guarantee correct results.
This function requires the presence of netstat on the path on POSIX
and NT.
"""
try:
import netifaces
return netifaces.gateways()["default"][netifaces.AF_INET][0]
except ImportError:
shell_command = 'netstat -rn'
if os.name == "posix":
pattern = \
re.compile('(?:default|0\.0\.0\.0|::/0)\s+([\w\.:]+)\s+.*UG')
elif os.name == "nt":
if platform.version().startswith("6.1"):
pattern = re.compile(".*?0.0.0.0[ ]+0.0.0.0[ ]+(.*?)[ ]+?.*?\n")
else:
pattern = re.compile(".*?Default Gateway:[ ]+(.*?)\n")
system_out = os.popen(shell_command, 'r').read()
if not system_out:
raise NATPMPNetworkError(NATPMP_GATEWAY_CANNOT_FIND,
error_str(NATPMP_GATEWAY_CANNOT_FIND))
match = pattern.search(system_out)
if not match:
raise NATPMPNetworkError(NATPMP_GATEWAY_CANNOT_FIND,
error_str(NATPMP_GATEWAY_CANNOT_FIND))
addr = match.groups()[0].strip()
return addr | Use netifaces to get the gateway address; if we can't import it, then
fall back to a hack to obtain the current gateway automatically, since
Python has no interface to sysctl().
This may or may not be the gateway we should be contacting.
It does not guarantee correct results.
This function requires the presence of netstat on the path on POSIX
and NT. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/nat_pmp.py#L246-L279 |
StorjOld/pyp2p | pyp2p/nat_pmp.py | get_gateway_socket | def get_gateway_socket(gateway):
"""Takes a gateway address string and returns a non-blocking UDP
socket to communicate with its NAT-PMP implementation on
NATPMP_PORT.
e.g. addr = get_gateway_socket('10.0.1.1')
"""
if not gateway:
raise NATPMPNetworkError(NATPMP_GATEWAY_NO_VALID_GATEWAY,
error_str(NATPMP_GATEWAY_NO_VALID_GATEWAY))
response_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
response_socket.setblocking(0)
response_socket.connect((gateway, NATPMP_PORT))
return response_socket | python | def get_gateway_socket(gateway):
"""Takes a gateway address string and returns a non-blocking UDP
socket to communicate with its NAT-PMP implementation on
NATPMP_PORT.
e.g. addr = get_gateway_socket('10.0.1.1')
"""
if not gateway:
raise NATPMPNetworkError(NATPMP_GATEWAY_NO_VALID_GATEWAY,
error_str(NATPMP_GATEWAY_NO_VALID_GATEWAY))
response_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
response_socket.setblocking(0)
response_socket.connect((gateway, NATPMP_PORT))
return response_socket | Takes a gateway address string and returns a non-blocking UDP
socket to communicate with its NAT-PMP implementation on
NATPMP_PORT.
e.g. addr = get_gateway_socket('10.0.1.1') | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/nat_pmp.py#L292-L305 |
StorjOld/pyp2p | pyp2p/nat_pmp.py | get_public_address | def get_public_address(gateway_ip=None, retry=9):
"""A high-level function that returns the public interface IP of
the current host by querying the NAT-PMP gateway. IP is
returned as string.
Takes two possible keyword arguments:
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
"""
if gateway_ip is None:
gateway_ip = get_gateway_addr()
addr_request = PublicAddressRequest()
addr_response = send_request_with_retry(gateway_ip, addr_request,
response_data_class=
PublicAddressResponse,
retry=retry, response_size=12)
if addr_response.result != 0:
# sys.stderr.write("NAT-PMP error %d: %s\n" %
# (addr_response.result,
# error_str(addr_response.result)))
# sys.stderr.flush()
raise NATPMPResultError(addr_response.result,
error_str(addr_response.result), addr_response)
addr = addr_response.ip
return addr | python | def get_public_address(gateway_ip=None, retry=9):
"""A high-level function that returns the public interface IP of
the current host by querying the NAT-PMP gateway. IP is
returned as string.
Takes two possible keyword arguments:
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
"""
if gateway_ip is None:
gateway_ip = get_gateway_addr()
addr_request = PublicAddressRequest()
addr_response = send_request_with_retry(gateway_ip, addr_request,
response_data_class=
PublicAddressResponse,
retry=retry, response_size=12)
if addr_response.result != 0:
# sys.stderr.write("NAT-PMP error %d: %s\n" %
# (addr_response.result,
# error_str(addr_response.result)))
# sys.stderr.flush()
raise NATPMPResultError(addr_response.result,
error_str(addr_response.result), addr_response)
addr = addr_response.ip
return addr | A high-level function that returns the public interface IP of
the current host by querying the NAT-PMP gateway. IP is
returned as string.
Takes two possible keyword arguments:
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/nat_pmp.py#L308-L335 |
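
Typical use, hedged as a sketch since it needs a NAT-PMP capable gateway on the local network:

from pyp2p import nat_pmp

try:
    wan_ip = nat_pmp.get_public_address()  # auto-detects the gateway
    print("External address:", wan_ip)
except nat_pmp.NATPMPResultError as e:
    print("Gateway refused the request:", e)
except nat_pmp.NATPMPNetworkError as e:
    print("No NAT-PMP gateway reachable:", e)
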
StorjOld/pyp2p | pyp2p/nat_pmp.py | map_tcp_port | def map_tcp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
"""A high-level wrapper to map_port() that requests a mapping
for a public TCP port on the NAT to a private TCP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True.
"""
return map_port(NATPMP_PROTOCOL_TCP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) | python | def map_tcp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
"""A high-level wrapper to map_port() that requests a mapping
for a public TCP port on the NAT to a private TCP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True.
"""
return map_port(NATPMP_PROTOCOL_TCP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) | A high-level wrapper to map_port() that requests a mapping
for a public TCP port on the NAT to a private TCP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/nat_pmp.py#L338-L358 |
StorjOld/pyp2p | pyp2p/nat_pmp.py | map_udp_port | def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
"""A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True.
"""
return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) | python | def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
"""A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True.
"""
return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) | A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/nat_pmp.py#L361-L381 |
StorjOld/pyp2p | pyp2p/nat_pmp.py | map_port | def map_port(protocol, public_port, private_port, lifetime=3600,
gateway_ip=None, retry=9, use_exception=True):
"""A function to map public_port to private_port of protocol.
Returns the complete response on success.
protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result
is received from the gateway. Defaults to True.
"""
if protocol not in [NATPMP_PROTOCOL_UDP, NATPMP_PROTOCOL_TCP]:
raise ValueError("Must be either NATPMP_PROTOCOL_UDP or "
"NATPMP_PROTOCOL_TCP")
if gateway_ip is None:
gateway_ip = get_gateway_addr()
response = None
port_mapping_request = PortMapRequest(protocol, private_port,
public_port, lifetime)
port_mapping_response = \
send_request_with_retry(gateway_ip, port_mapping_request,
response_data_class=PortMapResponse,
retry=retry)
if port_mapping_response.result != 0 and use_exception:
raise NATPMPResultError(port_mapping_response.result,
error_str(port_mapping_response.result),
port_mapping_response)
return port_mapping_response | python | def map_port(protocol, public_port, private_port, lifetime=3600,
gateway_ip=None, retry=9, use_exception=True):
"""A function to map public_port to private_port of protocol.
Returns the complete response on success.
protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result
is received from the gateway. Defaults to True.
"""
if protocol not in [NATPMP_PROTOCOL_UDP, NATPMP_PROTOCOL_TCP]:
raise ValueError("Must be either NATPMP_PROTOCOL_UDP or "
"NATPMP_PROTOCOL_TCP")
if gateway_ip is None:
gateway_ip = get_gateway_addr()
response = None
port_mapping_request = PortMapRequest(protocol, private_port,
public_port, lifetime)
port_mapping_response = \
send_request_with_retry(gateway_ip, port_mapping_request,
response_data_class=PortMapResponse,
retry=retry)
if port_mapping_response.result != 0 and use_exception:
raise NATPMPResultError(port_mapping_response.result,
error_str(port_mapping_response.result),
port_mapping_response)
return port_mapping_response | A function to map public_port to private_port of protocol.
Returns the complete response on success.
protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result
is received from the gateway. Defaults to True. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/nat_pmp.py#L384-L418 |
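
Requesting a mapping then looks like this (sketch; the port numbers are arbitrary and the router must speak NAT-PMP):

from pyp2p.nat_pmp import map_tcp_port, NATPMPResultError

try:
    # Ask the gateway to forward external TCP port 8080 to local port
    # 8080 for one hour (the spec's default lifetime).
    resp = map_tcp_port(8080, 8080, lifetime=3600)
    print("Mapped, gateway result code:", resp.result)
except NATPMPResultError as e:
    print("Mapping refused:", e)
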
StorjOld/pyp2p | pyp2p/upnp.py | UPnP.forward_port | def forward_port(self, proto, src_port, dest_ip, dest_port=None):
"""
Creates a new mapping for the default gateway to forward ports.
Source port is from the perspective of the original client.
For example, if a client tries to connect to us on port 80,
the source port is port 80. The destination port isn't
necessarily 80, however. We might wish to run our web server
on a different port so we can have the router forward requests
for port 80 to another port (what I call the destination port.)
If the destination port isn't specified, it defaults to the
source port. Proto is either TCP or UDP. Function returns None
on success, otherwise it raises an exception.
"""
proto = proto.upper()
valid_protos = ["TCP", "UDP"]
if proto not in valid_protos:
raise Exception("Invalid protocol for forwarding.")
valid_ports = range(1, 65535)
if src_port not in valid_ports:
raise Exception("Invalid port for forwarding.")
# Source port is forwarded to same destination port number.
if dest_port is None:
dest_port = src_port
# Use UPnP binary for forwarding on Windows.
if platform.system() == "Windows":
cmd = "upnpc-static.exe -a %s %s %s %s" % (get_lan_ip(),
str(src_port),
str(dest_port),
proto)
out, err = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if "is not recognized" in err:
raise Exception("Missing upnpc-static.exe")
return
# Find gateway address.
gateway_addr = self.find_gateway()
if gateway_addr is None:
raise Exception("Unable to find UPnP compatible gateway.")
# Get control URL.
rhost = re.findall('([^/]+)', gateway_addr)
res = urlopen(gateway_addr, timeout=self.timeout).read().decode("utf-8")
res = res.replace('\r', '')
res = res.replace('\n', '')
res = res.replace('\t', '')
pres = res.split('<serviceId>urn:upnp-org:serviceId:WANIPConn1'
'</serviceId>')
p2res = pres[1].split('</controlURL>')
p3res = p2res[0].split('<controlURL>')
ctrl = p3res[1]
rip = res.split('<presentationURL>')
rip1 = rip[1].split('</presentationURL>')
router_ip = rip1[0]
port_map_desc = "PyP2P"
msg = \
'<?xml version="1.0"?><s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"><s:Body><u:AddPortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1"><NewRemoteHost></NewRemoteHost><NewExternalPort>' \
+ str(src_port) \
+ '</NewExternalPort><NewProtocol>' + str(proto) + '</NewProtocol><NewInternalPort>' \
+ str(dest_port) + '</NewInternalPort><NewInternalClient>' + str(dest_ip) \
+ '</NewInternalClient><NewEnabled>1</NewEnabled><NewPortMappingDescription>' + str(port_map_desc) + '</NewPortMappingDescription><NewLeaseDuration>0</NewLeaseDuration></u:AddPortMapping></s:Body></s:Envelope>'
# Attempt to add new port map.
x = 'http://' + rhost[1] + '/' + ctrl
if sys.version_info >= (3, 0, 0):
msg = bytes(msg, "utf-8")
req = Request('http://' + rhost[1] + '/' + ctrl, msg)
req.add_header('SOAPAction',
'"urn:schemas-upnp-org:service:WANIPConnection:1#AddPortMapping"'
)
req.add_header('Content-type', 'application/xml')
res = urlopen(req, timeout=self.timeout) | python | def forward_port(self, proto, src_port, dest_ip, dest_port=None):
"""
Creates a new mapping for the default gateway to forward ports.
Source port is from the perspective of the original client.
For example, if a client tries to connect to us on port 80,
the source port is port 80. The destination port isn't
necessarily 80, however. We might wish to run our web server
on a different port so we can have the router forward requests
for port 80 to another port (what I call the destination port.)
If the destination port isn't specified, it defaults to the
source port. Proto is either TCP or UDP. Function returns None
on success, otherwise it raises an exception.
"""
proto = proto.upper()
valid_protos = ["TCP", "UDP"]
if proto not in valid_protos:
raise Exception("Invalid protocol for forwarding.")
valid_ports = range(1, 65535)
if src_port not in valid_ports:
raise Exception("Invalid port for forwarding.")
# Source port is forwarded to same destination port number.
if dest_port is None:
dest_port = src_port
# Use UPnP binary for forwarding on Windows.
if platform.system() == "Windows":
cmd = "upnpc-static.exe -a %s %s %s %s" % (get_lan_ip(),
str(src_port),
str(dest_port),
proto)
out, err = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if "is not recognized" in err:
raise Exception("Missing upnpc-static.exe")
return
# Find gateway address.
gateway_addr = self.find_gateway()
if gateway_addr is None:
raise Exception("Unable to find UPnP compatible gateway.")
# Get control URL.
rhost = re.findall('([^/]+)', gateway_addr)
res = urlopen(gateway_addr, timeout=self.timeout).read().decode("utf-8")
res = res.replace('\r', '')
res = res.replace('\n', '')
res = res.replace('\t', '')
pres = res.split('<serviceId>urn:upnp-org:serviceId:WANIPConn1'
'</serviceId>')
p2res = pres[1].split('</controlURL>')
p3res = p2res[0].split('<controlURL>')
ctrl = p3res[1]
rip = res.split('<presentationURL>')
rip1 = rip[1].split('</presentationURL>')
router_ip = rip1[0]
port_map_desc = "PyP2P"
msg = \
'<?xml version="1.0"?><s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"><s:Body><u:AddPortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1"><NewRemoteHost></NewRemoteHost><NewExternalPort>' \
+ str(src_port) \
+ '</NewExternalPort><NewProtocol>' + str(proto) + '</NewProtocol><NewInternalPort>' \
+ str(dest_port) + '</NewInternalPort><NewInternalClient>' + str(dest_ip) \
+ '</NewInternalClient><NewEnabled>1</NewEnabled><NewPortMappingDescription>' + str(port_map_desc) + '</NewPortMappingDescription><NewLeaseDuration>0</NewLeaseDuration></u:AddPortMapping></s:Body></s:Envelope>'
# Attempt to add new port map.
x = 'http://' + rhost[1] + '/' + ctrl
if sys.version_info >= (3, 0, 0):
msg = bytes(msg, "utf-8")
req = Request('http://' + rhost[1] + '/' + ctrl, msg)
req.add_header('SOAPAction',
'"urn:schemas-upnp-org:service:WANIPConnection:1#AddPortMapping"'
)
req.add_header('Content-type', 'application/xml')
res = urlopen(req, timeout=self.timeout) | Creates a new mapping for the default gateway to forward ports.
Source port is from the perspective of the original client.
For example, if a client tries to connect to us on port 80,
the source port is port 80. The destination port isn't
necessarily 80, however. We might wish to run our web server
on a different port so we can have the router forward requests
for port 80 to another port (what I call the destination port.)
If the destination port isn't specified, it defaults to the
source port. Proto is either TCP or UDP. Function returns None
on success, otherwise it raises an exception. | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/upnp.py#L159-L238 |
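
A hedged usage sketch of the UPnP path; it assumes the UPnP class can be constructed with its defaults and that the router exposes a WANIPConnection service.

from pyp2p.upnp import UPnP
from pyp2p.lib import get_lan_ip

try:
    # Ask the router to forward external TCP port 50500 to this host.
    UPnP().forward_port("TCP", 50500, get_lan_ip())
    print("Port forwarded")
except Exception as e:
    print("UPnP forwarding failed:", e)
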
gawel/aiocron | aiocron/__init__.py | Cron.start | def start(self):
"""Start scheduling"""
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next) | python | def start(self):
"""Start scheduling"""
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next) | Start scheduling | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L43-L47 |
gawel/aiocron | aiocron/__init__.py | Cron.stop | def stop(self):
"""Stop scheduling"""
if self.handle is not None:
self.handle.cancel()
self.handle = self.future = self.croniter = None | python | def stop(self):
"""Stop scheduling"""
if self.handle is not None:
self.handle.cancel()
self.handle = self.future = self.croniter = None | Stop scheduling | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L49-L53 |
gawel/aiocron | aiocron/__init__.py | Cron.next | def next(self, *args):
"""yield from .next()"""
self.initialize()
self.future = asyncio.Future(loop=self.loop)
self.handle = self.loop.call_at(self.get_next(), self.call_func, *args)
return self.future | python | def next(self, *args):
"""yield from .next()"""
self.initialize()
self.future = asyncio.Future(loop=self.loop)
self.handle = self.loop.call_at(self.get_next(), self.call_func, *args)
return self.future | yield from .next() | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L56-L61 |
gawel/aiocron | aiocron/__init__.py | Cron.initialize | def initialize(self):
"""Initialize croniter and related times"""
if self.croniter is None:
self.time = time.time()
self.datetime = datetime.now(self.tz)
self.loop_time = self.loop.time()
self.croniter = croniter(self.spec, start_time=self.datetime) | python | def initialize(self):
"""Initialize croniter and related times"""
if self.croniter is None:
self.time = time.time()
self.datetime = datetime.now(self.tz)
self.loop_time = self.loop.time()
self.croniter = croniter(self.spec, start_time=self.datetime) | Initialize croniter and related times | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L63-L69 |
gawel/aiocron | aiocron/__init__.py | Cron.get_next | def get_next(self):
"""Return next iteration time related to loop time"""
return self.loop_time + (self.croniter.get_next(float) - self.time) | python | def get_next(self):
"""Return next iteration time related to loop time"""
return self.loop_time + (self.croniter.get_next(float) - self.time) | Return next iteration time related to loop time | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L71-L73 |
gawel/aiocron | aiocron/__init__.py | Cron.call_next | def call_next(self):
"""Set next hop in the loop. Call task"""
if self.handle is not None:
self.handle.cancel()
next_time = self.get_next()
self.handle = self.loop.call_at(next_time, self.call_next)
self.call_func() | python | def call_next(self):
"""Set next hop in the loop. Call task"""
if self.handle is not None:
self.handle.cancel()
next_time = self.get_next()
self.handle = self.loop.call_at(next_time, self.call_next)
self.call_func() | Set next hop in the loop. Call task | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L75-L81 |
gawel/aiocron | aiocron/__init__.py | Cron.call_func | def call_func(self, *args, **kwargs):
"""Called. Take care of exceptions using gather"""
asyncio.gather(
self.cron(*args, **kwargs),
loop=self.loop, return_exceptions=True
).add_done_callback(self.set_result) | python | def call_func(self, *args, **kwargs):
"""Called. Take care of exceptions using gather"""
asyncio.gather(
self.cron(*args, **kwargs),
loop=self.loop, return_exceptions=True
).add_done_callback(self.set_result) | Called. Take care of exceptions using gather | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L83-L88 |
gawel/aiocron | aiocron/__init__.py | Cron.set_result | def set_result(self, result):
"""Set future's result if needed (can be an exception).
Else raise if needed."""
result = result.result()[0]
if self.future is not None:
if isinstance(result, Exception):
self.future.set_exception(result)
else:
self.future.set_result(result)
self.future = None
elif isinstance(result, Exception):
raise result | python | def set_result(self, result):
"""Set future's result if needed (can be an exception).
Else raise if needed."""
result = result.result()[0]
if self.future is not None:
if isinstance(result, Exception):
self.future.set_exception(result)
else:
self.future.set_result(result)
self.future = None
elif isinstance(result, Exception):
raise result | Set future's result if needed (can be an exception).
Else raise if needed. | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L90-L101 |
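
Putting the Cron pieces together, aiocron's README-style decorator usage looks like this (sketch; the crontab helper is assumed to wrap the Cron class shown above with start=True):

import asyncio
from aiocron import crontab

@crontab('* * * * *')  # fire at the start of every minute
async def heartbeat():
    print("tick")

# Awaiting a single occurrence is also possible, e.g.:
#     await crontab('0 9 * * *').next()

asyncio.get_event_loop().run_forever()
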
nicolas-van/docker-compose-wait | timeparse.py | timeparse | def timeparse(sval):
"""Parse a time expression, returning it as a number of seconds. If
possible, the return value will be an `int`; if this is not
possible, the return will be a `float`. Returns `None` if a time
expression cannot be parsed from the given string.
Arguments:
- `sval`: the string value to parse
>>> timeparse('1m24s')
84
>>> timeparse('1.2 minutes')
72
>>> timeparse('1.2 seconds')
1.2
"""
match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
if not match or not match.group(0).strip():
return
mdict = match.groupdict()
return sum(
MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None) | python | def timeparse(sval):
"""Parse a time expression, returning it as a number of seconds. If
possible, the return value will be an `int`; if this is not
possible, the return will be a `float`. Returns `None` if a time
expression cannot be parsed from the given string.
Arguments:
- `sval`: the string value to parse
>>> timeparse('1m24s')
84
>>> timeparse('1.2 minutes')
72
>>> timeparse('1.2 seconds')
1.2
"""
match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
if not match or not match.group(0).strip():
return
mdict = match.groupdict()
return sum(
MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None) | Parse a time expression, returning it as a number of seconds. If
possible, the return value will be an `int`; if this is not
possible, the return will be a `float`. Returns `None` if a time
expression cannot be parsed from the given string.
Arguments:
- `sval`: the string value to parse
>>> timeparse('1m24s')
84
>>> timeparse('1.2 minutes')
72
>>> timeparse('1.2 seconds')
1.2 | https://github.com/nicolas-van/docker-compose-wait/blob/86dab5c9f306ec73a7f9199ba244b47791e1ac73/timeparse.py#L68-L88 |
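
The doctests above already show the API; for completeness, a tiny hedged sketch of the same named-group technique with a simplified pattern (not the module's full TIMEFORMAT, which also covers days and hours):

import re

_SIMPLE = re.compile(
    r'\s*(?:(?P<mins>\d+(?:\.\d+)?)\s*m(?:in(?:ute)?s?)?)?'
    r'\s*(?:(?P<secs>\d+(?:\.\d+)?)\s*s(?:ec(?:ond)?s?)?)?\s*$', re.I)

def simple_timeparse(sval):
    m = _SIMPLE.match(sval)
    if not m or not m.group(0).strip():
        return None
    total = float(m.group('mins') or 0) * 60 + float(m.group('secs') or 0)
    return int(total) if total == int(total) else total

assert simple_timeparse('1m24s') == 84
assert simple_timeparse('1.2 seconds') == 1.2
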
ppannuto/python-titlecase | titlecase/__init__.py | titlecase | def titlecase(text, callback=None, small_first_last=True):
"""
Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
all_caps = line.upper() == line
words = re.split('[\t ]', line)
tc_line = []
for word in words:
if callback:
new_word = callback(word, all_caps=all_caps)
if new_word:
# Address #22: If a callback has done something
# specific, leave this string alone from now on
tc_line.append(_mark_immutable(new_word))
continue
if all_caps:
if UC_INITIALS.match(word):
tc_line.append(word)
continue
if APOS_SECOND.match(word):
if len(word[0]) == 1 and word[0] not in 'aeiouAEIOU':
word = word[0].lower() + word[1] + word[2].upper() + word[3:]
else:
word = word[0].upper() + word[1] + word[2].upper() + word[3:]
tc_line.append(word)
continue
match = MAC_MC.match(word)
if match:
tc_line.append("%s%s" % (match.group(1).capitalize(),
titlecase(match.group(2),callback,small_first_last)))
continue
if INLINE_PERIOD.search(word) or (not all_caps and UC_ELSEWHERE.match(word)):
tc_line.append(word)
continue
if SMALL_WORDS.match(word):
tc_line.append(word.lower())
continue
if "/" in word and "//" not in word:
slashed = map(
lambda t: titlecase(t,callback,False),
word.split('/')
)
tc_line.append("/".join(slashed))
continue
if '-' in word:
hyphenated = map(
lambda t: titlecase(t,callback,small_first_last),
word.split('-')
)
tc_line.append("-".join(hyphenated))
continue
if all_caps:
word = word.lower()
# Just a normal word that needs to be capitalized
tc_line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
if small_first_last and tc_line:
if not isinstance(tc_line[0], Immutable):
tc_line[0] = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), tc_line[0])
if not isinstance(tc_line[-1], Immutable):
tc_line[-1] = SMALL_LAST.sub(
lambda m: m.group(0).capitalize(), tc_line[-1]
)
result = " ".join(tc_line)
result = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
processed.append(result)
return "\n".join(processed) | python | def titlecase(text, callback=None, small_first_last=True):
"""
Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
all_caps = line.upper() == line
words = re.split('[\t ]', line)
tc_line = []
for word in words:
if callback:
new_word = callback(word, all_caps=all_caps)
if new_word:
# Address #22: If a callback has done something
# specific, leave this string alone from now on
tc_line.append(_mark_immutable(new_word))
continue
if all_caps:
if UC_INITIALS.match(word):
tc_line.append(word)
continue
if APOS_SECOND.match(word):
if len(word[0]) == 1 and word[0] not in 'aeiouAEIOU':
word = word[0].lower() + word[1] + word[2].upper() + word[3:]
else:
word = word[0].upper() + word[1] + word[2].upper() + word[3:]
tc_line.append(word)
continue
match = MAC_MC.match(word)
if match:
tc_line.append("%s%s" % (match.group(1).capitalize(),
titlecase(match.group(2),callback,small_first_last)))
continue
if INLINE_PERIOD.search(word) or (not all_caps and UC_ELSEWHERE.match(word)):
tc_line.append(word)
continue
if SMALL_WORDS.match(word):
tc_line.append(word.lower())
continue
if "/" in word and "//" not in word:
slashed = map(
lambda t: titlecase(t,callback,False),
word.split('/')
)
tc_line.append("/".join(slashed))
continue
if '-' in word:
hyphenated = map(
lambda t: titlecase(t,callback,small_first_last),
word.split('-')
)
tc_line.append("-".join(hyphenated))
continue
if all_caps:
word = word.lower()
# Just a normal word that needs to be capitalized
tc_line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
if small_first_last and tc_line:
if not isinstance(tc_line[0], Immutable):
tc_line[0] = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), tc_line[0])
if not isinstance(tc_line[-1], Immutable):
tc_line[-1] = SMALL_LAST.sub(
lambda m: m.group(0).capitalize(), tc_line[-1]
)
result = " ".join(tc_line)
result = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
processed.append(result)
return "\n".join(processed) | Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'. | https://github.com/ppannuto/python-titlecase/blob/9000878d917f88030807b1bcdc04a0c37f7001ee/titlecase/__init__.py#L66-L162 |
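A short doctest-style sketch of the function above; the expected output assumes 'the' is in the SMALL-word list drawn from the NYT manual and that the leading small word is recapitalized because `small_first_last` defaults to True:
>>> titlecase('the quick brown fox jumps over the lazy dog')
'The Quick Brown Fox Jumps Over the Lazy Dog'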
ppannuto/python-titlecase | titlecase/__init__.py | cmd | def cmd():
'''Handler for command line invocation'''
# Try to handle any reasonable thing thrown at this.
# Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout
# and treat any subsequent arguments as a space separated string to
# be titlecased (so it still works if people forget quotes)
parser = argparse.ArgumentParser()
in_group = parser.add_mutually_exclusive_group()
in_group.add_argument('string', nargs='*', default=[],
help='String to titlecase')
in_group.add_argument('-f', '--input-file',
help='File to read from to titlecase')
parser.add_argument('-o', '--output-file',
                        help='File to write titlecased output to')
args = parser.parse_args()
if args.input_file is not None:
if args.input_file == '-':
ifile = sys.stdin
else:
ifile = open(args.input_file)
else:
ifile = sys.stdin
if args.output_file is not None:
if args.output_file == '-':
ofile = sys.stdout
else:
ofile = open(args.output_file, 'w')
else:
ofile = sys.stdout
if len(args.string) > 0:
in_string = ' '.join(args.string)
else:
with ifile:
in_string = ifile.read()
with ofile:
ofile.write(titlecase(in_string)) | python | def cmd():
'''Handler for command line invocation'''
# Try to handle any reasonable thing thrown at this.
# Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout
# and treat any subsequent arguments as a space separated string to
# be titlecased (so it still works if people forget quotes)
parser = argparse.ArgumentParser()
in_group = parser.add_mutually_exclusive_group()
in_group.add_argument('string', nargs='*', default=[],
help='String to titlecase')
in_group.add_argument('-f', '--input-file',
help='File to read from to titlecase')
parser.add_argument('-o', '--output-file',
                        help='File to write titlecased output to')
args = parser.parse_args()
if args.input_file is not None:
if args.input_file == '-':
ifile = sys.stdin
else:
ifile = open(args.input_file)
else:
ifile = sys.stdin
if args.output_file is not None:
if args.output_file == '-':
ofile = sys.stdout
else:
ofile = open(args.output_file, 'w')
else:
ofile = sys.stdout
if len(args.string) > 0:
in_string = ' '.join(args.string)
else:
with ifile:
in_string = ifile.read()
with ofile:
ofile.write(titlecase(in_string)) | Handler for command line invocation | https://github.com/ppannuto/python-titlecase/blob/9000878d917f88030807b1bcdc04a0c37f7001ee/titlecase/__init__.py#L165-L206 |
sobolevn/flake8-eradicate | flake8_eradicate.py | Checker.add_options | def add_options(cls, parser: OptionManager) -> None:
"""
``flake8`` api method to register new plugin options.
See :class:`.Configuration` docs for detailed options reference.
Arguments:
parser: ``flake8`` option parser instance.
"""
parser.add_option(
'--eradicate-aggressive',
default=False,
help=(
'Enables aggressive mode for eradicate; '
'this may result in false positives'
),
action='store_true',
type=None,
) | python | def add_options(cls, parser: OptionManager) -> None:
"""
``flake8`` api method to register new plugin options.
See :class:`.Configuration` docs for detailed options reference.
Arguments:
parser: ``flake8`` option parser instance.
"""
parser.add_option(
'--eradicate-aggressive',
default=False,
help=(
'Enables aggressive mode for eradicate; '
'this may result in false positives'
),
action='store_true',
type=None,
) | ``flake8`` api method to register new plugin options.
See :class:`.Configuration` docs for detailed options reference.
Arguments:
parser: ``flake8`` option parser instance. | https://github.com/sobolevn/flake8-eradicate/blob/0d992fae5dd3bd9014d79291a4f08b6da17d3031/flake8_eradicate.py#L52-L71 |
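Once registered, the flag appears on the normal flake8 command line, for example ``flake8 --eradicate-aggressive some_module.py`` (the file name here is hypothetical); without the flag the plugin runs eradicate in its default, non-aggressive mode.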
sobolevn/flake8-eradicate | flake8_eradicate.py | Checker.run | def run(self) -> Generator[Tuple[int, int, str, type], None, None]:
"""
Runs the checker.
``fix_file()`` only mutates the buffer object.
It is the only way to find out if some error happened.
"""
if self.filename != STDIN:
buffer = StringIO()
options = _Options(aggressive=self.options.eradicate_aggressive)
fix_file(self.filename, options, buffer)
traceback = buffer.getvalue()
if traceback:
yield 1, 0, self._error(traceback), type(self) | python | def run(self) -> Generator[Tuple[int, int, str, type], None, None]:
"""
Runs the checker.
``fix_file()`` only mutates the buffer object.
It is the only way to find out if some error happened.
"""
if self.filename != STDIN:
buffer = StringIO()
options = _Options(aggressive=self.options.eradicate_aggressive)
fix_file(self.filename, options, buffer)
traceback = buffer.getvalue()
if traceback:
yield 1, 0, self._error(traceback), type(self) | Runs the checker.
``fix_file()`` only mutates the buffer object.
It is the only way to find out if some error happened. | https://github.com/sobolevn/flake8-eradicate/blob/0d992fae5dd3bd9014d79291a4f08b6da17d3031/flake8_eradicate.py#L78-L92 |
ssanderson/interface | interface/utils.py | unique | def unique(g):
"""
Yield values yielded by ``g``, removing any duplicates.
Example
-------
>>> list(unique(iter([1, 3, 1, 2, 3])))
[1, 3, 2]
"""
yielded = set()
for value in g:
if value not in yielded:
yield value
yielded.add(value) | python | def unique(g):
"""
Yield values yielded by ``g``, removing any duplicates.
Example
-------
>>> list(unique(iter([1, 3, 1, 2, 3])))
[1, 3, 2]
"""
yielded = set()
for value in g:
if value not in yielded:
yield value
yielded.add(value) | Yield values yielded by ``g``, removing any duplicates.
Example
-------
>>> list(unique(iter([1, 3, 1, 2, 3])))
[1, 3, 2] | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/utils.py#L6-L19 |
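One more doctest-style example, grounded directly in the generator above: duplicates are dropped, first-seen order is preserved, and any iterable works as input.
>>> list(unique('abracadabra'))
['a', 'b', 'r', 'c', 'd']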
ssanderson/interface | interface/interface.py | static_get_type_attr | def static_get_type_attr(t, name):
"""
Get a type attribute statically, circumventing the descriptor protocol.
"""
for type_ in t.mro():
try:
return vars(type_)[name]
except KeyError:
pass
raise AttributeError(name) | python | def static_get_type_attr(t, name):
"""
Get a type attribute statically, circumventing the descriptor protocol.
"""
for type_ in t.mro():
try:
return vars(type_)[name]
except KeyError:
pass
raise AttributeError(name) | Get a type attribute statically, circumventing the descriptor protocol. | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L37-L46 |
ssanderson/interface | interface/interface.py | _conflicting_defaults | def _conflicting_defaults(typename, conflicts):
"""Format an error message for conflicting default implementations.
Parameters
----------
typename : str
Name of the type for which we're producing an error.
conflicts : dict[str -> list[Interface]]
Map from strings to interfaces providing a default with that name.
Returns
-------
message : str
User-facing error message.
"""
message = "\nclass {C} received conflicting default implementations:".format(
C=typename,
)
for attrname, interfaces in conflicts.items():
message += dedent(
"""
The following interfaces provided default implementations for {attr!r}:
{interfaces}"""
).format(
attr=attrname,
interfaces=bulleted_list(sorted(map(getname, interfaces))),
)
return InvalidImplementation(message) | python | def _conflicting_defaults(typename, conflicts):
"""Format an error message for conflicting default implementations.
Parameters
----------
typename : str
Name of the type for which we're producing an error.
conflicts : dict[str -> list[Interface]]
Map from strings to interfaces providing a default with that name.
Returns
-------
message : str
User-facing error message.
"""
message = "\nclass {C} received conflicting default implementations:".format(
C=typename,
)
for attrname, interfaces in conflicts.items():
message += dedent(
"""
The following interfaces provided default implementations for {attr!r}:
{interfaces}"""
).format(
attr=attrname,
interfaces=bulleted_list(sorted(map(getname, interfaces))),
)
return InvalidImplementation(message) | Format an error message for conflicting default implementations.
Parameters
----------
typename : str
Name of the type for which we're producing an error.
conflicts : dict[str -> list[Interface]]
Map from strings to interfaces providing a default with that name.
Returns
-------
message : str
User-facing error message. | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L49-L77 |
ssanderson/interface | interface/interface.py | InterfaceMeta._diff_signatures | def _diff_signatures(self, type_):
"""
Diff our method signatures against the methods provided by type_.
Parameters
----------
type_ : type
The type to check.
Returns
-------
missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature] # noqa
``missing`` is a list of missing interface names.
            ``mistyped`` is a dict mapping names to incorrect types.
``mismatched`` is a dict mapping names to incorrect signatures.
"""
missing = []
mistyped = {}
mismatched = {}
for name, iface_sig in self._signatures.items():
try:
# Don't invoke the descriptor protocol here so that we get
# staticmethod/classmethod/property objects instead of the
# functions they wrap.
f = static_get_type_attr(type_, name)
except AttributeError:
missing.append(name)
continue
impl_sig = TypedSignature(f)
if not issubclass(impl_sig.type, iface_sig.type):
mistyped[name] = impl_sig.type
if not compatible(impl_sig.signature, iface_sig.signature):
mismatched[name] = impl_sig
return missing, mistyped, mismatched | python | def _diff_signatures(self, type_):
"""
Diff our method signatures against the methods provided by type_.
Parameters
----------
type_ : type
The type to check.
Returns
-------
missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature] # noqa
``missing`` is a list of missing interface names.
            ``mistyped`` is a dict mapping names to incorrect types.
``mismatched`` is a dict mapping names to incorrect signatures.
"""
missing = []
mistyped = {}
mismatched = {}
for name, iface_sig in self._signatures.items():
try:
# Don't invoke the descriptor protocol here so that we get
# staticmethod/classmethod/property objects instead of the
# functions they wrap.
f = static_get_type_attr(type_, name)
except AttributeError:
missing.append(name)
continue
impl_sig = TypedSignature(f)
if not issubclass(impl_sig.type, iface_sig.type):
mistyped[name] = impl_sig.type
if not compatible(impl_sig.signature, iface_sig.signature):
mismatched[name] = impl_sig
return missing, mistyped, mismatched | Diff our method signatures against the methods provided by type_.
Parameters
----------
type_ : type
The type to check.
Returns
-------
missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature] # noqa
``missing`` is a list of missing interface names.
            ``mistyped`` is a dict mapping names to incorrect types.
``mismatched`` is a dict mapping names to incorrect signatures. | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L116-L153 |
ssanderson/interface | interface/interface.py | InterfaceMeta.verify | def verify(self, type_):
"""
Check whether a type implements ``self``.
Parameters
----------
type_ : type
The type to check.
Raises
------
TypeError
If ``type_`` doesn't conform to our interface.
Returns
-------
        defaults : dict
            Default implementations to use for interface methods missing from ``type_``.
"""
raw_missing, mistyped, mismatched = self._diff_signatures(type_)
# See if we have defaults for missing methods.
missing = []
defaults_to_use = {}
for name in raw_missing:
try:
defaults_to_use[name] = self._defaults[name].implementation
except KeyError:
missing.append(name)
if not any((missing, mistyped, mismatched)):
return defaults_to_use
raise self._invalid_implementation(type_, missing, mistyped, mismatched) | python | def verify(self, type_):
"""
Check whether a type implements ``self``.
Parameters
----------
type_ : type
The type to check.
Raises
------
TypeError
If ``type_`` doesn't conform to our interface.
Returns
-------
        defaults : dict
            Default implementations to use for interface methods missing from ``type_``.
"""
raw_missing, mistyped, mismatched = self._diff_signatures(type_)
# See if we have defaults for missing methods.
missing = []
defaults_to_use = {}
for name in raw_missing:
try:
defaults_to_use[name] = self._defaults[name].implementation
except KeyError:
missing.append(name)
if not any((missing, mistyped, mismatched)):
return defaults_to_use
raise self._invalid_implementation(type_, missing, mistyped, mismatched) | Check whether a type implements ``self``.
Parameters
----------
type_ : type
The type to check.
Raises
------
TypeError
If ``type_`` doesn't conform to our interface.
Returns
-------
        defaults : dict
            Default implementations to use for interface methods missing from ``type_``. | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L155-L187
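A hedged sketch of calling the verification hook directly; it assumes the package's ``Interface`` base class (also used by ``from_class`` below) accepts empty-bodied method stubs, which is how interfaces are declared in this package:
class Quacker(Interface):
    def quack(self, loudly):
        pass

class Duck(object):
    def quack(self, loudly, times=1):   # an extra defaulted argument is still compatible
        return 'quack' * times

Quacker.verify(Duck)     # returns {}; no default implementations were needed
Quacker.verify(object)   # raises InvalidImplementation, since quack is missing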
ssanderson/interface | interface/interface.py | InterfaceMeta._invalid_implementation | def _invalid_implementation(self, t, missing, mistyped, mismatched):
"""
Make a TypeError explaining why ``t`` doesn't implement our interface.
"""
assert missing or mistyped or mismatched, "Implementation wasn't invalid."
message = "\nclass {C} failed to implement interface {I}:".format(
C=getname(t),
I=getname(self),
)
if missing:
message += dedent(
"""
The following methods of {I} were not implemented:
{missing_methods}"""
).format(
I=getname(self),
missing_methods=self._format_missing_methods(missing)
)
if mistyped:
message += dedent(
"""
The following methods of {I} were implemented with incorrect types:
{mismatched_types}"""
).format(
I=getname(self),
mismatched_types=self._format_mismatched_types(mistyped),
)
if mismatched:
message += dedent(
"""
The following methods of {I} were implemented with invalid signatures:
{mismatched_methods}"""
).format(
I=getname(self),
mismatched_methods=self._format_mismatched_methods(mismatched),
)
return InvalidImplementation(message) | python | def _invalid_implementation(self, t, missing, mistyped, mismatched):
"""
Make a TypeError explaining why ``t`` doesn't implement our interface.
"""
assert missing or mistyped or mismatched, "Implementation wasn't invalid."
message = "\nclass {C} failed to implement interface {I}:".format(
C=getname(t),
I=getname(self),
)
if missing:
message += dedent(
"""
The following methods of {I} were not implemented:
{missing_methods}"""
).format(
I=getname(self),
missing_methods=self._format_missing_methods(missing)
)
if mistyped:
message += dedent(
"""
The following methods of {I} were implemented with incorrect types:
{mismatched_types}"""
).format(
I=getname(self),
mismatched_types=self._format_mismatched_types(mistyped),
)
if mismatched:
message += dedent(
"""
The following methods of {I} were implemented with invalid signatures:
{mismatched_methods}"""
).format(
I=getname(self),
mismatched_methods=self._format_mismatched_methods(mismatched),
)
return InvalidImplementation(message) | Make a TypeError explaining why ``t`` doesn't implement our interface. | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L189-L231 |
ssanderson/interface | interface/interface.py | Interface.from_class | def from_class(cls, existing_class, subset=None, name=None):
"""Create an interface from an existing class.
Parameters
----------
existing_class : type
The type from which to extract an interface.
subset : list[str], optional
List of methods that should be included in the interface.
Default is to use all attributes not defined in an empty class.
name : str, optional
Name of the generated interface.
Default is ``existing_class.__name__ + 'Interface'``.
Returns
-------
interface : type
A new interface class with stubs generated from ``existing_class``.
"""
if name is None:
name = existing_class.__name__ + 'Interface'
if subset is None:
subset = set(dir(existing_class)) - TRIVIAL_CLASS_ATTRIBUTES
return InterfaceMeta(
name,
(Interface,),
{name: static_get_type_attr(existing_class, name) for name in subset},
) | python | def from_class(cls, existing_class, subset=None, name=None):
"""Create an interface from an existing class.
Parameters
----------
existing_class : type
The type from which to extract an interface.
subset : list[str], optional
List of methods that should be included in the interface.
Default is to use all attributes not defined in an empty class.
name : str, optional
Name of the generated interface.
Default is ``existing_class.__name__ + 'Interface'``.
Returns
-------
interface : type
A new interface class with stubs generated from ``existing_class``.
"""
if name is None:
name = existing_class.__name__ + 'Interface'
if subset is None:
subset = set(dir(existing_class)) - TRIVIAL_CLASS_ATTRIBUTES
return InterfaceMeta(
name,
(Interface,),
{name: static_get_type_attr(existing_class, name) for name in subset},
) | Create an interface from an existing class.
Parameters
----------
existing_class : type
The type from which to extract an interface.
subset : list[str], optional
List of methods that should be included in the interface.
Default is to use all attributes not defined in an empty class.
name : str, optional
Name of the generated interface.
Default is ``existing_class.__name__ + 'Interface'``.
Returns
-------
interface : type
A new interface class with stubs generated from ``existing_class``. | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L319-L348 |
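A small sketch of the extraction path, using only the classmethod above; ``Config`` is a made-up example class and the generated interface name follows from the default described in the docstring:
class Config(object):
    def get(self, key):
        pass

    def set(self, key, value):
        pass

ConfigInterface = Interface.from_class(Config, subset=['get', 'set'])
ConfigInterface.__name__   # 'ConfigInterface'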
ssanderson/interface | interface/typecheck.py | compatible | def compatible(impl_sig, iface_sig):
"""
Check whether ``impl_sig`` is compatible with ``iface_sig``.
Parameters
----------
impl_sig : inspect.Signature
The signature of the implementation function.
iface_sig : inspect.Signature
The signature of the interface function.
In general, an implementation is compatible with an interface if any valid
way of passing parameters to the interface method is also valid for the
implementation.
Consequently, the following differences are allowed between the signature
of an implementation method and the signature of its interface definition:
1. An implementation may add new arguments to an interface iff:
a. All new arguments have default values.
b. All new arguments accepted positionally (i.e. all non-keyword-only
arguments) occur after any arguments declared by the interface.
c. Keyword-only arguments may be reordered by the implementation.
    2. For type-annotated interfaces, type annotations may differ as follows:
a. Arguments to implementations of an interface may be annotated with
a **superclass** of the type specified by the interface.
b. The return type of an implementation may be annotated with a
**subclass** of the type specified by the interface.
"""
return all([
positionals_compatible(
takewhile(is_positional, impl_sig.parameters.values()),
takewhile(is_positional, iface_sig.parameters.values()),
),
keywords_compatible(
valfilter(complement(is_positional), impl_sig.parameters),
valfilter(complement(is_positional), iface_sig.parameters),
),
]) | python | def compatible(impl_sig, iface_sig):
"""
Check whether ``impl_sig`` is compatible with ``iface_sig``.
Parameters
----------
impl_sig : inspect.Signature
The signature of the implementation function.
iface_sig : inspect.Signature
The signature of the interface function.
In general, an implementation is compatible with an interface if any valid
way of passing parameters to the interface method is also valid for the
implementation.
Consequently, the following differences are allowed between the signature
of an implementation method and the signature of its interface definition:
1. An implementation may add new arguments to an interface iff:
a. All new arguments have default values.
b. All new arguments accepted positionally (i.e. all non-keyword-only
arguments) occur after any arguments declared by the interface.
c. Keyword-only arguments may be reordered by the implementation.
    2. For type-annotated interfaces, type annotations may differ as follows:
a. Arguments to implementations of an interface may be annotated with
a **superclass** of the type specified by the interface.
b. The return type of an implementation may be annotated with a
**subclass** of the type specified by the interface.
"""
return all([
positionals_compatible(
takewhile(is_positional, impl_sig.parameters.values()),
takewhile(is_positional, iface_sig.parameters.values()),
),
keywords_compatible(
valfilter(complement(is_positional), impl_sig.parameters),
valfilter(complement(is_positional), iface_sig.parameters),
),
]) | Check whether ``impl_sig`` is compatible with ``iface_sig``.
Parameters
----------
impl_sig : inspect.Signature
The signature of the implementation function.
iface_sig : inspect.Signature
The signature of the interface function.
In general, an implementation is compatible with an interface if any valid
way of passing parameters to the interface method is also valid for the
implementation.
Consequently, the following differences are allowed between the signature
of an implementation method and the signature of its interface definition:
1. An implementation may add new arguments to an interface iff:
a. All new arguments have default values.
b. All new arguments accepted positionally (i.e. all non-keyword-only
arguments) occur after any arguments declared by the interface.
c. Keyword-only arguments may be reordered by the implementation.
    2. For type-annotated interfaces, type annotations may differ as follows:
a. Arguments to implementations of an interface may be annotated with
a **superclass** of the type specified by the interface.
b. The return type of an implementation may be annotated with a
**subclass** of the type specified by the interface. | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/typecheck.py#L10-L49 |
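A doctest-style illustration of the rules above; the expected booleans follow from the documented rules (an added defaulted argument is allowed, a dropped required one is not) rather than from helper code shown here:
>>> from inspect import signature
>>> def iface_method(self, a, b): pass
>>> def impl_method(self, a, b, verbose=False): pass
>>> compatible(signature(impl_method), signature(iface_method))
True
>>> def narrower_impl(self, a): pass
>>> compatible(signature(narrower_impl), signature(iface_method))
False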
ml31415/numpy-groupies | numpy_groupies/benchmarks/simple.py | aggregate_group_loop | def aggregate_group_loop(*args, **kwargs):
"""wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops."""
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs) | python | def aggregate_group_loop(*args, **kwargs):
"""wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops."""
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs) | wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops. | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/benchmarks/simple.py#L14-L19 |
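A hedged sketch of the benchmark helper; it assumes ``aggregate_np`` takes group labels and values like the package's public ``aggregate`` and applies the wrapped callable once per group:
import numpy as np

group_idx = np.array([0, 0, 1, 1, 1])
a = np.array([1.0, 3.0, 2.0, 4.0, 6.0])
aggregate_group_loop(group_idx, a, func=np.median)   # array([2., 4.]), forced through the group-and-loop path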
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | step_count | def step_count(group_idx):
"""Return the amount of index changes within group_idx."""
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
return 0
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
steps += 1
return steps | python | def step_count(group_idx):
"""Return the amount of index changes within group_idx."""
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
return 0
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
steps += 1
    return steps | Return the number of index changes within group_idx. | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L445-L455
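A doctest-style check of the counter above; three runs of equal values give three steps:
>>> import numpy as np
>>> step_count(np.array([4, 4, 7, 7, 7, 4]))
3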
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | step_indices | def step_indices(group_idx):
"""Return the edges of areas within group_idx, which are filled with the same value."""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
return indices | python | def step_indices(group_idx):
"""Return the edges of areas within group_idx, which are filled with the same value."""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
return indices | Return the edges of areas within group_idx, which are filled with the same value. | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L459-L472 |
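And the matching edges for the same hypothetical input; the result always starts at 0 and ends at the array length:
>>> import numpy as np
>>> step_indices(np.array([4, 4, 7, 7, 7, 4]))   # doctest: +SKIP (exact dtype in the repr may vary)
array([0, 2, 5, 6])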
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | AggregateOp.callable | def callable(cls, nans=False, reverse=False, scalar=False):
""" Compile a jitted function doing the hard part of the job """
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
_cls_inner(ri, val, ret, counter, mean)
inner = nb.njit(_inner)
else:
inner = _cls_inner
def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
# fill_value and ddof need to be present for being exchangeable with loop_2pass
size = len(ret)
rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx))
for i in rng:
ri = group_idx[i]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
val = valgetter(a, i)
inner(ri, val, ret, counter, mean)
outersetter(outer, i, ret[ri])
return nb.njit(_loop, nogil=True) | python | def callable(cls, nans=False, reverse=False, scalar=False):
""" Compile a jitted function doing the hard part of the job """
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
_cls_inner(ri, val, ret, counter, mean)
inner = nb.njit(_inner)
else:
inner = _cls_inner
def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
# fill_value and ddof need to be present for being exchangeable with loop_2pass
size = len(ret)
rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx))
for i in rng:
ri = group_idx[i]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
val = valgetter(a, i)
inner(ri, val, ret, counter, mean)
outersetter(outer, i, ret[ri])
return nb.njit(_loop, nogil=True) | Compile a jitted function doing the hard part of the job | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L91-L119 |
ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | AggregateGeneric.callable | def callable(self, nans=False):
"""Compile a jitted function and loop it over the sorted data."""
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
start_idx, stop_idx = indices[i], indices[i + 1]
ri = group_idx_srt[start_idx]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
return nb.njit(_loop, nogil=True) | python | def callable(self, nans=False):
"""Compile a jitted function and loop it over the sorted data."""
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
start_idx, stop_idx = indices[i], indices[i + 1]
ri = group_idx_srt[start_idx]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
return nb.njit(_loop, nogil=True) | Compile a jitted function and loop it over the sorted data. | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L208-L226 |
ml31415/numpy-groupies | numpy_groupies/utils.py | get_aliasing | def get_aliasing(*extra):
"""The assembles the dict mapping strings and functions to the list of
supported function names:
e.g. alias['add'] = 'sum' and alias[sorted] = 'sort'
    This function should only be called during import.
"""
alias = dict((k, k) for k in funcs_common)
alias.update(_alias_str)
alias.update((fn, fn) for fn in _alias_builtin.values())
alias.update(_alias_builtin)
for d in extra:
alias.update(d)
alias.update((k, k) for k in set(alias.values()))
    # Treat nan-functions as first-class members and add them directly
for key in set(alias.values()):
if key not in funcs_no_separate_nan:
key = 'nan' + key
alias[key] = key
return alias | python | def get_aliasing(*extra):
"""The assembles the dict mapping strings and functions to the list of
supported function names:
e.g. alias['add'] = 'sum' and alias[sorted] = 'sort'
    This function should only be called during import.
"""
alias = dict((k, k) for k in funcs_common)
alias.update(_alias_str)
alias.update((fn, fn) for fn in _alias_builtin.values())
alias.update(_alias_builtin)
for d in extra:
alias.update(d)
alias.update((k, k) for k in set(alias.values()))
    # Treat nan-functions as first-class members and add them directly
for key in set(alias.values()):
if key not in funcs_no_separate_nan:
key = 'nan' + key
alias[key] = key
    return alias | This assembles the dict mapping strings and functions to the list of
supported function names:
e.g. alias['add'] = 'sum' and alias[sorted] = 'sort'
    This function should only be called during import. | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils.py#L95-L113
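A hedged sketch of the resulting table; the 'add' -> 'sum' pair is taken from the docstring example, the nan-prefixed entry follows from the final loop, and anything beyond that is an assumption about the module-level alias dicts:
aliasing = get_aliasing()
aliasing['add']      # 'sum', the string alias mentioned in the docstring
aliasing['nanmax']   # 'nanmax', added by the first-class nan-function pass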