index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
29,939 | zeroconf._exceptions | NotRunningException | Exception when an action is called with a zeroconf instance that is not running.
The instance may not be running because it was already shutdown
or startup has failed in some unexpected way.
| class NotRunningException(Error):
"""Exception when an action is called with a zeroconf instance that is not running.
The instance may not be running because it was already shutdown
or startup has failed in some unexpected way.
"""
| null |
29,940 | zeroconf._logger | QuietLogger | null | class QuietLogger:
_seen_logs: Dict[str, Union[int, tuple]] = {}
@classmethod
def log_exception_warning(cls, *logger_data: Any) -> None:
exc_info = sys.exc_info()
exc_str = str(exc_info[1])
if exc_str not in cls._seen_logs:
# log at warning level the first time this is seen
cls._seen_logs[exc_str] = exc_info
logger = log.warning
else:
logger = log.debug
logger(*(logger_data or ['Exception occurred']), exc_info=True)
@classmethod
def log_exception_debug(cls, *logger_data: Any) -> None:
log_exc_info = False
exc_info = sys.exc_info()
exc_str = str(exc_info[1])
if exc_str not in cls._seen_logs:
# log the trace only on the first time
cls._seen_logs[exc_str] = exc_info
log_exc_info = True
log.debug(*(logger_data or ['Exception occurred']), exc_info=log_exc_info)
@classmethod
def log_warning_once(cls, *args: Any) -> None:
msg_str = args[0]
if msg_str not in cls._seen_logs:
cls._seen_logs[msg_str] = 0
logger = log.warning
else:
logger = log.debug
cls._seen_logs[msg_str] = cast(int, cls._seen_logs[msg_str]) + 1
logger(*args)
@classmethod
def log_exception_once(cls, exc: Exception, *args: Any) -> None:
msg_str = args[0]
if msg_str not in cls._seen_logs:
cls._seen_logs[msg_str] = 0
logger = log.warning
else:
logger = log.debug
cls._seen_logs[msg_str] = cast(int, cls._seen_logs[msg_str]) + 1
logger(*args, exc_info=exc)
| () |
29,941 | zeroconf._record_update | RecordUpdate | null | from zeroconf._record_update import RecordUpdate
| null |
29,942 | zeroconf._updates | RecordUpdateListener | Base call for all record listeners.
All listeners passed to async_add_listener should use RecordUpdateListener
as a base class. In the future it will be required.
| from zeroconf._updates import RecordUpdateListener
| null |
29,943 | zeroconf._services.browser | ServiceBrowser | Used to browse for a service of a specific type.
The listener object will have its add_service() and
remove_service() methods called when this browser
discovers changes in the services availability. | from zeroconf._services.browser import ServiceBrowser
| (zc: "'Zeroconf'", type_: 'Union[str, list]', handlers: 'Optional[Union[ServiceListener, List[Callable[..., None]]]]' = None, listener: 'Optional[ServiceListener]' = None, addr: 'Optional[str]' = None, port: 'int' = 5353, delay: 'int' = 10000, question_type: 'Optional[DNSQuestionType]' = None) -> 'None' |
29,961 | zeroconf._services.info | ServiceInfo | Service information.
Constructor parameters are as follows:
* `type_`: fully qualified service type name
* `name`: fully qualified service name
* `port`: port that the service runs on
* `weight`: weight of the service
* `priority`: priority of the service
* `properties`: dictionary of properties (or a bytes object holding the contents of the `text` field).
converted to str and then encoded to bytes using UTF-8. Keys with `None` values are converted to
value-less attributes.
* `server`: fully qualified name for service host (defaults to name)
* `host_ttl`: ttl used for A/SRV records
* `other_ttl`: ttl used for PTR/TXT records
* `addresses` and `parsed_addresses`: List of IP addresses (either as bytes, network byte order,
or in parsed form as text; at most one of those parameters can be provided)
* interface_index: scope_id or zone_id for IPv6 link-local addresses i.e. an identifier of the interface
where the peer is connected to
| from zeroconf._services.info import ServiceInfo
| null |
29,962 | zeroconf._services | ServiceListener | null | from zeroconf._services import ServiceListener
| () |
29,963 | zeroconf._exceptions | ServiceNameAlreadyRegistered | Exception when a service name is already registered. | class ServiceNameAlreadyRegistered(Error):
"""Exception when a service name is already registered."""
| null |
29,964 | zeroconf._services.registry | ServiceRegistry | A registry to keep track of services.
The registry must only be accessed from
the event loop as it is not thread safe.
| from zeroconf._services.registry import ServiceRegistry
| null |
29,965 | zeroconf._services | ServiceStateChange | An enumeration. | from zeroconf._services import ServiceStateChange
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
29,966 | zeroconf._services.__init__ | Signal | null | from builtins import type
| null |
29,968 | zeroconf._core | Zeroconf | Implementation of Zeroconf Multicast DNS Service Discovery
Supports registration, unregistration, queries and browsing.
| class Zeroconf(QuietLogger):
"""Implementation of Zeroconf Multicast DNS Service Discovery
Supports registration, unregistration, queries and browsing.
"""
def __init__(
self,
interfaces: InterfacesType = InterfaceChoice.All,
unicast: bool = False,
ip_version: Optional[IPVersion] = None,
apple_p2p: bool = False,
) -> None:
"""Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads.
:param interfaces: :class:`InterfaceChoice` or a list of IP addresses
(IPv4 and IPv6) and interface indexes (IPv6 only).
IPv6 notes for non-POSIX systems:
* `InterfaceChoice.All` is an alias for `InterfaceChoice.Default`
on Python versions before 3.8.
Also listening on loopback (``::1``) doesn't work, use a real address.
:param ip_version: IP versions to support. If `choice` is a list, the default is detected
from it. Otherwise defaults to V4 only for backward compatibility.
:param apple_p2p: use AWDL interface (only macOS)
"""
if ip_version is None:
ip_version = autodetect_ip_version(interfaces)
self.done = False
if apple_p2p and sys.platform != 'darwin':
raise RuntimeError('Option `apple_p2p` is not supported on non-Apple platforms.')
self.unicast = unicast
listen_socket, respond_sockets = create_sockets(interfaces, unicast, ip_version, apple_p2p=apple_p2p)
log.debug('Listen socket %s, respond sockets %s', listen_socket, respond_sockets)
self.engine = AsyncEngine(self, listen_socket, respond_sockets)
self.browsers: Dict[ServiceListener, ServiceBrowser] = {}
self.registry = ServiceRegistry()
self.cache = DNSCache()
self.question_history = QuestionHistory()
self.out_queue = MulticastOutgoingQueue(self, 0, _AGGREGATION_DELAY)
self.out_delay_queue = MulticastOutgoingQueue(self, _ONE_SECOND, _PROTECTED_AGGREGATION_DELAY)
self.query_handler = QueryHandler(self)
self.record_manager = RecordManager(self)
self._notify_futures: Set[asyncio.Future] = set()
self.loop: Optional[asyncio.AbstractEventLoop] = None
self._loop_thread: Optional[threading.Thread] = None
self.start()
@property
def started(self) -> bool:
"""Check if the instance has started."""
return bool(not self.done and self.engine.running_event and self.engine.running_event.is_set())
def start(self) -> None:
"""Start Zeroconf."""
self.loop = get_running_loop()
if self.loop:
self.engine.setup(self.loop, None)
return
self._start_thread()
def _start_thread(self) -> None:
"""Start a thread with a running event loop."""
loop_thread_ready = threading.Event()
def _run_loop() -> None:
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.engine.setup(self.loop, loop_thread_ready)
self.loop.run_forever()
self._loop_thread = threading.Thread(target=_run_loop, daemon=True)
self._loop_thread.start()
loop_thread_ready.wait()
async def async_wait_for_start(self) -> None:
"""Wait for start up for actions that require a running Zeroconf instance.
Throws NotRunningException if the instance is not running or could
not be started.
"""
if self.done: # If the instance was shutdown from under us, raise immediately
raise NotRunningException
assert self.engine.running_event is not None
await wait_event_or_timeout(self.engine.running_event, timeout=_STARTUP_TIMEOUT)
if not self.engine.running_event.is_set() or self.done:
raise NotRunningException
@property
def listeners(self) -> Set[RecordUpdateListener]:
return self.record_manager.listeners
async def async_wait(self, timeout: float) -> None:
"""Calling task waits for a given number of milliseconds or until notified."""
loop = self.loop
assert loop is not None
await wait_for_future_set_or_timeout(loop, self._notify_futures, timeout)
def notify_all(self) -> None:
"""Notifies all waiting threads and notify listeners."""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.async_notify_all)
def async_notify_all(self) -> None:
"""Schedule an async_notify_all."""
notify_futures = self._notify_futures
if notify_futures:
_resolve_all_futures_to_none(notify_futures)
def get_service_info(
self, type_: str, name: str, timeout: int = 3000, question_type: Optional[DNSQuestionType] = None
) -> Optional[ServiceInfo]:
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds.
:param type_: fully qualified service type name
:param name: the name of the service
:param timeout: milliseconds to wait for a response
:param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
"""
info = ServiceInfo(type_, name)
if info.request(self, timeout, question_type):
return info
return None
def add_service_listener(self, type_: str, listener: ServiceListener) -> None:
"""Adds a listener for a particular service type. This object
will then have its add_service and remove_service methods called when
services of that type become available and unavailable."""
self.remove_service_listener(listener)
self.browsers[listener] = ServiceBrowser(self, type_, listener)
def remove_service_listener(self, listener: ServiceListener) -> None:
"""Removes a listener from the set that is currently listening."""
if listener in self.browsers:
self.browsers[listener].cancel()
del self.browsers[listener]
def remove_all_service_listeners(self) -> None:
"""Removes a listener from the set that is currently listening."""
for listener in list(self.browsers):
self.remove_service_listener(listener)
def register_service(
self,
info: ServiceInfo,
ttl: Optional[int] = None,
allow_name_change: bool = False,
cooperating_responders: bool = False,
strict: bool = True,
) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`).
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `register_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(
self.async_register_service(info, ttl, allow_name_change, cooperating_responders, strict)
),
self.loop,
_REGISTER_TIME * _REGISTER_BROADCASTS,
)
async def async_register_service(
self,
info: ServiceInfo,
ttl: Optional[int] = None,
allow_name_change: bool = False,
cooperating_responders: bool = False,
strict: bool = True,
) -> Awaitable:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`)."""
if ttl is not None:
# ttl argument is used to maintain backward compatibility
# Setting TTLs via ServiceInfo is preferred
info.host_ttl = ttl
info.other_ttl = ttl
info.set_server_if_missing()
await self.async_wait_for_start()
await self.async_check_service(info, allow_name_change, cooperating_responders, strict)
self.registry.async_add(info)
return asyncio.ensure_future(self._async_broadcast_service(info, _REGISTER_TIME, None))
def update_service(self, info: ServiceInfo) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_update_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(self.async_update_service(info)), self.loop, _REGISTER_TIME * _REGISTER_BROADCASTS
)
async def async_update_service(self, info: ServiceInfo) -> Awaitable:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service."""
self.registry.async_update(info)
return asyncio.ensure_future(self._async_broadcast_service(info, _REGISTER_TIME, None))
async def async_get_service_info(
self, type_: str, name: str, timeout: int = 3000, question_type: Optional[DNSQuestionType] = None
) -> Optional[AsyncServiceInfo]:
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds.
:param type_: fully qualified service type name
:param name: the name of the service
:param timeout: milliseconds to wait for a response
:param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
"""
info = AsyncServiceInfo(type_, name)
if await info.async_request(self, timeout, question_type):
return info
return None
async def _async_broadcast_service(
self,
info: ServiceInfo,
interval: int,
ttl: Optional[int],
broadcast_addresses: bool = True,
) -> None:
"""Send a broadcasts to announce a service at intervals."""
for i in range(_REGISTER_BROADCASTS):
if i != 0:
await asyncio.sleep(millis_to_seconds(interval))
self.async_send(self.generate_service_broadcast(info, ttl, broadcast_addresses))
def generate_service_broadcast(
self,
info: ServiceInfo,
ttl: Optional[int],
broadcast_addresses: bool = True,
) -> DNSOutgoing:
"""Generate a broadcast to announce a service."""
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
self._add_broadcast_answer(out, info, ttl, broadcast_addresses)
return out
def generate_service_query(self, info: ServiceInfo) -> DNSOutgoing: # pylint: disable=no-self-use
"""Generate a query to lookup a service."""
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
# https://datatracker.ietf.org/doc/html/rfc6762#section-8.1
# Because of the mDNS multicast rate-limiting
# rules, the probes SHOULD be sent as "QU" questions with the unicast-
# response bit set, to allow a defending host to respond immediately
# via unicast, instead of potentially having to wait before replying
# via multicast.
#
# _CLASS_UNIQUE is the "QU" bit
out.add_question(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN | _CLASS_UNIQUE))
out.add_authorative_answer(info.dns_pointer())
return out
def _add_broadcast_answer( # pylint: disable=no-self-use
self,
out: DNSOutgoing,
info: ServiceInfo,
override_ttl: Optional[int],
broadcast_addresses: bool = True,
) -> None:
"""Add answers to broadcast a service."""
current_time_millis()
other_ttl = None if override_ttl is None else override_ttl
host_ttl = None if override_ttl is None else override_ttl
out.add_answer_at_time(info.dns_pointer(override_ttl=other_ttl), 0)
out.add_answer_at_time(info.dns_service(override_ttl=host_ttl), 0)
out.add_answer_at_time(info.dns_text(override_ttl=other_ttl), 0)
if broadcast_addresses:
for record in info.get_address_and_nsec_records(override_ttl=host_ttl):
out.add_answer_at_time(record, 0)
def unregister_service(self, info: ServiceInfo) -> None:
"""Unregister a service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_service(info), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
async def async_unregister_service(self, info: ServiceInfo) -> Awaitable:
"""Unregister a service."""
info.set_server_if_missing()
self.registry.async_remove(info)
# If another server uses the same addresses, we do not want to send
# goodbye packets for the address records
assert info.server_key is not None
entries = self.registry.async_get_infos_server(info.server_key)
broadcast_addresses = not bool(entries)
return asyncio.ensure_future(
self._async_broadcast_service(info, _UNREGISTER_TIME, 0, broadcast_addresses)
)
def generate_unregister_all_services(self) -> Optional[DNSOutgoing]:
"""Generate a DNSOutgoing goodbye for all services and remove them from the registry."""
service_infos = self.registry.async_get_service_infos()
if not service_infos:
return None
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for info in service_infos:
self._add_broadcast_answer(out, info, 0)
self.registry.async_remove(service_infos)
return out
async def async_unregister_all_services(self) -> None:
"""Unregister all registered services.
Unlike async_register_service and async_unregister_service, this
method does not return a future and is always expected to be
awaited since its only called at shutdown.
"""
# Send Goodbye packets https://datatracker.ietf.org/doc/html/rfc6762#section-10.1
out = self.generate_unregister_all_services()
if not out:
return
for i in range(_REGISTER_BROADCASTS):
if i != 0:
await asyncio.sleep(millis_to_seconds(_UNREGISTER_TIME))
self.async_send(out)
def unregister_all_services(self) -> None:
"""Unregister all registered services.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_all_services` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_all_services(), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
async def async_check_service(
self,
info: ServiceInfo,
allow_name_change: bool,
cooperating_responders: bool = False,
strict: bool = True,
) -> None:
"""Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique."""
instance_name = instance_name_from_service_info(info, strict=strict)
if cooperating_responders:
return
next_instance_number = 2
next_time = now = current_time_millis()
i = 0
while i < _REGISTER_BROADCASTS:
# check for a name conflict
while self.cache.current_entry_with_name_and_alias(info.type, info.name):
if not allow_name_change:
raise NonUniqueNameException
# change the name and look for a conflict
info.name = f'{instance_name}-{next_instance_number}.{info.type}'
next_instance_number += 1
service_type_name(info.name, strict=strict)
next_time = now
i = 0
if now < next_time:
await self.async_wait(next_time - now)
now = current_time_millis()
continue
self.async_send(self.generate_service_query(info))
i += 1
next_time += _CHECK_TIME
def add_listener(
self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]]
) -> None:
"""Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is threadsafe
"""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.record_manager.async_add_listener, listener, question)
def remove_listener(self, listener: RecordUpdateListener) -> None:
"""Removes a listener.
This function is threadsafe
"""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.record_manager.async_remove_listener, listener)
def async_add_listener(
self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]]
) -> None:
"""Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is not threadsafe and must be called in the eventloop.
"""
self.record_manager.async_add_listener(listener, question)
def async_remove_listener(self, listener: RecordUpdateListener) -> None:
"""Removes a listener.
This function is not threadsafe and must be called in the eventloop.
"""
self.record_manager.async_remove_listener(listener)
def send(
self,
out: DNSOutgoing,
addr: Optional[str] = None,
port: int = _MDNS_PORT,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
transport: Optional[_WrappedTransport] = None,
) -> None:
"""Sends an outgoing packet threadsafe."""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.async_send, out, addr, port, v6_flow_scope, transport)
def async_send(
self,
out: DNSOutgoing,
addr: Optional[str] = None,
port: int = _MDNS_PORT,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
transport: Optional[_WrappedTransport] = None,
) -> None:
"""Sends an outgoing packet."""
if self.done:
return
# If no transport is specified, we send to all the ones
# with the same address family
transports = [transport] if transport else self.engine.senders
log_debug = log.isEnabledFor(logging.DEBUG)
for packet_num, packet in enumerate(out.packets()):
if len(packet) > _MAX_MSG_ABSOLUTE:
self.log_warning_once("Dropping %r over-sized packet (%d bytes) %r", out, len(packet), packet)
return
for send_transport in transports:
async_send_with_transport(
log_debug, send_transport, packet, packet_num, out, addr, port, v6_flow_scope
)
def _close(self) -> None:
"""Set global done and remove all service listeners."""
if self.done:
return
self.remove_all_service_listeners()
self.done = True
def _shutdown_threads(self) -> None:
"""Shutdown any threads."""
self.notify_all()
if not self._loop_thread:
return
assert self.loop is not None
shutdown_loop(self.loop)
self._loop_thread.join()
self._loop_thread = None
def close(self) -> None:
"""Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
"""
assert self.loop is not None
if self.loop.is_running():
if self.loop == get_running_loop():
log.warning(
"unregister_all_services skipped as it does blocking i/o; use AsyncZeroconf with asyncio"
)
else:
self.unregister_all_services()
self._close()
self.engine.close()
self._shutdown_threads()
async def _async_close(self) -> None:
"""Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
This call only intended to be used by AsyncZeroconf
Callers are responsible for unregistering all services
before calling this function
"""
self._close()
await self.engine._async_close() # pylint: disable=protected-access
self._shutdown_threads()
def __enter__(self) -> 'Zeroconf':
return self
def __exit__( # pylint: disable=useless-return
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
self.close()
return None
| (interfaces: Union[Sequence[Union[str, int, Tuple[Tuple[str, int, int], int]]], zeroconf._utils.net.InterfaceChoice] = <InterfaceChoice.All: 2>, unicast: bool = False, ip_version: Optional[zeroconf._utils.net.IPVersion] = None, apple_p2p: bool = False) -> None |
29,969 | zeroconf._core | __enter__ | null | def __enter__(self) -> 'Zeroconf':
return self
| (self) -> zeroconf._core.Zeroconf |
29,970 | zeroconf._core | __exit__ | null | def __exit__( # pylint: disable=useless-return
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
self.close()
return None
| (self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[traceback]) -> Optional[bool] |
29,971 | zeroconf._core | __init__ | Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads.
:param interfaces: :class:`InterfaceChoice` or a list of IP addresses
(IPv4 and IPv6) and interface indexes (IPv6 only).
IPv6 notes for non-POSIX systems:
* `InterfaceChoice.All` is an alias for `InterfaceChoice.Default`
on Python versions before 3.8.
Also listening on loopback (``::1``) doesn't work, use a real address.
:param ip_version: IP versions to support. If `choice` is a list, the default is detected
from it. Otherwise defaults to V4 only for backward compatibility.
:param apple_p2p: use AWDL interface (only macOS)
| def __init__(
self,
interfaces: InterfacesType = InterfaceChoice.All,
unicast: bool = False,
ip_version: Optional[IPVersion] = None,
apple_p2p: bool = False,
) -> None:
"""Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads.
:param interfaces: :class:`InterfaceChoice` or a list of IP addresses
(IPv4 and IPv6) and interface indexes (IPv6 only).
IPv6 notes for non-POSIX systems:
* `InterfaceChoice.All` is an alias for `InterfaceChoice.Default`
on Python versions before 3.8.
Also listening on loopback (``::1``) doesn't work, use a real address.
:param ip_version: IP versions to support. If `choice` is a list, the default is detected
from it. Otherwise defaults to V4 only for backward compatibility.
:param apple_p2p: use AWDL interface (only macOS)
"""
if ip_version is None:
ip_version = autodetect_ip_version(interfaces)
self.done = False
if apple_p2p and sys.platform != 'darwin':
raise RuntimeError('Option `apple_p2p` is not supported on non-Apple platforms.')
self.unicast = unicast
listen_socket, respond_sockets = create_sockets(interfaces, unicast, ip_version, apple_p2p=apple_p2p)
log.debug('Listen socket %s, respond sockets %s', listen_socket, respond_sockets)
self.engine = AsyncEngine(self, listen_socket, respond_sockets)
self.browsers: Dict[ServiceListener, ServiceBrowser] = {}
self.registry = ServiceRegistry()
self.cache = DNSCache()
self.question_history = QuestionHistory()
self.out_queue = MulticastOutgoingQueue(self, 0, _AGGREGATION_DELAY)
self.out_delay_queue = MulticastOutgoingQueue(self, _ONE_SECOND, _PROTECTED_AGGREGATION_DELAY)
self.query_handler = QueryHandler(self)
self.record_manager = RecordManager(self)
self._notify_futures: Set[asyncio.Future] = set()
self.loop: Optional[asyncio.AbstractEventLoop] = None
self._loop_thread: Optional[threading.Thread] = None
self.start()
| (self, interfaces: Union[Sequence[Union[str, int, Tuple[Tuple[str, int, int], int]]], zeroconf._utils.net.InterfaceChoice] = <InterfaceChoice.All: 2>, unicast: bool = False, ip_version: Optional[zeroconf._utils.net.IPVersion] = None, apple_p2p: bool = False) -> NoneType |
29,972 | zeroconf._core | _add_broadcast_answer | Add answers to broadcast a service. | def _add_broadcast_answer( # pylint: disable=no-self-use
self,
out: DNSOutgoing,
info: ServiceInfo,
override_ttl: Optional[int],
broadcast_addresses: bool = True,
) -> None:
"""Add answers to broadcast a service."""
current_time_millis()
other_ttl = None if override_ttl is None else override_ttl
host_ttl = None if override_ttl is None else override_ttl
out.add_answer_at_time(info.dns_pointer(override_ttl=other_ttl), 0)
out.add_answer_at_time(info.dns_service(override_ttl=host_ttl), 0)
out.add_answer_at_time(info.dns_text(override_ttl=other_ttl), 0)
if broadcast_addresses:
for record in info.get_address_and_nsec_records(override_ttl=host_ttl):
out.add_answer_at_time(record, 0)
| (self, out: zeroconf._protocol.outgoing.DNSOutgoing, info: zeroconf._services.info.ServiceInfo, override_ttl: Optional[int], broadcast_addresses: bool = True) -> NoneType |
29,973 | zeroconf._core | _async_broadcast_service | Send a broadcasts to announce a service at intervals. | def update_service(self, info: ServiceInfo) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_update_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(self.async_update_service(info)), self.loop, _REGISTER_TIME * _REGISTER_BROADCASTS
)
| (self, info: zeroconf._services.info.ServiceInfo, interval: int, ttl: Optional[int], broadcast_addresses: bool = True) -> NoneType |
29,974 | zeroconf._core | _async_close | Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
This call only intended to be used by AsyncZeroconf
Callers are responsible for unregistering all services
before calling this function
| def close(self) -> None:
"""Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
"""
assert self.loop is not None
if self.loop.is_running():
if self.loop == get_running_loop():
log.warning(
"unregister_all_services skipped as it does blocking i/o; use AsyncZeroconf with asyncio"
)
else:
self.unregister_all_services()
self._close()
self.engine.close()
self._shutdown_threads()
| (self) -> NoneType |
29,975 | zeroconf._core | _close | Set global done and remove all service listeners. | def _close(self) -> None:
"""Set global done and remove all service listeners."""
if self.done:
return
self.remove_all_service_listeners()
self.done = True
| (self) -> NoneType |
29,976 | zeroconf._core | _shutdown_threads | Shutdown any threads. | def _shutdown_threads(self) -> None:
"""Shutdown any threads."""
self.notify_all()
if not self._loop_thread:
return
assert self.loop is not None
shutdown_loop(self.loop)
self._loop_thread.join()
self._loop_thread = None
| (self) -> NoneType |
29,977 | zeroconf._core | _start_thread | Start a thread with a running event loop. | def _start_thread(self) -> None:
"""Start a thread with a running event loop."""
loop_thread_ready = threading.Event()
def _run_loop() -> None:
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.engine.setup(self.loop, loop_thread_ready)
self.loop.run_forever()
self._loop_thread = threading.Thread(target=_run_loop, daemon=True)
self._loop_thread.start()
loop_thread_ready.wait()
| (self) -> NoneType |
29,978 | zeroconf._core | add_listener | Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is threadsafe
| def add_listener(
self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]]
) -> None:
"""Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is threadsafe
"""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.record_manager.async_add_listener, listener, question)
| (self, listener: zeroconf._updates.RecordUpdateListener, question: Union[zeroconf._dns.DNSQuestion, List[zeroconf._dns.DNSQuestion], NoneType]) -> NoneType |
29,979 | zeroconf._core | add_service_listener | Adds a listener for a particular service type. This object
will then have its add_service and remove_service methods called when
services of that type become available and unavailable. | def add_service_listener(self, type_: str, listener: ServiceListener) -> None:
"""Adds a listener for a particular service type. This object
will then have its add_service and remove_service methods called when
services of that type become available and unavailable."""
self.remove_service_listener(listener)
self.browsers[listener] = ServiceBrowser(self, type_, listener)
| (self, type_: str, listener: zeroconf._services.ServiceListener) -> NoneType |
29,980 | zeroconf._core | async_add_listener | Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is not threadsafe and must be called in the eventloop.
| def async_add_listener(
self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]]
) -> None:
"""Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is not threadsafe and must be called in the eventloop.
"""
self.record_manager.async_add_listener(listener, question)
| (self, listener: zeroconf._updates.RecordUpdateListener, question: Union[zeroconf._dns.DNSQuestion, List[zeroconf._dns.DNSQuestion], NoneType]) -> NoneType |
29,981 | zeroconf._core | async_check_service | Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique. | def unregister_all_services(self) -> None:
"""Unregister all registered services.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_all_services` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_all_services(), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
| (self, info: zeroconf._services.info.ServiceInfo, allow_name_change: bool, cooperating_responders: bool = False, strict: bool = True) -> NoneType |
29,982 | zeroconf._core | async_get_service_info | Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds.
:param type_: fully qualified service type name
:param name: the name of the service
:param timeout: milliseconds to wait for a response
:param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
| def update_service(self, info: ServiceInfo) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_update_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(self.async_update_service(info)), self.loop, _REGISTER_TIME * _REGISTER_BROADCASTS
)
| (self, type_: str, name: str, timeout: int = 3000, question_type: Optional[zeroconf._dns.DNSQuestionType] = None) -> Optional[zeroconf._services.info.AsyncServiceInfo] |
29,983 | zeroconf._core | async_notify_all | Schedule an async_notify_all. | def async_notify_all(self) -> None:
"""Schedule an async_notify_all."""
notify_futures = self._notify_futures
if notify_futures:
_resolve_all_futures_to_none(notify_futures)
| (self) -> NoneType |
29,984 | zeroconf._core | async_register_service | Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`). | def register_service(
self,
info: ServiceInfo,
ttl: Optional[int] = None,
allow_name_change: bool = False,
cooperating_responders: bool = False,
strict: bool = True,
) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`).
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `register_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(
self.async_register_service(info, ttl, allow_name_change, cooperating_responders, strict)
),
self.loop,
_REGISTER_TIME * _REGISTER_BROADCASTS,
)
| (self, info: zeroconf._services.info.ServiceInfo, ttl: Optional[int] = None, allow_name_change: bool = False, cooperating_responders: bool = False, strict: bool = True) -> Awaitable |
29,985 | zeroconf._core | async_remove_listener | Removes a listener.
This function is not threadsafe and must be called in the eventloop.
| def async_remove_listener(self, listener: RecordUpdateListener) -> None:
"""Removes a listener.
This function is not threadsafe and must be called in the eventloop.
"""
self.record_manager.async_remove_listener(listener)
| (self, listener: zeroconf._updates.RecordUpdateListener) -> NoneType |
29,986 | zeroconf._core | async_send | Sends an outgoing packet. | def async_send(
self,
out: DNSOutgoing,
addr: Optional[str] = None,
port: int = _MDNS_PORT,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
transport: Optional[_WrappedTransport] = None,
) -> None:
"""Sends an outgoing packet."""
if self.done:
return
# If no transport is specified, we send to all the ones
# with the same address family
transports = [transport] if transport else self.engine.senders
log_debug = log.isEnabledFor(logging.DEBUG)
for packet_num, packet in enumerate(out.packets()):
if len(packet) > _MAX_MSG_ABSOLUTE:
self.log_warning_once("Dropping %r over-sized packet (%d bytes) %r", out, len(packet), packet)
return
for send_transport in transports:
async_send_with_transport(
log_debug, send_transport, packet, packet_num, out, addr, port, v6_flow_scope
)
| (self, out: zeroconf._protocol.outgoing.DNSOutgoing, addr: Optional[str] = None, port: int = 5353, v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (), transport: Optional[zeroconf._transport._WrappedTransport] = None) -> NoneType |
29,987 | zeroconf._core | async_unregister_all_services | Unregister all registered services.
Unlike async_register_service and async_unregister_service, this
method does not return a future and is always expected to be
awaited since its only called at shutdown.
| def generate_unregister_all_services(self) -> Optional[DNSOutgoing]:
"""Generate a DNSOutgoing goodbye for all services and remove them from the registry."""
service_infos = self.registry.async_get_service_infos()
if not service_infos:
return None
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for info in service_infos:
self._add_broadcast_answer(out, info, 0)
self.registry.async_remove(service_infos)
return out
| (self) -> NoneType |
29,988 | zeroconf._core | async_unregister_service | Unregister a service. | def unregister_service(self, info: ServiceInfo) -> None:
"""Unregister a service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_service(info), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
| (self, info: zeroconf._services.info.ServiceInfo) -> Awaitable |
29,989 | zeroconf._core | async_update_service | Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. | def update_service(self, info: ServiceInfo) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_update_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(self.async_update_service(info)), self.loop, _REGISTER_TIME * _REGISTER_BROADCASTS
)
| (self, info: zeroconf._services.info.ServiceInfo) -> Awaitable |
29,990 | zeroconf._core | async_wait | Calling task waits for a given number of milliseconds or until notified. | @property
def listeners(self) -> Set[RecordUpdateListener]:
return self.record_manager.listeners
| (self, timeout: float) -> NoneType |
29,991 | zeroconf._core | async_wait_for_start | Wait for start up for actions that require a running Zeroconf instance.
Throws NotRunningException if the instance is not running or could
not be started.
| def _start_thread(self) -> None:
"""Start a thread with a running event loop."""
loop_thread_ready = threading.Event()
def _run_loop() -> None:
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.engine.setup(self.loop, loop_thread_ready)
self.loop.run_forever()
self._loop_thread = threading.Thread(target=_run_loop, daemon=True)
self._loop_thread.start()
loop_thread_ready.wait()
| (self) -> NoneType |
29,992 | zeroconf._core | close | Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
| def close(self) -> None:
"""Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
"""
assert self.loop is not None
if self.loop.is_running():
if self.loop == get_running_loop():
log.warning(
"unregister_all_services skipped as it does blocking i/o; use AsyncZeroconf with asyncio"
)
else:
self.unregister_all_services()
self._close()
self.engine.close()
self._shutdown_threads()
| (self) -> NoneType |
29,993 | zeroconf._core | generate_service_broadcast | Generate a broadcast to announce a service. | def generate_service_broadcast(
self,
info: ServiceInfo,
ttl: Optional[int],
broadcast_addresses: bool = True,
) -> DNSOutgoing:
"""Generate a broadcast to announce a service."""
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
self._add_broadcast_answer(out, info, ttl, broadcast_addresses)
return out
| (self, info: zeroconf._services.info.ServiceInfo, ttl: Optional[int], broadcast_addresses: bool = True) -> zeroconf._protocol.outgoing.DNSOutgoing |
29,994 | zeroconf._core | generate_service_query | Generate a query to lookup a service. | def generate_service_query(self, info: ServiceInfo) -> DNSOutgoing: # pylint: disable=no-self-use
"""Generate a query to lookup a service."""
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
# https://datatracker.ietf.org/doc/html/rfc6762#section-8.1
# Because of the mDNS multicast rate-limiting
# rules, the probes SHOULD be sent as "QU" questions with the unicast-
# response bit set, to allow a defending host to respond immediately
# via unicast, instead of potentially having to wait before replying
# via multicast.
#
# _CLASS_UNIQUE is the "QU" bit
out.add_question(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN | _CLASS_UNIQUE))
out.add_authorative_answer(info.dns_pointer())
return out
| (self, info: zeroconf._services.info.ServiceInfo) -> zeroconf._protocol.outgoing.DNSOutgoing |
29,995 | zeroconf._core | generate_unregister_all_services | Generate a DNSOutgoing goodbye for all services and remove them from the registry. | def generate_unregister_all_services(self) -> Optional[DNSOutgoing]:
"""Generate a DNSOutgoing goodbye for all services and remove them from the registry."""
service_infos = self.registry.async_get_service_infos()
if not service_infos:
return None
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for info in service_infos:
self._add_broadcast_answer(out, info, 0)
self.registry.async_remove(service_infos)
return out
| (self) -> Optional[zeroconf._protocol.outgoing.DNSOutgoing] |
29,996 | zeroconf._core | get_service_info | Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds.
:param type_: fully qualified service type name
:param name: the name of the service
:param timeout: milliseconds to wait for a response
:param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
| def get_service_info(
self, type_: str, name: str, timeout: int = 3000, question_type: Optional[DNSQuestionType] = None
) -> Optional[ServiceInfo]:
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds.
:param type_: fully qualified service type name
:param name: the name of the service
:param timeout: milliseconds to wait for a response
:param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
"""
info = ServiceInfo(type_, name)
if info.request(self, timeout, question_type):
return info
return None
| (self, type_: str, name: str, timeout: int = 3000, question_type: Optional[zeroconf._dns.DNSQuestionType] = None) -> Optional[zeroconf._services.info.ServiceInfo] |
29,997 | zeroconf._core | notify_all | Notifies all waiting threads and notify listeners. | def notify_all(self) -> None:
"""Notifies all waiting threads and notify listeners."""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.async_notify_all)
| (self) -> NoneType |
29,998 | zeroconf._core | register_service | Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`).
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `register_service` cannot be completed.
| def register_service(
self,
info: ServiceInfo,
ttl: Optional[int] = None,
allow_name_change: bool = False,
cooperating_responders: bool = False,
strict: bool = True,
) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`).
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `register_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(
self.async_register_service(info, ttl, allow_name_change, cooperating_responders, strict)
),
self.loop,
_REGISTER_TIME * _REGISTER_BROADCASTS,
)
| (self, info: zeroconf._services.info.ServiceInfo, ttl: Optional[int] = None, allow_name_change: bool = False, cooperating_responders: bool = False, strict: bool = True) -> NoneType |
29,999 | zeroconf._core | remove_all_service_listeners | Removes a listener from the set that is currently listening. | def remove_all_service_listeners(self) -> None:
"""Removes a listener from the set that is currently listening."""
for listener in list(self.browsers):
self.remove_service_listener(listener)
| (self) -> NoneType |
30,000 | zeroconf._core | remove_listener | Removes a listener.
This function is threadsafe
| def remove_listener(self, listener: RecordUpdateListener) -> None:
"""Removes a listener.
This function is threadsafe
"""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.record_manager.async_remove_listener, listener)
| (self, listener: zeroconf._updates.RecordUpdateListener) -> NoneType |
30,001 | zeroconf._core | remove_service_listener | Removes a listener from the set that is currently listening. | def remove_service_listener(self, listener: ServiceListener) -> None:
"""Removes a listener from the set that is currently listening."""
if listener in self.browsers:
self.browsers[listener].cancel()
del self.browsers[listener]
| (self, listener: zeroconf._services.ServiceListener) -> NoneType |
30,002 | zeroconf._core | send | Sends an outgoing packet threadsafe. | def send(
self,
out: DNSOutgoing,
addr: Optional[str] = None,
port: int = _MDNS_PORT,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
transport: Optional[_WrappedTransport] = None,
) -> None:
"""Sends an outgoing packet threadsafe."""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.async_send, out, addr, port, v6_flow_scope, transport)
| (self, out: zeroconf._protocol.outgoing.DNSOutgoing, addr: Optional[str] = None, port: int = 5353, v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (), transport: Optional[zeroconf._transport._WrappedTransport] = None) -> NoneType |
30,003 | zeroconf._core | start | Start Zeroconf. | def start(self) -> None:
"""Start Zeroconf."""
self.loop = get_running_loop()
if self.loop:
self.engine.setup(self.loop, None)
return
self._start_thread()
| (self) -> NoneType |
30,004 | zeroconf._core | unregister_all_services | Unregister all registered services.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_all_services` cannot be completed.
| def unregister_all_services(self) -> None:
"""Unregister all registered services.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_all_services` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_all_services(), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
| (self) -> NoneType |
30,005 | zeroconf._core | unregister_service | Unregister a service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_service` cannot be completed.
| def unregister_service(self, info: ServiceInfo) -> None:
"""Unregister a service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_service(info), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
| (self, info: zeroconf._services.info.ServiceInfo) -> NoneType |
30,006 | zeroconf._core | update_service | Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_update_service` cannot be completed.
| def update_service(self, info: ServiceInfo) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_update_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(self.async_update_service(info)), self.loop, _REGISTER_TIME * _REGISTER_BROADCASTS
)
| (self, info: zeroconf._services.info.ServiceInfo) -> NoneType |
30,007 | zeroconf._services.types | ZeroconfServiceTypes |
Return all of the advertised services on any local networks
| class ZeroconfServiceTypes(ServiceListener):
"""
Return all of the advertised services on any local networks
"""
def __init__(self) -> None:
"""Keep track of found services in a set."""
self.found_services: Set[str] = set()
def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
"""Service added."""
self.found_services.add(name)
def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
"""Service updated."""
def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
"""Service removed."""
@classmethod
def find(
cls,
zc: Optional[Zeroconf] = None,
timeout: Union[int, float] = 5,
interfaces: InterfacesType = InterfaceChoice.All,
ip_version: Optional[IPVersion] = None,
) -> Tuple[str, ...]:
"""
Return all of the advertised services on any local networks.
:param zc: Zeroconf() instance. Pass in if already have an
instance running or if non-default interfaces are needed
:param timeout: seconds to wait for any responses
:param interfaces: interfaces to listen on.
:param ip_version: IP protocol version to use.
:return: tuple of service type strings
"""
local_zc = zc or Zeroconf(interfaces=interfaces, ip_version=ip_version)
listener = cls()
browser = ServiceBrowser(local_zc, _SERVICE_TYPE_ENUMERATION_NAME, listener=listener)
# wait for responses
time.sleep(timeout)
browser.cancel()
# close down anything we opened
if zc is None:
local_zc.close()
return tuple(sorted(listener.found_services))
| () -> None |
30,008 | zeroconf._services.types | __init__ | Keep track of found services in a set. | def __init__(self) -> None:
"""Keep track of found services in a set."""
self.found_services: Set[str] = set()
| (self) -> NoneType |
30,009 | zeroconf._services.types | add_service | Service added. | def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
"""Service added."""
self.found_services.add(name)
| (self, zc: zeroconf._core.Zeroconf, type_: str, name: str) -> NoneType |
30,010 | zeroconf._services.types | remove_service | Service removed. | def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
"""Service removed."""
| (self, zc: zeroconf._core.Zeroconf, type_: str, name: str) -> NoneType |
30,011 | zeroconf._services.types | update_service | Service updated. | def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
"""Service updated."""
| (self, zc: zeroconf._core.Zeroconf, type_: str, name: str) -> NoneType |
30,027 | zeroconf._utils.net | add_multicast_member | null | def add_multicast_member(
listen_socket: socket.socket,
interface: Union[str, Tuple[Tuple[str, int, int], int]],
) -> bool:
# This is based on assumptions in normalize_interface_choice
is_v6 = isinstance(interface, tuple)
err_einval = {errno.EINVAL}
if sys.platform == 'win32':
# No WSAEINVAL definition in typeshed
err_einval |= {cast(Any, errno).WSAEINVAL} # pylint: disable=no-member
log.debug('Adding %r (socket %d) to multicast group', interface, listen_socket.fileno())
try:
if is_v6:
try:
mdns_addr6_bytes = socket.inet_pton(socket.AF_INET6, _MDNS_ADDR6)
except OSError:
log.info(
'Unable to translate IPv6 address when adding %s to multicast group, '
'this can happen if IPv6 is disabled on the system',
interface,
)
return False
iface_bin = struct.pack('@I', cast(int, interface[1]))
_value = mdns_addr6_bytes + iface_bin
listen_socket.setsockopt(_IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, _value)
else:
_value = socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(cast(str, interface))
listen_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, _value)
except OSError as e:
_errno = get_errno(e)
if _errno == errno.EADDRINUSE:
log.info(
'Address in use when adding %s to multicast group, '
'it is expected to happen on some systems',
interface,
)
return False
if _errno == errno.EADDRNOTAVAIL:
log.info(
'Address not available when adding %s to multicast '
'group, it is expected to happen on some systems',
interface,
)
return False
if _errno in err_einval:
log.info('Interface of %s does not support multicast, ' 'it is expected in WSL', interface)
return False
if _errno == errno.ENOPROTOOPT:
log.info(
'Failed to set socket option on %s, this can happen if '
'the network adapter is in a disconnected state',
interface,
)
return False
if is_v6 and _errno == errno.ENODEV:
log.info(
'Address in use when adding %s to multicast group, '
'it is expected to happen when the device does not have ipv6',
interface,
)
return False
raise
return True
| (listen_socket: socket.socket, interface: Union[str, Tuple[Tuple[str, int, int], int]]) -> bool |
30,028 | zeroconf._utils.net | autodetect_ip_version | Auto detect the IP version when it is not provided. | def autodetect_ip_version(interfaces: InterfacesType) -> IPVersion:
"""Auto detect the IP version when it is not provided."""
if isinstance(interfaces, list):
has_v6 = any(
isinstance(i, int) or (isinstance(i, str) and ipaddress.ip_address(i).version == 6)
for i in interfaces
)
has_v4 = any(isinstance(i, str) and ipaddress.ip_address(i).version == 4 for i in interfaces)
if has_v4 and has_v6:
return IPVersion.All
if has_v6:
return IPVersion.V6Only
return IPVersion.V4Only
| (interfaces: Union[Sequence[Union[str, int, Tuple[Tuple[str, int, int], int]]], zeroconf._utils.net.InterfaceChoice]) -> zeroconf._utils.net.IPVersion |
30,030 | zeroconf._utils.net | create_sockets | null | def create_sockets(
interfaces: InterfacesType = InterfaceChoice.All,
unicast: bool = False,
ip_version: IPVersion = IPVersion.V4Only,
apple_p2p: bool = False,
) -> Tuple[Optional[socket.socket], List[socket.socket]]:
if unicast:
listen_socket = None
else:
listen_socket = new_socket(ip_version=ip_version, apple_p2p=apple_p2p, bind_addr=('',))
normalized_interfaces = normalize_interface_choice(interfaces, ip_version)
# If we are using InterfaceChoice.Default we can use
# a single socket to listen and respond.
if not unicast and interfaces is InterfaceChoice.Default:
for i in normalized_interfaces:
add_multicast_member(cast(socket.socket, listen_socket), i)
return listen_socket, [cast(socket.socket, listen_socket)]
respond_sockets = []
for i in normalized_interfaces:
if not unicast:
if add_multicast_member(cast(socket.socket, listen_socket), i):
respond_socket = new_respond_socket(i, apple_p2p=apple_p2p)
else:
respond_socket = None
else:
respond_socket = new_socket(
port=0,
ip_version=ip_version,
apple_p2p=apple_p2p,
bind_addr=i[0] if isinstance(i, tuple) else (i,),
)
if respond_socket is not None:
respond_sockets.append(respond_socket)
return listen_socket, respond_sockets
| (interfaces: Union[Sequence[Union[str, int, Tuple[Tuple[str, int, int], int]]], zeroconf._utils.net.InterfaceChoice] = <InterfaceChoice.All: 2>, unicast: bool = False, ip_version: zeroconf._utils.net.IPVersion = <IPVersion.V4Only: 1>, apple_p2p: bool = False) -> Tuple[Optional[socket.socket], List[socket.socket]] |
30,031 | zeroconf._utils.net | get_all_addresses | null | def get_all_addresses() -> List[str]:
return list({addr.ip for iface in ifaddr.get_adapters() for addr in iface.ips if addr.is_IPv4})
| () -> List[str] |
30,032 | zeroconf._utils.net | get_all_addresses_v6 | null | def get_all_addresses_v6() -> List[Tuple[Tuple[str, int, int], int]]:
# IPv6 multicast uses positive indexes for interfaces
# TODO: What about multi-address interfaces?
return list(
{(addr.ip, iface.index) for iface in ifaddr.get_adapters() for addr in iface.ips if addr.is_IPv6}
)
| () -> List[Tuple[Tuple[str, int, int], int]] |
30,034 | pawl.linkedin | Linkedin | The Linkedin class provides convenient access to Linkedin's API. | class Linkedin:
"""The Linkedin class provides convenient access to Linkedin's API."""
def __init__(
self,
access_token=None,
client_id=None,
client_secret=None,
redirect_uri="http://localhost:8000",
token_manager=None,
):
assert access_token or (
client_id and client_secret
), "Either client_id and client_secret or an access token is required."
self._core = self._authorized_core = None
# TODO - Abstract these values for security
self._client_id = client_id
self._client_secret = client_secret
self._redirect_uri = redirect_uri
# TODO END
self._services = None
self._token_manager = token_manager
self._map_services()
self._prepare_core()
self.auth = service.Auth(self, None)
self.current_user = service.Me(linkedin=self, _data=None)
self.current_user_id = self._set_linkedin_user_id()
self.reactions = service.Reactions(linkedin=self, _data=None)
def _prepare_core(self, requestor_class=None, requestor_kwargs=None):
requestor_class = requestor_class or Requestor
requestor_kwargs = requestor_kwargs or {}
requestor = requestor_class()
self._prepare_core_authenticator(requestor)
def _prepare_core_authenticator(self, requestor):
authenticator = Authenticator(
requestor, self._client_id, self._client_secret, self._redirect_uri
)
self._prepare_core_authorizer(authenticator)
def _prepare_core_authorizer(self, authenticator: Authenticator):
if self._token_manager is not None:
self._token_manager.linkedin = self
authorizer = Authorizer(
authenticator,
post_access_callback=self._token_manager.post_access_callback,
pre_access_callback=self._token_manager.pre_access_callback,
)
else:
# TODO - Add error handling
authorizer = Authorizer(authenticator)
self._core = self._authorized_core = session(authorizer)
def _map_services(self):
service_mappings = {
"Me": service.Me,
"Reactions": service.Reactions,
}
self._services = service_mappings
@staticmethod
def _parse_service_request(data: Optional[Union[Dict[str, Any], List[Any], bool]]):
# TODO - Restructure data for ease of use with python/utf-8
return data
def _service_request(
self,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json=None,
method: str = "",
params: Optional[Union[str, Dict[str, str]]] = None,
path: str = "",
) -> Any:
"""Run a request through mapped services.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: None).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: None). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
:param params: The query parameters to add to the request (default: None).
:param path: The path to fetch.
"""
return self._parse_service_request(
data=self._core.request(
data=data,
json=json,
method=method,
params=params,
path=path,
)
)
def get(
self,
path: str,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
):
"""Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default: None).
"""
return self._service_request(method="GET", params=params, path=path)
def post(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
json=None,
):
return self._service_request(
data=data,
json=json,
method="POST",
params=params,
path=path,
)
def _set_linkedin_user_id(self):
if self._authorized_core._authorizer.access_token is None:
return self.current_user.basic_profile()["id"]
return None
| (access_token=None, client_id=None, client_secret=None, redirect_uri='http://localhost:8000', token_manager=None) |
30,035 | pawl.linkedin | __init__ | null | def __init__(
self,
access_token=None,
client_id=None,
client_secret=None,
redirect_uri="http://localhost:8000",
token_manager=None,
):
assert access_token or (
client_id and client_secret
), "Either client_id and client_secret or an access token is required."
self._core = self._authorized_core = None
# TODO - Abstract these values for security
self._client_id = client_id
self._client_secret = client_secret
self._redirect_uri = redirect_uri
# TODO END
self._services = None
self._token_manager = token_manager
self._map_services()
self._prepare_core()
self.auth = service.Auth(self, None)
self.current_user = service.Me(linkedin=self, _data=None)
self.current_user_id = self._set_linkedin_user_id()
self.reactions = service.Reactions(linkedin=self, _data=None)
| (self, access_token=None, client_id=None, client_secret=None, redirect_uri='http://localhost:8000', token_manager=None) |
30,036 | pawl.linkedin | _map_services | null | def _map_services(self):
service_mappings = {
"Me": service.Me,
"Reactions": service.Reactions,
}
self._services = service_mappings
| (self) |
30,037 | pawl.linkedin | _parse_service_request | null | @staticmethod
def _parse_service_request(data: Optional[Union[Dict[str, Any], List[Any], bool]]):
# TODO - Restructure data for ease of use with python/utf-8
return data
| (data: Union[Dict[str, Any], List[Any], bool, NoneType]) |
30,038 | pawl.linkedin | _prepare_core | null | def _prepare_core(self, requestor_class=None, requestor_kwargs=None):
requestor_class = requestor_class or Requestor
requestor_kwargs = requestor_kwargs or {}
requestor = requestor_class()
self._prepare_core_authenticator(requestor)
| (self, requestor_class=None, requestor_kwargs=None) |
30,039 | pawl.linkedin | _prepare_core_authenticator | null | def _prepare_core_authenticator(self, requestor):
authenticator = Authenticator(
requestor, self._client_id, self._client_secret, self._redirect_uri
)
self._prepare_core_authorizer(authenticator)
| (self, requestor) |
30,040 | pawl.linkedin | _prepare_core_authorizer | null | def _prepare_core_authorizer(self, authenticator: Authenticator):
if self._token_manager is not None:
self._token_manager.linkedin = self
authorizer = Authorizer(
authenticator,
post_access_callback=self._token_manager.post_access_callback,
pre_access_callback=self._token_manager.pre_access_callback,
)
else:
# TODO - Add error handling
authorizer = Authorizer(authenticator)
self._core = self._authorized_core = session(authorizer)
| (self, authenticator: pawl.core.auth.Authenticator) |
30,041 | pawl.linkedin | _service_request | Run a request through mapped services.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: None).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: None). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
:param params: The query parameters to add to the request (default: None).
:param path: The path to fetch.
| def _service_request(
self,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json=None,
method: str = "",
params: Optional[Union[str, Dict[str, str]]] = None,
path: str = "",
) -> Any:
"""Run a request through mapped services.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: None).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: None). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
:param params: The query parameters to add to the request (default: None).
:param path: The path to fetch.
"""
return self._parse_service_request(
data=self._core.request(
data=data,
json=json,
method=method,
params=params,
path=path,
)
)
| (self, data: Union[Dict[str, Union[str, Any]], bytes, IO, str, NoneType] = None, json=None, method: str = '', params: Union[str, Dict[str, str], NoneType] = None, path: str = '') -> Any |
30,042 | pawl.linkedin | _set_linkedin_user_id | null | def _set_linkedin_user_id(self):
if self._authorized_core._authorizer.access_token is None:
return self.current_user.basic_profile()["id"]
return None
| (self) |
30,043 | pawl.linkedin | get | Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default: None).
| def get(
self,
path: str,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
):
"""Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default: None).
"""
return self._service_request(method="GET", params=params, path=path)
| (self, path: str, params: Union[str, Dict[str, Union[str, int]], NoneType] = None) |
30,044 | pawl.linkedin | post | null | def post(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
json=None,
):
return self._service_request(
data=data,
json=json,
method="POST",
params=params,
path=path,
)
| (self, path: str, data: Union[Dict[str, Union[str, Any]], bytes, IO, str, NoneType] = None, params: Union[str, Dict[str, Union[str, int]], NoneType] = None, json=None) |
30,051 | pfzy.match | fuzzy_match | Fuzzy find the needle within list of haystacks and get matched results with matching index.
Note:
The `key` argument is optional when the provided `haystacks` argument is a list of :class:`str`.
It will be given a default key `value` if not present.
Warning:
The `key` argument is required when provided `haystacks` argument is a list of :class:`dict`.
If not present, :class:`TypeError` will be raised.
Args:
needle: String to search within the `haystacks`.
haystacks: List of haystack/longer strings to be searched.
key: If `haystacks` is a list of dictionary, provide the key that
can obtain the haystack value to search.
batch_size: Number of entry to be processed together.
scorer (Callable[[str, str], SCORE_indices]): Desired scorer to use. Currently only :func:`~pfzy.score.fzy_scorer` and :func:`~pfzy.score.substr_scorer` is supported.
Raises:
TypeError: When the argument `haystacks` is :class:`list` of :class:`dict` and the `key` argument
is missing, :class:`TypeError` will be raised.
Returns:
List of matching `haystacks` with additional key indices and score.
Examples:
>>> import asyncio
>>> asyncio.run(fuzzy_match("ab", ["acb", "acbabc"]))
[{'value': 'acbabc', 'indices': [3, 4]}, {'value': 'acb', 'indices': [0, 2]}]
| result.sort(key=lambda x: x["score"], reverse=True)
| (needle: str, haystacks: List[Union[str, Dict[str, Any]]], key: str = '', batch_size: int = 4096, scorer: Optional[Callable[[str, str], Tuple[float, Optional[List[int]]]]] = None) -> List[Dict[str, Any]] |
30,052 | pfzy.score | fzy_scorer | Use fzy matching algorithem to match needle against haystack.
Note:
The `fzf` unordered search is not supported for performance concern.
When the provided `needle` is not a subsequence of `haystack` at all,
then `(-inf, None)` is returned.
See Also:
https://github.com/jhawthorn/fzy/blob/master/src/match.c
Args:
needle: Substring to find in haystack.
haystack: String to be searched and scored against.
Returns:
A tuple of matching score with a list of matching indices.
Examples:
>>> fzy_scorer("ab", "acb")
(0.89, [0, 2])
>>> fzy_scorer("ab", "acbabc")
(0.98, [3, 4])
>>> fzy_scorer("ab", "wc")
(-inf, None)
| def fzy_scorer(needle: str, haystack: str) -> SCORE_INDICES:
"""Use fzy matching algorithem to match needle against haystack.
Note:
The `fzf` unordered search is not supported for performance concern.
When the provided `needle` is not a subsequence of `haystack` at all,
then `(-inf, None)` is returned.
See Also:
https://github.com/jhawthorn/fzy/blob/master/src/match.c
Args:
needle: Substring to find in haystack.
haystack: String to be searched and scored against.
Returns:
A tuple of matching score with a list of matching indices.
Examples:
>>> fzy_scorer("ab", "acb")
(0.89, [0, 2])
>>> fzy_scorer("ab", "acbabc")
(0.98, [3, 4])
>>> fzy_scorer("ab", "wc")
(-inf, None)
"""
if _subsequence(needle, haystack):
return _score(needle, haystack)
else:
return SCORE_MIN, None
| (needle: str, haystack: str) -> Tuple[float, Optional[List[int]]] |
30,055 | pfzy.score | substr_scorer | Match needle against haystack using :meth:`str.find`.
Note:
Scores may be negative but the higher the score, the higher
the match rank. `-inf` score means no match found.
See Also:
https://github.com/aslpavel/sweep.py/blob/3f4a179b708059c12b9e5d76d1eb3c70bf2caadc/sweep.py#L837
Args:
needle: Substring to find in haystack.
haystack: String to be searched and scored against.
Returns:
A tuple of matching score with a list of matching indices.
Example:
>>> substr_scorer("ab", "awsab")
(-1.3, [3, 4])
>>> substr_scorer("ab", "abc")
(0.5, [0, 1])
>>> substr_scorer("ab", "iop")
(-inf, None)
>>> substr_scorer("ab", "asdafswabc")
(-1.6388888888888888, [7, 8])
>>> substr_scorer(" ", "asdf")
(0, [])
| def substr_scorer(needle: str, haystack: str) -> SCORE_INDICES:
"""Match needle against haystack using :meth:`str.find`.
Note:
Scores may be negative but the higher the score, the higher
the match rank. `-inf` score means no match found.
See Also:
https://github.com/aslpavel/sweep.py/blob/3f4a179b708059c12b9e5d76d1eb3c70bf2caadc/sweep.py#L837
Args:
needle: Substring to find in haystack.
haystack: String to be searched and scored against.
Returns:
A tuple of matching score with a list of matching indices.
Example:
>>> substr_scorer("ab", "awsab")
(-1.3, [3, 4])
>>> substr_scorer("ab", "abc")
(0.5, [0, 1])
>>> substr_scorer("ab", "iop")
(-inf, None)
>>> substr_scorer("ab", "asdafswabc")
(-1.6388888888888888, [7, 8])
>>> substr_scorer(" ", "asdf")
(0, [])
"""
indices = []
offset = 0
needle, haystack = needle.lower(), haystack.lower()
for needle in needle.split(" "):
if not needle:
continue
offset = haystack.find(needle, offset)
if offset < 0:
return SCORE_MIN, None
needle_len = len(needle)
indices.extend(range(offset, offset + needle_len))
offset += needle_len
if not indices:
return 0, indices
return (
-(indices[-1] + 1 - indices[0]) + 2 / (indices[0] + 1) + 1 / (indices[-1] + 1),
indices,
)
| (needle: str, haystack: str) -> Tuple[float, Optional[List[int]]] |
30,057 | networkx.exception | AmbiguousSolution | Raised if more than one valid solution exists for an intermediary step
of an algorithm.
In the face of ambiguity, refuse the temptation to guess.
This may occur, for example, when trying to determine the
bipartite node sets in a disconnected bipartite graph when
computing bipartite matchings.
| class AmbiguousSolution(NetworkXException):
"""Raised if more than one valid solution exists for an intermediary step
of an algorithm.
In the face of ambiguity, refuse the temptation to guess.
This may occur, for example, when trying to determine the
bipartite node sets in a disconnected bipartite graph when
computing bipartite matchings.
"""
| null |
30,058 | networkx.algorithms.tree.branchings | ArborescenceIterator |
Iterate over all spanning arborescences of a graph in either increasing or
decreasing cost.
Notes
-----
This iterator uses the partition scheme from [1]_ (included edges,
excluded edges and open edges). It generates minimum spanning
arborescences using a modified Edmonds' Algorithm which respects the
partition of edges. For arborescences with the same weight, ties are
broken arbitrarily.
References
----------
.. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning
trees in order of increasing cost, Pesquisa Operacional, 2005-08,
Vol. 25 (2), p. 219-229,
https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
| class ArborescenceIterator:
"""
Iterate over all spanning arborescences of a graph in either increasing or
decreasing cost.
Notes
-----
This iterator uses the partition scheme from [1]_ (included edges,
excluded edges and open edges). It generates minimum spanning
arborescences using a modified Edmonds' Algorithm which respects the
partition of edges. For arborescences with the same weight, ties are
broken arbitrarily.
References
----------
.. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning
trees in order of increasing cost, Pesquisa Operacional, 2005-08,
Vol. 25 (2), p. 219-229,
https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
"""
@dataclass(order=True)
class Partition:
"""
This dataclass represents a partition and stores a dict with the edge
data and the weight of the minimum spanning arborescence of the
partition dict.
"""
mst_weight: float
partition_dict: dict = field(compare=False)
def __copy__(self):
return ArborescenceIterator.Partition(
self.mst_weight, self.partition_dict.copy()
)
def __init__(self, G, weight="weight", minimum=True, init_partition=None):
"""
Initialize the iterator
Parameters
----------
G : nx.DiGraph
The directed graph which we need to iterate trees over
weight : String, default = "weight"
The edge attribute used to store the weight of the edge
minimum : bool, default = True
Return the trees in increasing order while true and decreasing order
while false.
init_partition : tuple, default = None
In the case that certain edges have to be included or excluded from
the arborescences, `init_partition` should be in the form
`(included_edges, excluded_edges)` where each edges is a
`(u, v)`-tuple inside an iterable such as a list or set.
"""
self.G = G.copy()
self.weight = weight
self.minimum = minimum
self.method = (
minimum_spanning_arborescence if minimum else maximum_spanning_arborescence
)
# Randomly create a key for an edge attribute to hold the partition data
self.partition_key = (
"ArborescenceIterators super secret partition attribute name"
)
if init_partition is not None:
partition_dict = {}
for e in init_partition[0]:
partition_dict[e] = nx.EdgePartition.INCLUDED
for e in init_partition[1]:
partition_dict[e] = nx.EdgePartition.EXCLUDED
self.init_partition = ArborescenceIterator.Partition(0, partition_dict)
else:
self.init_partition = None
def __iter__(self):
"""
Returns
-------
ArborescenceIterator
The iterator object for this graph
"""
self.partition_queue = PriorityQueue()
self._clear_partition(self.G)
# Write the initial partition if it exists.
if self.init_partition is not None:
self._write_partition(self.init_partition)
mst_weight = self.method(
self.G,
self.weight,
partition=self.partition_key,
preserve_attrs=True,
).size(weight=self.weight)
self.partition_queue.put(
self.Partition(
mst_weight if self.minimum else -mst_weight,
{}
if self.init_partition is None
else self.init_partition.partition_dict,
)
)
return self
def __next__(self):
"""
Returns
-------
(multi)Graph
The spanning tree of next greatest weight, which ties broken
arbitrarily.
"""
if self.partition_queue.empty():
del self.G, self.partition_queue
raise StopIteration
partition = self.partition_queue.get()
self._write_partition(partition)
next_arborescence = self.method(
self.G,
self.weight,
partition=self.partition_key,
preserve_attrs=True,
)
self._partition(partition, next_arborescence)
self._clear_partition(next_arborescence)
return next_arborescence
def _partition(self, partition, partition_arborescence):
"""
Create new partitions based of the minimum spanning tree of the
current minimum partition.
Parameters
----------
partition : Partition
The Partition instance used to generate the current minimum spanning
tree.
partition_arborescence : nx.Graph
The minimum spanning arborescence of the input partition.
"""
# create two new partitions with the data from the input partition dict
p1 = self.Partition(0, partition.partition_dict.copy())
p2 = self.Partition(0, partition.partition_dict.copy())
for e in partition_arborescence.edges:
# determine if the edge was open or included
if e not in partition.partition_dict:
# This is an open edge
p1.partition_dict[e] = nx.EdgePartition.EXCLUDED
p2.partition_dict[e] = nx.EdgePartition.INCLUDED
self._write_partition(p1)
try:
p1_mst = self.method(
self.G,
self.weight,
partition=self.partition_key,
preserve_attrs=True,
)
p1_mst_weight = p1_mst.size(weight=self.weight)
p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight
self.partition_queue.put(p1.__copy__())
except nx.NetworkXException:
pass
p1.partition_dict = p2.partition_dict.copy()
def _write_partition(self, partition):
"""
Writes the desired partition into the graph to calculate the minimum
spanning tree. Also, if one incoming edge is included, mark all others
as excluded so that if that vertex is merged during Edmonds' algorithm
we cannot still pick another of that vertex's included edges.
Parameters
----------
partition : Partition
A Partition dataclass describing a partition on the edges of the
graph.
"""
for u, v, d in self.G.edges(data=True):
if (u, v) in partition.partition_dict:
d[self.partition_key] = partition.partition_dict[(u, v)]
else:
d[self.partition_key] = nx.EdgePartition.OPEN
nx._clear_cache(self.G)
for n in self.G:
included_count = 0
excluded_count = 0
for u, v, d in self.G.in_edges(nbunch=n, data=True):
if d.get(self.partition_key) == nx.EdgePartition.INCLUDED:
included_count += 1
elif d.get(self.partition_key) == nx.EdgePartition.EXCLUDED:
excluded_count += 1
# Check that if there is an included edges, all other incoming ones
# are excluded. If not fix it!
if included_count == 1 and excluded_count != self.G.in_degree(n) - 1:
for u, v, d in self.G.in_edges(nbunch=n, data=True):
if d.get(self.partition_key) != nx.EdgePartition.INCLUDED:
d[self.partition_key] = nx.EdgePartition.EXCLUDED
def _clear_partition(self, G):
"""
Removes partition data from the graph
"""
for u, v, d in G.edges(data=True):
if self.partition_key in d:
del d[self.partition_key]
nx._clear_cache(self.G)
| (G, weight='weight', minimum=True, init_partition=None) |
30,059 | networkx.algorithms.tree.branchings | __init__ |
Initialize the iterator
Parameters
----------
G : nx.DiGraph
The directed graph which we need to iterate trees over
weight : String, default = "weight"
The edge attribute used to store the weight of the edge
minimum : bool, default = True
Return the trees in increasing order while true and decreasing order
while false.
init_partition : tuple, default = None
In the case that certain edges have to be included or excluded from
the arborescences, `init_partition` should be in the form
`(included_edges, excluded_edges)` where each edges is a
`(u, v)`-tuple inside an iterable such as a list or set.
| def __init__(self, G, weight="weight", minimum=True, init_partition=None):
"""
Initialize the iterator
Parameters
----------
G : nx.DiGraph
The directed graph which we need to iterate trees over
weight : String, default = "weight"
The edge attribute used to store the weight of the edge
minimum : bool, default = True
Return the trees in increasing order while true and decreasing order
while false.
init_partition : tuple, default = None
In the case that certain edges have to be included or excluded from
the arborescences, `init_partition` should be in the form
`(included_edges, excluded_edges)` where each edges is a
`(u, v)`-tuple inside an iterable such as a list or set.
"""
self.G = G.copy()
self.weight = weight
self.minimum = minimum
self.method = (
minimum_spanning_arborescence if minimum else maximum_spanning_arborescence
)
# Randomly create a key for an edge attribute to hold the partition data
self.partition_key = (
"ArborescenceIterators super secret partition attribute name"
)
if init_partition is not None:
partition_dict = {}
for e in init_partition[0]:
partition_dict[e] = nx.EdgePartition.INCLUDED
for e in init_partition[1]:
partition_dict[e] = nx.EdgePartition.EXCLUDED
self.init_partition = ArborescenceIterator.Partition(0, partition_dict)
else:
self.init_partition = None
| (self, G, weight='weight', minimum=True, init_partition=None) |
30,060 | networkx.algorithms.tree.branchings | __iter__ |
Returns
-------
ArborescenceIterator
The iterator object for this graph
| def __iter__(self):
"""
Returns
-------
ArborescenceIterator
The iterator object for this graph
"""
self.partition_queue = PriorityQueue()
self._clear_partition(self.G)
# Write the initial partition if it exists.
if self.init_partition is not None:
self._write_partition(self.init_partition)
mst_weight = self.method(
self.G,
self.weight,
partition=self.partition_key,
preserve_attrs=True,
).size(weight=self.weight)
self.partition_queue.put(
self.Partition(
mst_weight if self.minimum else -mst_weight,
{}
if self.init_partition is None
else self.init_partition.partition_dict,
)
)
return self
| (self) |
30,061 | networkx.algorithms.tree.branchings | __next__ |
Returns
-------
(multi)Graph
The spanning tree of next greatest weight, which ties broken
arbitrarily.
| def __next__(self):
"""
Returns
-------
(multi)Graph
The spanning tree of next greatest weight, which ties broken
arbitrarily.
"""
if self.partition_queue.empty():
del self.G, self.partition_queue
raise StopIteration
partition = self.partition_queue.get()
self._write_partition(partition)
next_arborescence = self.method(
self.G,
self.weight,
partition=self.partition_key,
preserve_attrs=True,
)
self._partition(partition, next_arborescence)
self._clear_partition(next_arborescence)
return next_arborescence
| (self) |
30,062 | networkx.algorithms.tree.branchings | _clear_partition |
Removes partition data from the graph
| def _clear_partition(self, G):
"""
Removes partition data from the graph
"""
for u, v, d in G.edges(data=True):
if self.partition_key in d:
del d[self.partition_key]
nx._clear_cache(self.G)
| (self, G) |
30,063 | networkx.algorithms.tree.branchings | _partition |
Create new partitions based of the minimum spanning tree of the
current minimum partition.
Parameters
----------
partition : Partition
The Partition instance used to generate the current minimum spanning
tree.
partition_arborescence : nx.Graph
The minimum spanning arborescence of the input partition.
| def _partition(self, partition, partition_arborescence):
"""
Create new partitions based of the minimum spanning tree of the
current minimum partition.
Parameters
----------
partition : Partition
The Partition instance used to generate the current minimum spanning
tree.
partition_arborescence : nx.Graph
The minimum spanning arborescence of the input partition.
"""
# create two new partitions with the data from the input partition dict
p1 = self.Partition(0, partition.partition_dict.copy())
p2 = self.Partition(0, partition.partition_dict.copy())
for e in partition_arborescence.edges:
# determine if the edge was open or included
if e not in partition.partition_dict:
# This is an open edge
p1.partition_dict[e] = nx.EdgePartition.EXCLUDED
p2.partition_dict[e] = nx.EdgePartition.INCLUDED
self._write_partition(p1)
try:
p1_mst = self.method(
self.G,
self.weight,
partition=self.partition_key,
preserve_attrs=True,
)
p1_mst_weight = p1_mst.size(weight=self.weight)
p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight
self.partition_queue.put(p1.__copy__())
except nx.NetworkXException:
pass
p1.partition_dict = p2.partition_dict.copy()
| (self, partition, partition_arborescence) |
30,064 | networkx.algorithms.tree.branchings | _write_partition |
Writes the desired partition into the graph to calculate the minimum
spanning tree. Also, if one incoming edge is included, mark all others
as excluded so that if that vertex is merged during Edmonds' algorithm
we cannot still pick another of that vertex's included edges.
Parameters
----------
partition : Partition
A Partition dataclass describing a partition on the edges of the
graph.
| def _write_partition(self, partition):
"""
Writes the desired partition into the graph to calculate the minimum
spanning tree. Also, if one incoming edge is included, mark all others
as excluded so that if that vertex is merged during Edmonds' algorithm
we cannot still pick another of that vertex's included edges.
Parameters
----------
partition : Partition
A Partition dataclass describing a partition on the edges of the
graph.
"""
for u, v, d in self.G.edges(data=True):
if (u, v) in partition.partition_dict:
d[self.partition_key] = partition.partition_dict[(u, v)]
else:
d[self.partition_key] = nx.EdgePartition.OPEN
nx._clear_cache(self.G)
for n in self.G:
included_count = 0
excluded_count = 0
for u, v, d in self.G.in_edges(nbunch=n, data=True):
if d.get(self.partition_key) == nx.EdgePartition.INCLUDED:
included_count += 1
elif d.get(self.partition_key) == nx.EdgePartition.EXCLUDED:
excluded_count += 1
# Check that if there is an included edges, all other incoming ones
# are excluded. If not fix it!
if included_count == 1 and excluded_count != self.G.in_degree(n) - 1:
for u, v, d in self.G.in_edges(nbunch=n, data=True):
if d.get(self.partition_key) != nx.EdgePartition.INCLUDED:
d[self.partition_key] = nx.EdgePartition.EXCLUDED
| (self, partition) |
30,065 | networkx.classes.digraph | DiGraph |
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes. By convention `None` is not used as a node.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be any format that is supported
by the to_networkx_graph() function, currently including edge list,
dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy
sparse matrix, or PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2, 3])
>>> G.add_nodes_from(range(100, 110))
>>> H = nx.path_graph(10)
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1, 2), (1, 3)])
or a collection of edges,
>>> G.add_edges_from(H.edges)
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.nodes
>>> G.add_node(1, time="5pm")
>>> G.add_nodes_from([3], time="2pm")
>>> G.nodes[1]
{'time': '5pm'}
>>> G.nodes[1]["room"] = 714
>>> del G.nodes[1]["room"] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edges.
>>> G.add_edge(1, 2, weight=4.7)
>>> G.add_edges_from([(3, 4), (4, 5)], color="red")
>>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})])
>>> G[1][2]["weight"] = 4.7
>>> G.edges[1, 2]["weight"] = 4
Warning: we protect the graph data structure by making `G.edges[1, 2]` a
read-only dict-like structure. However, you can assign to attributes
in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
data attributes: `G.edges[1, 2]['weight'] = 4`
(For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n < 3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
Often the best way to traverse all edges of a graph is via the neighbors.
The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`
>>> for n, nbrsdict in G.adjacency():
... for nbr, eattr in nbrsdict.items():
... if "weight" in eattr:
... # Do something useful with the edges
... pass
But the edges reporting object is often more convenient:
>>> for u, v, weight in G.edges(data="weight"):
... if weight is not None:
... # Do something useful with the edges
... pass
**Reporting:**
Simple graph information is obtained using object-attributes and methods.
Reporting usually provides views instead of containers to reduce memory
usage. The views update as the graph is updated similarly to dict-views.
The objects `nodes`, `edges` and `adj` provide access to data attributes
via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration
(e.g. `nodes.items()`, `nodes.data('color')`,
`nodes.data('color', default='blue')` and similarly for `edges`)
Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency information keyed by node.
The next dict (adjlist_dict) represents the adjacency information and holds
edge data keyed by neighbor. The inner dict (edge_attr_dict) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced in a subclass by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure. The variable names are
node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory.
node_dict_factory : function, (default: dict)
Factory function to be used to create the dict containing node
attributes, keyed by node id.
It should require no arguments and return a dict-like object
node_attr_dict_factory: function, (default: dict)
Factory function to be used to create the node attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object
adjlist_outer_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency info keyed by node.
It should require no arguments and return a dict-like object.
adjlist_inner_dict_factory : function, optional (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, optional (default: dict)
Factory function to be used to create the edge attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
graph_attr_dict_factory : function, (default: dict)
Factory function to be used to create the graph attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Typically, if your extension doesn't impact the data structure, all
methods will be inherited without issue except `to_directed/to_undirected`.
By default these methods create a DiGraph/Graph class and you probably
want them to create your extension of a DiGraph/Graph. To facilitate
this we define two class variables that you can set in your subclass.
to_directed_class : callable, (default: DiGraph or MultiDiGraph)
Class to create a new graph structure in the `to_directed` method.
If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
to_undirected_class : callable, (default: Graph or MultiGraph)
Class to create a new graph structure in the `to_undirected` method.
If `None`, a NetworkX class (Graph or MultiGraph) is used.
**Subclassing Example**
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {"weight": 1}
...
... def single_edge_dict(self):
... return self.all_edge_dict
...
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2, 1)
>>> G[2][1]
{'weight': 1}
>>> G.add_edge(2, 2)
>>> G[2][1] is G[2][2]
True
| class DiGraph(Graph):
"""
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes. By convention `None` is not used as a node.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be any format that is supported
by the to_networkx_graph() function, currently including edge list,
dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy
sparse matrix, or PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2, 3])
>>> G.add_nodes_from(range(100, 110))
>>> H = nx.path_graph(10)
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1, 2), (1, 3)])
or a collection of edges,
>>> G.add_edges_from(H.edges)
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.nodes
>>> G.add_node(1, time="5pm")
>>> G.add_nodes_from([3], time="2pm")
>>> G.nodes[1]
{'time': '5pm'}
>>> G.nodes[1]["room"] = 714
>>> del G.nodes[1]["room"] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edges.
>>> G.add_edge(1, 2, weight=4.7)
>>> G.add_edges_from([(3, 4), (4, 5)], color="red")
>>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})])
>>> G[1][2]["weight"] = 4.7
>>> G.edges[1, 2]["weight"] = 4
Warning: we protect the graph data structure by making `G.edges[1, 2]` a
read-only dict-like structure. However, you can assign to attributes
in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
data attributes: `G.edges[1, 2]['weight'] = 4`
(For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n < 3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
Often the best way to traverse all edges of a graph is via the neighbors.
The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`
>>> for n, nbrsdict in G.adjacency():
... for nbr, eattr in nbrsdict.items():
... if "weight" in eattr:
... # Do something useful with the edges
... pass
But the edges reporting object is often more convenient:
>>> for u, v, weight in G.edges(data="weight"):
... if weight is not None:
... # Do something useful with the edges
... pass
**Reporting:**
Simple graph information is obtained using object-attributes and methods.
Reporting usually provides views instead of containers to reduce memory
usage. The views update as the graph is updated similarly to dict-views.
The objects `nodes`, `edges` and `adj` provide access to data attributes
via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration
(e.g. `nodes.items()`, `nodes.data('color')`,
`nodes.data('color', default='blue')` and similarly for `edges`)
Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency information keyed by node.
The next dict (adjlist_dict) represents the adjacency information and holds
edge data keyed by neighbor. The inner dict (edge_attr_dict) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced in a subclass by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure. The variable names are
node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory.
node_dict_factory : function, (default: dict)
Factory function to be used to create the dict containing node
attributes, keyed by node id.
It should require no arguments and return a dict-like object
node_attr_dict_factory: function, (default: dict)
Factory function to be used to create the node attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object
adjlist_outer_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency info keyed by node.
It should require no arguments and return a dict-like object.
adjlist_inner_dict_factory : function, optional (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, optional (default: dict)
Factory function to be used to create the edge attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
graph_attr_dict_factory : function, (default: dict)
Factory function to be used to create the graph attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Typically, if your extension doesn't impact the data structure, all
methods will be inherited without issue except `to_directed/to_undirected`.
By default these methods create a DiGraph/Graph class and you probably
want them to create your extension of a DiGraph/Graph. To facilitate
this we define two class variables that you can set in your subclass.
to_directed_class : callable, (default: DiGraph or MultiDiGraph)
Class to create a new graph structure in the `to_directed` method.
If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
to_undirected_class : callable, (default: Graph or MultiGraph)
Class to create a new graph structure in the `to_undirected` method.
If `None`, a NetworkX class (Graph or MultiGraph) is used.
**Subclassing Example**
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {"weight": 1}
...
... def single_edge_dict(self):
... return self.all_edge_dict
...
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2, 1)
>>> G[2][1]
{'weight': 1}
>>> G.add_edge(2, 2)
>>> G[2][1] is G[2][2]
True
"""
_adj = _CachedPropertyResetterAdjAndSucc() # type: ignore[assignment]
_succ = _adj # type: ignore[has-type]
_pred = _CachedPropertyResetterPred()
    def __init__(self, incoming_graph_data=None, **attr):
        """Initialize a graph with edges, name, or graph attributes.

        Parameters
        ----------
        incoming_graph_data : input graph (optional, default: None)
            Data to initialize graph. If None (default) an empty
            graph is created. The data can be an edge list, or any
            NetworkX graph object. If the corresponding optional Python
            packages are installed the data can also be a 2D NumPy array, a
            SciPy sparse array, or a PyGraphviz graph.
        attr : keyword arguments, optional (default= no attributes)
            Attributes to add to graph as key=value pairs.

        See Also
        --------
        convert

        Examples
        --------
        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
        >>> G = nx.Graph(name="my graph")
        >>> e = [(1, 2), (2, 3), (3, 4)]  # list of edges
        >>> G = nx.Graph(e)

        Arbitrary graph attribute pairs (key=value) may be assigned

        >>> G = nx.Graph(e, day="Friday")
        >>> G.graph
        {'day': 'Friday'}
        """
        self.graph = self.graph_attr_dict_factory()  # dictionary for graph attributes
        self._node = self.node_dict_factory()  # dictionary for node attr
        # We store two adjacency lists:
        # the predecessors of node n are stored in the dict self._pred
        # the successors of node n are stored in the dict self._succ=self._adj
        # NOTE: the class-level descriptor makes the ``self._adj`` assignment
        # also populate ``self._succ`` (see ``_CachedPropertyResetterAdjAndSucc``)
        # -- confirm against the descriptor's implementation.
        self._adj = self.adjlist_outer_dict_factory()  # empty adjacency dict successor
        self._pred = self.adjlist_outer_dict_factory()  # predecessor
        # Note: self._succ = self._adj # successor
        self.__networkx_cache__ = {}
        # attempt to load graph with data
        if incoming_graph_data is not None:
            convert.to_networkx_graph(incoming_graph_data, create_using=self)
        # load graph attributes (must be after convert, so that keyword
        # attributes take precedence over any attrs carried by the input data)
        self.graph.update(attr)
@cached_property
def adj(self):
"""Graph adjacency object holding the neighbors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.adj behaves like a dict. Useful idioms include
`for nbr, datadict in G.adj[n].items():`.
The neighbor information is also provided by subscripting the graph.
So `for nbr, foovalue in G[node].data('foo', default=1):` works.
For directed graphs, `G.adj` holds outgoing (successor) info.
"""
return AdjacencyView(self._succ)
@cached_property
def succ(self):
"""Graph adjacency object holding the successors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.succ[3][2]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.succ behaves like a dict. Useful idioms include
`for nbr, datadict in G.succ[n].items():`. A data-view not provided
by dicts also exists: `for nbr, foovalue in G.succ[node].data('foo'):`
and a default can be set via a `default` argument to the `data` method.
The neighbor information is also provided by subscripting the graph.
So `for nbr, foovalue in G[node].data('foo', default=1):` works.
For directed graphs, `G.adj` is identical to `G.succ`.
"""
return AdjacencyView(self._succ)
@cached_property
def pred(self):
"""Graph adjacency object holding the predecessors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.pred[2][3]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.pred behaves like a dict. Useful idioms include
`for nbr, datadict in G.pred[n].items():`. A data-view not provided
by dicts also exists: `for nbr, foovalue in G.pred[node].data('foo'):`
A default can be set via a `default` argument to the `data` method.
"""
return AdjacencyView(self._pred)
def add_node(self, node_for_adding, **attr):
"""Add a single node `node_for_adding` and update node attributes.
Parameters
----------
node_for_adding : node
A node can be any hashable Python object except None.
attr : keyword arguments, optional
Set or change node attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node("Hello")
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1, size=10)
>>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
if node_for_adding not in self._succ:
if node_for_adding is None:
raise ValueError("None cannot be a node")
self._succ[node_for_adding] = self.adjlist_inner_dict_factory()
self._pred[node_for_adding] = self.adjlist_inner_dict_factory()
attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory()
attr_dict.update(attr)
else: # update attr even if node already exists
self._node[node_for_adding].update(attr)
nx._clear_cache(self)
    def add_nodes_from(self, nodes_for_adding, **attr):
        """Add multiple nodes.

        Parameters
        ----------
        nodes_for_adding : iterable container
            A container of nodes (list, dict, set, etc.).
            OR
            A container of (node, attribute dict) tuples.
            Node attributes are updated using the attribute dict.
        attr : keyword arguments, optional (default= no attributes)
            Update attributes for all nodes in nodes.
            Node attributes specified in nodes as a tuple take
            precedence over attributes specified via keyword arguments.

        See Also
        --------
        add_node

        Notes
        -----
        When adding nodes from an iterator over the graph you are changing,
        a `RuntimeError` can be raised with message:
        `RuntimeError: dictionary changed size during iteration`. This
        happens when the graph's underlying dictionary is modified during
        iteration. To avoid this error, evaluate the iterator into a separate
        object, e.g. by using `list(iterator_of_nodes)`, and pass this
        object to `G.add_nodes_from`.

        Examples
        --------
        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
        >>> G.add_nodes_from("Hello")
        >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
        >>> G.add_nodes_from(K3)
        >>> sorted(G.nodes(), key=str)
        [0, 1, 2, 'H', 'e', 'l', 'o']

        Use keywords to update specific node attributes for every node.

        >>> G.add_nodes_from([1, 2], size=10)
        >>> G.add_nodes_from([3, 4], weight=0.4)

        Use (node, attrdict) tuples to update attributes for specific nodes.

        >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})])
        >>> G.nodes[1]["size"]
        11
        >>> H = nx.Graph()
        >>> H.add_nodes_from(G.nodes(data=True))
        >>> H.nodes[1]["size"]
        11

        Evaluate an iterator over a graph if using it to modify the same graph

        >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)])
        >>> # wrong way - will raise RuntimeError
        >>> # G.add_nodes_from(n + 1 for n in G.nodes)
        >>> # correct way
        >>> G.add_nodes_from(list(n + 1 for n in G.nodes))
        """
        for n in nodes_for_adding:
            try:
                # Assume n is a plain (hashable) node; keyword attrs apply as-is.
                newnode = n not in self._node
                newdict = attr
            except TypeError:
                # The membership test raised TypeError, so n was unhashable --
                # treat it as a (node, attr_dict) 2-tuple (the dict member makes
                # the tuple unhashable).  The per-node dict is layered over a
                # copy of the keyword attrs, so tuple attrs take precedence.
                n, ndict = n
                newnode = n not in self._node
                newdict = attr.copy()
                newdict.update(ndict)
            if newnode:
                if n is None:
                    raise ValueError("None cannot be a node")
                self._succ[n] = self.adjlist_inner_dict_factory()
                self._pred[n] = self.adjlist_inner_dict_factory()
                self._node[n] = self.node_attr_dict_factory()
            # Runs for both new and existing nodes: merge in the attributes.
            self._node[n].update(newdict)
        nx._clear_cache(self)
    def remove_node(self, n):
        """Remove node n.

        Removes the node n and all adjacent edges.
        Attempting to remove a nonexistent node will raise an exception.

        Parameters
        ----------
        n : node
            A node in the graph

        Raises
        ------
        NetworkXError
            If n is not in the graph.

        See Also
        --------
        remove_nodes_from

        Examples
        --------
        >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc
        >>> list(G.edges)
        [(0, 1), (1, 2)]
        >>> G.remove_node(1)
        >>> list(G.edges)
        []
        """
        try:
            nbrs = self._succ[n]
            del self._node[n]
        except KeyError as err:  # NetworkXError if n not in self
            raise NetworkXError(f"The node {n} is not in the digraph.") from err
        # Remove the back-reference to n from each successor's predecessor
        # dict, then drop n's own successor dict.
        for u in nbrs:
            del self._pred[u][n]  # remove all edges n-u in digraph
        del self._succ[n]  # remove node from succ
        # Symmetrically, remove the forward reference to n from each
        # predecessor's successor dict, then drop n's predecessor dict.
        for u in self._pred[n]:
            del self._succ[u][n]  # remove all edges n-u in digraph
        del self._pred[n]  # remove node from pred
        nx._clear_cache(self)
    def remove_nodes_from(self, nodes):
        """Remove multiple nodes.

        Parameters
        ----------
        nodes : iterable container
            A container of nodes (list, dict, set, etc.). If a node
            in the container is not in the graph it is silently ignored.

        See Also
        --------
        remove_node

        Notes
        -----
        When removing nodes from an iterator over the graph you are changing,
        a `RuntimeError` will be raised with message:
        `RuntimeError: dictionary changed size during iteration`. This
        happens when the graph's underlying dictionary is modified during
        iteration. To avoid this error, evaluate the iterator into a separate
        object, e.g. by using `list(iterator_of_nodes)`, and pass this
        object to `G.remove_nodes_from`.

        Examples
        --------
        >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc
        >>> e = list(G.nodes)
        >>> e
        [0, 1, 2]
        >>> G.remove_nodes_from(e)
        >>> list(G.nodes)
        []

        Evaluate an iterator over a graph if using it to modify the same graph

        >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)])
        >>> # this command will fail, as the graph's dict is modified during iteration
        >>> # G.remove_nodes_from(n for n in G.nodes if n < 2)
        >>> # this command will work, since the dictionary underlying graph is not modified
        >>> G.remove_nodes_from(list(n for n in G.nodes if n < 2))
        """
        for n in nodes:
            try:
                # Same deletion order as remove_node: first the node record,
                # then both directions of adjacency.
                succs = self._succ[n]
                del self._node[n]
                for u in succs:
                    del self._pred[u][n]  # remove all edges n-u in digraph
                del self._succ[n]  # now remove node
                for u in self._pred[n]:
                    del self._succ[u][n]  # remove all edges n-u in digraph
                del self._pred[n]  # now remove node
            except KeyError:
                pass  # silent failure on remove
        nx._clear_cache(self)
def add_edge(self, u_of_edge, v_of_edge, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by directly
accessing the edge's attribute dictionary. See examples below.
Parameters
----------
u_of_edge, v_of_edge : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use
an edge attribute (by default `weight`) to hold a numerical value.
Examples
--------
The following all add the edge e=(1, 2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1, 2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from([(1, 2)]) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
For non-string attribute keys, use subscript notation.
>>> G.add_edge(1, 2)
>>> G[1][2].update({0: 5})
>>> G.edges[1, 2].update({0: 5})
"""
u, v = u_of_edge, v_of_edge
# add nodes
if u not in self._succ:
if u is None:
raise ValueError("None cannot be a node")
self._succ[u] = self.adjlist_inner_dict_factory()
self._pred[u] = self.adjlist_inner_dict_factory()
self._node[u] = self.node_attr_dict_factory()
if v not in self._succ:
if v is None:
raise ValueError("None cannot be a node")
self._succ[v] = self.adjlist_inner_dict_factory()
self._pred[v] = self.adjlist_inner_dict_factory()
self._node[v] = self.node_attr_dict_factory()
# add the edge
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
datadict.update(attr)
self._succ[u][v] = datadict
self._pred[v][u] = datadict
nx._clear_cache(self)
    def add_edges_from(self, ebunch_to_add, **attr):
        """Add all the edges in ebunch_to_add.

        Parameters
        ----------
        ebunch_to_add : container of edges
            Each edge given in the container will be added to the
            graph. The edges must be given as 2-tuples (u, v) or
            3-tuples (u, v, d) where d is a dictionary containing edge data.
        attr : keyword arguments, optional
            Edge data (or labels or objects) can be assigned using
            keyword arguments.

        See Also
        --------
        add_edge : add a single edge
        add_weighted_edges_from : convenient way to add weighted edges

        Notes
        -----
        Adding the same edge twice has no effect but any edge data
        will be updated when each duplicate edge is added.
        Edge attributes specified in an ebunch take precedence over
        attributes specified via keyword arguments.
        When adding edges from an iterator over the graph you are changing,
        a `RuntimeError` can be raised with message:
        `RuntimeError: dictionary changed size during iteration`. This
        happens when the graph's underlying dictionary is modified during
        iteration. To avoid this error, evaluate the iterator into a separate
        object, e.g. by using `list(iterator_of_edges)`, and pass this
        object to `G.add_edges_from`.

        Examples
        --------
        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
        >>> G.add_edges_from([(0, 1), (1, 2)])  # using a list of edge tuples
        >>> e = zip(range(0, 3), range(1, 4))
        >>> G.add_edges_from(e)  # Add the path graph 0-1-2-3

        Associate data to edges

        >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
        >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898")

        Evaluate an iterator over a graph if using it to modify the same graph

        >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
        >>> # Grow graph by one new node, adding edges to all existing nodes.
        >>> # wrong way - will raise RuntimeError
        >>> # G.add_edges_from(((5, n) for n in G.nodes))
        >>> # right way - note that there will be no self-edge for node 5
        >>> G.add_edges_from(list((5, n) for n in G.nodes))
        """
        for e in ebunch_to_add:
            # Dispatch on tuple length: (u, v) or (u, v, data_dict).
            ne = len(e)
            if ne == 3:
                u, v, dd = e
            elif ne == 2:
                u, v = e
                dd = {}
            else:
                raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
            if u not in self._succ:
                if u is None:
                    raise ValueError("None cannot be a node")
                self._succ[u] = self.adjlist_inner_dict_factory()
                self._pred[u] = self.adjlist_inner_dict_factory()
                self._node[u] = self.node_attr_dict_factory()
            if v not in self._succ:
                if v is None:
                    raise ValueError("None cannot be a node")
                self._succ[v] = self.adjlist_inner_dict_factory()
                self._pred[v] = self.adjlist_inner_dict_factory()
                self._node[v] = self.node_attr_dict_factory()
            # Reuse the existing edge-data dict if the edge already exists;
            # per-edge dict ``dd`` is applied last so it wins over ``attr``.
            datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
            datadict.update(attr)
            datadict.update(dd)
            # Both directions share the same data dict.
            self._succ[u][v] = datadict
            self._pred[v][u] = datadict
        nx._clear_cache(self)
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u, v : nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, etc
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.remove_edge(0, 1)
>>> e = (1, 2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2, 3, {"weight": 7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self._succ[u][v]
del self._pred[v][u]
except KeyError as err:
raise NetworkXError(f"The edge {u}-{v} not in graph.") from err
nx._clear_cache(self)
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u, v) edge between u and v.
- 3-tuples (u, v, k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> ebunch = [(1, 2), (2, 3)]
>>> G.remove_edges_from(ebunch)
"""
for e in ebunch:
u, v = e[:2] # ignore edge data
if u in self._succ and v in self._succ[u]:
del self._succ[u][v]
del self._pred[v][u]
nx._clear_cache(self)
def has_successor(self, u, v):
"""Returns True if node u has successor v.
This is true if graph has the edge u->v.
"""
return u in self._succ and v in self._succ[u]
def has_predecessor(self, u, v):
"""Returns True if node u has predecessor v.
This is true if graph has the edge u<-v.
"""
return u in self._pred and v in self._pred[u]
def successors(self, n):
"""Returns an iterator over successor nodes of n.
A successor of n is a node m such that there exists a directed
edge from n to m.
Parameters
----------
n : node
A node in the graph
Raises
------
NetworkXError
If n is not in the graph.
See Also
--------
predecessors
Notes
-----
neighbors() and successors() are the same.
"""
try:
return iter(self._succ[n])
except KeyError as err:
raise NetworkXError(f"The node {n} is not in the digraph.") from err
# digraph definitions
neighbors = successors
def predecessors(self, n):
"""Returns an iterator over predecessor nodes of n.
A predecessor of n is a node m such that there exists a directed
edge from m to n.
Parameters
----------
n : node
A node in the graph
Raises
------
NetworkXError
If n is not in the graph.
See Also
--------
successors
"""
try:
return iter(self._pred[n])
except KeyError as err:
raise NetworkXError(f"The node {n} is not in the digraph.") from err
@cached_property
def edges(self):
"""An OutEdgeView of the DiGraph as G.edges or G.edges().
edges(self, nbunch=None, data=False, default=None)
The OutEdgeView provides set-like operations on the edge-tuples
as well as edge attribute lookup. When called, it also provides
an EdgeDataView object which allows control of access to edge
attributes (but does not provide set-like operations).
Hence, `G.edges[u, v]['color']` provides the value of the color
attribute for edge `(u, v)` while
`for (u, v, c) in G.edges.data('color', default='red'):`
iterates through all the edges yielding the color attribute
with default `'red'` if no color attribute exists.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges from these nodes.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u, v, ddict[data]).
If True, return edge attribute dict in 3-tuple (u, v, ddict).
If False, return 2-tuple (u, v).
default : value, optional (default=None)
Value used for edges that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
edges : OutEdgeView
A view of edge attributes, usually it iterates over (u, v)
or (u, v, d) tuples of edges, but can also be used for
attribute lookup as `edges[u, v]['foo']`.
See Also
--------
in_edges, out_edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> nx.add_path(G, [0, 1, 2])
>>> G.add_edge(2, 3, weight=5)
>>> [e for e in G.edges]
[(0, 1), (1, 2), (2, 3)]
>>> G.edges.data() # default data is {} (empty dict)
OutEdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})])
>>> G.edges.data("weight", default=1)
OutEdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)])
>>> G.edges([0, 2]) # only edges originating from these nodes
OutEdgeDataView([(0, 1), (2, 3)])
>>> G.edges(0) # only edges from node 0
OutEdgeDataView([(0, 1)])
"""
return OutEdgeView(self)
    # alias out_edges to edges
    @cached_property
    def out_edges(self):
        # In a DiGraph, ``edges`` already reports out-edges, so this is the
        # same view type; the docstring is shared via the assignment below.
        return OutEdgeView(self)

    out_edges.__doc__ = edges.__doc__
@cached_property
def in_edges(self):
"""A view of the in edges of the graph as G.in_edges or G.in_edges().
in_edges(self, nbunch=None, data=False, default=None):
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u, v, ddict[data]).
If True, return edge attribute dict in 3-tuple (u, v, ddict).
If False, return 2-tuple (u, v).
default : value, optional (default=None)
Value used for edges that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
in_edges : InEdgeView or InEdgeDataView
A view of edge attributes, usually it iterates over (u, v)
or (u, v, d) tuples of edges, but can also be used for
attribute lookup as `edges[u, v]['foo']`.
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_edge(1, 2, color="blue")
>>> G.in_edges()
InEdgeView([(1, 2)])
>>> G.in_edges(nbunch=2)
InEdgeDataView([(1, 2)])
See Also
--------
edges
"""
return InEdgeView(self)
@cached_property
def degree(self):
"""A DegreeView for the Graph as G.degree or G.degree().
The node degree is the number of edges adjacent to the node.
The weighted node degree is the sum of the edge weights for
edges incident to that node.
This object provides an iterator for (node, degree) as well as
lookup for the degree for a single node.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
weight : string or None, optional (default=None)
The name of an edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
DiDegreeView or int
If multiple nodes are requested (the default), returns a `DiDegreeView`
mapping nodes to their degree.
If a single node is requested, returns the degree of the node as an integer.
See Also
--------
in_degree, out_degree
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.degree(0) # node 0 with degree 1
1
>>> list(G.degree([0, 1, 2]))
[(0, 1), (1, 2), (2, 2)]
"""
return DiDegreeView(self)
@cached_property
def in_degree(self):
    """An InDegreeView over (node, in_degree) pairs.

    The in-degree of a node is the number of edges pointing into it;
    with a ``weight`` edge attribute it is the sum of those edge
    weights.

    Iterating the view yields ``(node, in_degree)`` two-tuples;
    indexing by a single node returns its in-degree as an ``int``.
    Calling the view accepts ``nbunch`` and ``weight`` arguments.

    See Also
    --------
    degree, out_degree
    """
    return InDegreeView(self)
@cached_property
def out_degree(self):
    """An OutDegreeView over (node, out_degree) pairs.

    The out-degree of a node is the number of edges leaving it; with
    a ``weight`` edge attribute it is the sum of those edge weights.

    Iterating the view yields ``(node, out_degree)`` two-tuples;
    indexing by a single node returns its out-degree as an ``int``.
    Calling the view accepts ``nbunch`` and ``weight`` arguments.

    See Also
    --------
    degree, in_degree
    """
    return OutDegreeView(self)
def clear(self):
    """Remove every node and edge from the graph.

    All graph, node, and edge attributes (including the name) are
    discarded as well.
    """
    # Empty each underlying mapping in place so factory-created
    # containers are reused rather than replaced.
    for container in (self._succ, self._pred, self._node, self.graph):
        container.clear()
    nx._clear_cache(self)
def clear_edges(self):
    """Remove all edges while leaving nodes and their attributes intact."""
    # Empty both adjacency directions; the outer dicts keep one
    # (now empty) entry per node.
    for nbr_dict in self._pred.values():
        nbr_dict.clear()
    for nbr_dict in self._succ.values():
        nbr_dict.clear()
    nx._clear_cache(self)
def is_multigraph(self):
    """Return False: this class never stores parallel edges."""
    return False
def is_directed(self):
    """Return True: this class represents a directed graph."""
    return True
def to_undirected(self, reciprocal=False, as_view=False):
    """Return an undirected representation of this digraph.

    Parameters
    ----------
    reciprocal : bool (optional)
        If True, keep only edges that appear in both directions in
        the original digraph.
    as_view : bool (optional, default=False)
        If True, return an undirected *view* instead of a deep copy.

    Returns
    -------
    G : Graph
        An undirected graph with the same nodes; edge (u, v) exists
        when either (u, v) or (v, u) exists here. When both directed
        edges exist with different data, whichever is encountered
        later overwrites earlier data — check manually if that
        matters for your use.

    Notes
    -----
    Graph, node, and edge attributes are deep-copied (contrast with
    ``Graph(D)``, which is shallow). Subclasses using custom
    dict-like storage do not transfer their factories through this
    method.

    See Also
    --------
    Graph, copy, add_edge, add_edges_from
    """
    graph_class = self.to_undirected_class()
    if as_view is True:
        return nx.graphviews.generic_graph_view(self, graph_class)
    # Not a view: build a fresh graph with deep-copied attributes.
    undirected = graph_class()
    undirected.graph.update(deepcopy(self.graph))
    undirected.add_nodes_from(
        (node, deepcopy(attrs)) for node, attrs in self._node.items()
    )
    if reciprocal is True:
        # Keep an edge only when its reverse also exists.
        edge_iter = (
            (u, v, deepcopy(data))
            for u, nbrs in self._adj.items()
            for v, data in nbrs.items()
            if v in self._pred[u]
        )
    else:
        edge_iter = (
            (u, v, deepcopy(data))
            for u, nbrs in self._adj.items()
            for v, data in nbrs.items()
        )
    undirected.add_edges_from(edge_iter)
    return undirected
def reverse(self, copy=True):
    """Return the graph with every edge direction reversed.

    Parameters
    ----------
    copy : bool optional (default=True)
        If True, return a new DiGraph with deep-copied attributes
        and reversed edges. If False, return a reverse *view* of
        this graph instead.
    """
    if not copy:
        return nx.reverse_view(self)
    reversed_graph = self.__class__()
    reversed_graph.graph.update(deepcopy(self.graph))
    reversed_graph.add_nodes_from(
        (node, deepcopy(attrs)) for node, attrs in self.nodes.items()
    )
    # Flip (u, v) to (v, u), deep-copying the edge data.
    reversed_graph.add_edges_from(
        (v, u, deepcopy(data)) for u, v, data in self.edges(data=True)
    )
    return reversed_graph
| (incoming_graph_data=None, **attr) |
30,066 | networkx.classes.graph | __contains__ | Returns True if n is a node, False otherwise. Use: 'n in G'.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> 1 in G
True
def __contains__(self, n):
    """Membership test: ``n in G`` is True exactly when n is a node of G."""
    try:
        return n in self._node
    except TypeError:
        # Unhashable objects cannot be nodes, so they are never members.
        return False
| (self, n) |
30,067 | networkx.classes.graph | __getitem__ | Returns a dict of neighbors of node n. Use: 'G[n]'.
Parameters
----------
n : node
A node in the graph.
Returns
-------
adj_dict : dictionary
The adjacency dictionary for nodes connected to n.
Notes
-----
G[n] is the same as G.adj[n] and similar to G.neighbors(n)
(which is an iterator over G.adj[n])
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G[0]
AtlasView({1: {}})
def __getitem__(self, n):
    """Return the adjacency mapping of node n: ``G[n]``.

    Equivalent to ``G.adj[n]`` — a dict-like view mapping each
    neighbor of ``n`` to its edge-data dict. ``G.neighbors(n)``
    iterates over the same neighbors.
    """
    return self.adj[n]
| (self, n) |
30,068 | networkx.classes.digraph | __init__ | Initialize a graph with edges, name, or graph attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a 2D NumPy array, a
SciPy sparse array, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name="my graph")
>>> e = [(1, 2), (2, 3), (3, 4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G = nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
def __init__(self, incoming_graph_data=None, **attr):
    """Initialize a directed graph, optionally from existing data.

    Parameters
    ----------
    incoming_graph_data : input graph (optional, default: None)
        Data to populate the graph: an edge list, a NetworkX graph,
        or (when the optional packages are installed) a 2D NumPy
        array, SciPy sparse array, or PyGraphviz graph. None builds
        an empty graph.
    attr : keyword arguments, optional
        Graph-level attributes stored as key=value pairs.

    See Also
    --------
    convert
    """
    self.graph = self.graph_attr_dict_factory()  # graph-level attributes
    self._node = self.node_dict_factory()  # node -> node-attribute dict
    # Two adjacency structures are kept for a digraph:
    # successors in self._adj (aliased as self._succ) and
    # predecessors in self._pred.
    self._adj = self.adjlist_outer_dict_factory()  # successor adjacency
    self._pred = self.adjlist_outer_dict_factory()  # predecessor adjacency
    # Note: self._succ = self._adj  # successor
    self.__networkx_cache__ = {}
    # Populate from the provided data first...
    if incoming_graph_data is not None:
        convert.to_networkx_graph(incoming_graph_data, create_using=self)
    # ...then apply keyword attributes so they take precedence.
    self.graph.update(attr)
| (self, incoming_graph_data=None, **attr) |
30,069 | networkx.classes.graph | __iter__ | Iterate over the nodes. Use: 'for n in G'.
Returns
-------
niter : iterator
An iterator over all nodes in the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> [n for n in G]
[0, 1, 2, 3]
>>> list(G)
[0, 1, 2, 3]
def __iter__(self):
    """Iterate over the graph's nodes: ``for n in G``.

    Returns an iterator over every node in the graph.
    """
    return iter(self._node)
| (self) |
30,070 | networkx.classes.graph | __len__ | Returns the number of nodes in the graph. Use: 'len(G)'.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
number_of_nodes: identical method
order: identical method
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> len(G)
4
def __len__(self):
    """Return the number of nodes: ``len(G)``.

    See Also
    --------
    number_of_nodes : identical method
    order : identical method
    """
    return len(self._node)
| (self) |
30,071 | networkx.classes.graph | __str__ | Returns a short summary of the graph.
Returns
-------
info : string
Graph information including the graph name (if any), graph type, and the
number of nodes and edges.
Examples
--------
>>> G = nx.Graph(name="foo")
>>> str(G)
"Graph named 'foo' with 0 nodes and 0 edges"
>>> G = nx.path_graph(3)
>>> str(G)
'Graph with 3 nodes and 2 edges'
def __str__(self):
    """Return a short summary: type, optional name, node/edge counts.

    Example: ``"Graph named 'foo' with 0 nodes and 0 edges"``.
    """
    name_part = f" named {self.name!r}" if self.name else ""
    count_part = (
        f" with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges"
    )
    return type(self).__name__ + name_part + count_part
| (self) |
30,072 | networkx.classes.digraph | add_edge | Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by directly
accessing the edge's attribute dictionary. See examples below.
Parameters
----------
u_of_edge, v_of_edge : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use
an edge attribute (by default `weight`) to hold a numerical value.
Examples
--------
The following all add the edge e=(1, 2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1, 2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from([(1, 2)]) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
For non-string attribute keys, use subscript notation.
>>> G.add_edge(1, 2)
>>> G[1][2].update({0: 5})
>>> G.edges[1, 2].update({0: 5})
def add_edge(self, u_of_edge, v_of_edge, **attr):
    """Add the directed edge u->v, creating missing endpoints.

    Parameters
    ----------
    u_of_edge, v_of_edge : nodes
        Hashable (non-None) Python objects; added to the graph if
        not already present.
    attr : keyword arguments, optional
        Edge data assigned as key=value pairs.

    Notes
    -----
    Re-adding an existing edge updates its data dict in place.

    See Also
    --------
    add_edges_from : add a collection of edges
    """
    u, v = u_of_edge, v_of_edge
    # Ensure both endpoints exist with empty adjacency/attribute dicts.
    for node in (u, v):
        if node not in self._succ:
            if node is None:
                raise ValueError("None cannot be a node")
            self._succ[node] = self.adjlist_inner_dict_factory()
            self._pred[node] = self.adjlist_inner_dict_factory()
            self._node[node] = self.node_attr_dict_factory()
    # Reuse the existing edge-data dict when the edge already exists,
    # then store the same dict in both adjacency directions.
    datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
    datadict.update(attr)
    self._succ[u][v] = datadict
    self._pred[v][u] = datadict
    nx._clear_cache(self)
| (self, u_of_edge, v_of_edge, **attr) |
30,073 | networkx.classes.digraph | add_edges_from | Add all the edges in ebunch_to_add.
Parameters
----------
ebunch_to_add : container of edges
Each edge given in the container will be added to the
graph. The edges must be given as 2-tuples (u, v) or
3-tuples (u, v, d) where d is a dictionary containing edge data.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Edge attributes specified in an ebunch take precedence over
attributes specified via keyword arguments.
When adding edges from an iterator over the graph you are changing,
a `RuntimeError` can be raised with message:
`RuntimeError: dictionary changed size during iteration`. This
happens when the graph's underlying dictionary is modified during
iteration. To avoid this error, evaluate the iterator into a separate
object, e.g. by using `list(iterator_of_edges)`, and pass this
object to `G.add_edges_from`.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples
>>> e = zip(range(0, 3), range(1, 4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
>>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898")
Evaluate an iterator over a graph if using it to modify the same graph
>>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
>>> # Grow graph by one new node, adding edges to all existing nodes.
>>> # wrong way - will raise RuntimeError
>>> # G.add_edges_from(((5, n) for n in G.nodes))
>>> # right way - note that there will be no self-edge for node 5
>>> G.add_edges_from(list((5, n) for n in G.nodes))
def add_edges_from(self, ebunch_to_add, **attr):
    """Add every edge in ebunch_to_add.

    Parameters
    ----------
    ebunch_to_add : container of edges
        Edges given as 2-tuples (u, v) or 3-tuples (u, v, d) where
        d is an edge-data dict. Missing endpoints are created.
    attr : keyword arguments, optional
        Edge data applied to every added edge; per-edge dicts in the
        ebunch take precedence over these keywords.

    Notes
    -----
    Re-adding an existing edge updates its data. Do not pass an
    iterator over the graph being modified — materialize it with
    ``list(...)`` first, or a ``RuntimeError`` may be raised.

    See Also
    --------
    add_edge : add a single edge
    add_weighted_edges_from : convenient way to add weighted edges
    """
    for e in ebunch_to_add:
        # Unpack a 2- or 3-tuple; anything else is an error.
        if len(e) == 3:
            u, v, per_edge_data = e
        elif len(e) == 2:
            u, v = e
            per_edge_data = {}
        else:
            raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
        for node in (u, v):
            if node not in self._succ:
                if node is None:
                    raise ValueError("None cannot be a node")
                self._succ[node] = self.adjlist_inner_dict_factory()
                self._pred[node] = self.adjlist_inner_dict_factory()
                self._node[node] = self.node_attr_dict_factory()
        # Keyword attrs first, then per-edge data so it wins on conflict;
        # the same dict object backs both adjacency directions.
        datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
        datadict.update(attr)
        datadict.update(per_edge_data)
        self._succ[u][v] = datadict
        self._pred[v][u] = datadict
    nx._clear_cache(self)
| (self, ebunch_to_add, **attr) |
30,074 | networkx.classes.digraph | add_node | Add a single node `node_for_adding` and update node attributes.
Parameters
----------
node_for_adding : node
A node can be any hashable Python object except None.
attr : keyword arguments, optional
Set or change node attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node("Hello")
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1, size=10)
>>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
def add_node(self, node_for_adding, **attr):
    """Add one node and set or update its attributes.

    Parameters
    ----------
    node_for_adding : node
        Any hashable Python object except None.
    attr : keyword arguments, optional
        Node attributes set/updated as key=value pairs.

    Notes
    -----
    Adding a node that is already present only merges the new
    attributes into its existing attribute dict.

    See Also
    --------
    add_nodes_from
    """
    if node_for_adding in self._succ:
        # Existing node: just merge in the new attributes.
        self._node[node_for_adding].update(attr)
    else:
        if node_for_adding is None:
            raise ValueError("None cannot be a node")
        self._succ[node_for_adding] = self.adjlist_inner_dict_factory()
        self._pred[node_for_adding] = self.adjlist_inner_dict_factory()
        fresh_attrs = self._node[node_for_adding] = self.node_attr_dict_factory()
        fresh_attrs.update(attr)
    nx._clear_cache(self)
| (self, node_for_adding, **attr) |
30,075 | networkx.classes.digraph | add_nodes_from | Add multiple nodes.
Parameters
----------
nodes_for_adding : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple take
precedence over attributes specified via keyword arguments.
See Also
--------
add_node
Notes
-----
When adding nodes from an iterator over the graph you are changing,
a `RuntimeError` can be raised with message:
`RuntimeError: dictionary changed size during iteration`. This
happens when the graph's underlying dictionary is modified during
iteration. To avoid this error, evaluate the iterator into a separate
object, e.g. by using `list(iterator_of_nodes)`, and pass this
object to `G.add_nodes_from`.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from("Hello")
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(), key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1, 2], size=10)
>>> G.add_nodes_from([3, 4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific nodes.
>>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})])
>>> G.nodes[1]["size"]
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.nodes[1]["size"]
11
Evaluate an iterator over a graph if using it to modify the same graph
>>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)])
>>> # wrong way - will raise RuntimeError
>>> # G.add_nodes_from(n + 1 for n in G.nodes)
>>> # correct way
>>> G.add_nodes_from(list(n + 1 for n in G.nodes))
def add_nodes_from(self, nodes_for_adding, **attr):
    """Add multiple nodes, optionally with per-node attribute dicts.

    Parameters
    ----------
    nodes_for_adding : iterable container
        Either plain nodes, or (node, attribute_dict) tuples whose
        dict updates that node's attributes.
    attr : keyword arguments, optional
        Attributes applied to every node; per-node dicts given as
        tuples take precedence over these keywords.

    Notes
    -----
    Do not pass an iterator over the graph being modified —
    materialize it with ``list(...)`` first, or a ``RuntimeError``
    may be raised.

    See Also
    --------
    add_node
    """
    for item in nodes_for_adding:
        try:
            is_new = item not in self._node
            node, new_attrs = item, attr
        except TypeError:
            # Unhashable item: treat it as a (node, attr_dict) pair,
            # with the per-node dict overriding the keyword attrs.
            node, ndict = item
            is_new = node not in self._node
            new_attrs = {**attr, **ndict}
        if is_new:
            if node is None:
                raise ValueError("None cannot be a node")
            self._succ[node] = self.adjlist_inner_dict_factory()
            self._pred[node] = self.adjlist_inner_dict_factory()
            self._node[node] = self.node_attr_dict_factory()
        self._node[node].update(new_attrs)
    nx._clear_cache(self)
| (self, nodes_for_adding, **attr) |
30,076 | networkx.classes.graph | add_weighted_edges_from | Add weighted edges in `ebunch_to_add` with specified weight attr
Parameters
----------
ebunch_to_add : container of edges
Each edge given in the list or container will be added
to the graph. The edges must be given as 3-tuples (u, v, w)
where w is a number.
weight : string, optional (default= 'weight')
The attribute name for the edge weights to be added.
attr : keyword arguments, optional (default= no attributes)
Edge attributes to add/update for all edges.
See Also
--------
add_edge : add a single edge
add_edges_from : add multiple edges
Notes
-----
Adding the same edge twice for Graph/DiGraph simply updates
the edge data. For MultiGraph/MultiDiGraph, duplicate edges
are stored.
When adding edges from an iterator over the graph you are changing,
a `RuntimeError` can be raised with message:
`RuntimeError: dictionary changed size during iteration`. This
happens when the graph's underlying dictionary is modified during
iteration. To avoid this error, evaluate the iterator into a separate
object, e.g. by using `list(iterator_of_edges)`, and pass this
object to `G.add_weighted_edges_from`.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_weighted_edges_from([(0, 1, 3.0), (1, 2, 7.5)])
Evaluate an iterator over edges before passing it
>>> G = nx.Graph([(1, 2), (2, 3), (3, 4)])
>>> weight = 0.1
>>> # Grow graph by one new node, adding edges to all existing nodes.
>>> # wrong way - will raise RuntimeError
>>> # G.add_weighted_edges_from(((5, n, weight) for n in G.nodes))
>>> # correct way - note that there will be no self-edge for node 5
>>> G.add_weighted_edges_from(list((5, n, weight) for n in G.nodes))
def add_weighted_edges_from(self, ebunch_to_add, weight="weight", **attr):
    """Add (u, v, w) triples as edges with w stored under ``weight``.

    Parameters
    ----------
    ebunch_to_add : container of edges
        Edges as 3-tuples (u, v, w) where w is a number.
    weight : string, optional (default= 'weight')
        Attribute name under which each w is stored.
    attr : keyword arguments, optional
        Additional edge attributes applied to every edge.

    Notes
    -----
    Do not pass an iterator over the graph being modified —
    materialize it with ``list(...)`` first.

    See Also
    --------
    add_edge : add a single edge
    add_edges_from : add multiple edges
    """
    triples = ((u, v, {weight: w}) for u, v, w in ebunch_to_add)
    self.add_edges_from(triples, **attr)
    nx._clear_cache(self)
| (self, ebunch_to_add, weight='weight', **attr) |
30,077 | networkx.classes.graph | adjacency | Returns an iterator over (node, adjacency dict) tuples for all nodes.
For directed graphs, only outgoing neighbors/adjacencies are included.
Returns
-------
adj_iter : iterator
An iterator over (node, adjacency dictionary) for all nodes in
the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> [(n, nbrdict) for n, nbrdict in G.adjacency()]
[(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]
def adjacency(self):
    """Iterate over (node, adjacency dict) pairs for every node.

    For directed graphs only outgoing (successor) adjacencies are
    reported.
    """
    return iter(self._adj.items())
| (self) |
30,078 | networkx.classes.digraph | clear | Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear()
>>> list(G.nodes)
[]
>>> list(G.edges)
[]
| def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear()
>>> list(G.nodes)
[]
>>> list(G.edges)
[]
"""
self._succ.clear()
self._pred.clear()
self._node.clear()
self.graph.clear()
nx._clear_cache(self)
| (self) |
30,079 | networkx.classes.digraph | clear_edges | Remove all edges from the graph without altering nodes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear_edges()
>>> list(G.nodes)
[0, 1, 2, 3]
>>> list(G.edges)
[]
| def clear_edges(self):
"""Remove all edges from the graph without altering nodes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear_edges()
>>> list(G.nodes)
[0, 1, 2, 3]
>>> list(G.edges)
[]
"""
for predecessor_dict in self._pred.values():
predecessor_dict.clear()
for successor_dict in self._succ.values():
successor_dict.clear()
nx._clear_cache(self)
| (self) |
30,080 | networkx.classes.graph | copy | Returns a copy of the graph.
The copy method by default returns an independent shallow copy
of the graph and attributes. That is, if an attribute is a
container, that container is shared by the original an the copy.
Use Python's `copy.deepcopy` for new containers.
If `as_view` is True then a view is returned instead of a copy.
Notes
-----
All copies reproduce the graph structure, but data attributes
may be handled in different ways. There are four types of copies
of a graph that people might want.
Deepcopy -- A "deepcopy" copies the graph structure as well as
all data attributes and any objects they might contain.
The entire graph object is new so that changes in the copy
do not affect the original object. (see Python's copy.deepcopy)
Data Reference (Shallow) -- For a shallow copy the graph structure
is copied but the edge, node and graph attribute dicts are
references to those in the original graph. This saves
time and memory but could cause confusion if you change an attribute
in one graph and it changes the attribute in the other.
NetworkX does not provide this level of shallow copy.
Independent Shallow -- This copy creates new independent attribute
dicts and then does a shallow copy of the attributes. That is, any
attributes that are containers are shared between the new graph
and the original. This is exactly what `dict.copy()` provides.
You can obtain this style copy using:
>>> G = nx.path_graph(5)
>>> H = G.copy()
>>> H = G.copy(as_view=False)
>>> H = nx.Graph(G)
>>> H = G.__class__(G)
Fresh Data -- For fresh data, the graph structure is copied while
new empty data attribute dicts are created. The resulting graph
is independent of the original and it has no edge, node or graph
attributes. Fresh copies are not enabled. Instead use:
>>> H = G.__class__()
>>> H.add_nodes_from(G)
>>> H.add_edges_from(G.edges)
View -- Inspired by dict-views, graph-views act like read-only
versions of the original graph, providing a copy of the original
structure without requiring any memory for copying the information.
See the Python copy module for more information on shallow
and deep copies, https://docs.python.org/3/library/copy.html.
Parameters
----------
as_view : bool, optional (default=False)
If True, the returned graph-view provides a read-only view
of the original graph without actually copying any data.
Returns
-------
G : Graph
A copy of the graph.
See Also
--------
to_directed: return a directed copy of the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> H = G.copy()
| def copy(self, as_view=False):
"""Returns a copy of the graph.
The copy method by default returns an independent shallow copy
of the graph and attributes. That is, if an attribute is a
container, that container is shared by the original an the copy.
Use Python's `copy.deepcopy` for new containers.
If `as_view` is True then a view is returned instead of a copy.
Notes
-----
All copies reproduce the graph structure, but data attributes
may be handled in different ways. There are four types of copies
of a graph that people might want.
Deepcopy -- A "deepcopy" copies the graph structure as well as
all data attributes and any objects they might contain.
The entire graph object is new so that changes in the copy
do not affect the original object. (see Python's copy.deepcopy)
Data Reference (Shallow) -- For a shallow copy the graph structure
is copied but the edge, node and graph attribute dicts are
references to those in the original graph. This saves
time and memory but could cause confusion if you change an attribute
in one graph and it changes the attribute in the other.
NetworkX does not provide this level of shallow copy.
Independent Shallow -- This copy creates new independent attribute
dicts and then does a shallow copy of the attributes. That is, any
attributes that are containers are shared between the new graph
and the original. This is exactly what `dict.copy()` provides.
You can obtain this style copy using:
>>> G = nx.path_graph(5)
>>> H = G.copy()
>>> H = G.copy(as_view=False)
>>> H = nx.Graph(G)
>>> H = G.__class__(G)
Fresh Data -- For fresh data, the graph structure is copied while
new empty data attribute dicts are created. The resulting graph
is independent of the original and it has no edge, node or graph
attributes. Fresh copies are not enabled. Instead use:
>>> H = G.__class__()
>>> H.add_nodes_from(G)
>>> H.add_edges_from(G.edges)
View -- Inspired by dict-views, graph-views act like read-only
versions of the original graph, providing a copy of the original
structure without requiring any memory for copying the information.
See the Python copy module for more information on shallow
and deep copies, https://docs.python.org/3/library/copy.html.
Parameters
----------
as_view : bool, optional (default=False)
If True, the returned graph-view provides a read-only view
of the original graph without actually copying any data.
Returns
-------
G : Graph
A copy of the graph.
See Also
--------
to_directed: return a directed copy of the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> H = G.copy()
"""
if as_view is True:
return nx.graphviews.generic_graph_view(self)
G = self.__class__()
G.graph.update(self.graph)
G.add_nodes_from((n, d.copy()) for n, d in self._node.items())
G.add_edges_from(
(u, v, datadict.copy())
for u, nbrs in self._adj.items()
for v, datadict in nbrs.items()
)
return G
| (self, as_view=False) |
30,081 | networkx.classes.graph | edge_subgraph | Returns the subgraph induced by the specified edges.
The induced subgraph contains each edge in `edges` and each
node incident to any one of those edges.
Parameters
----------
edges : iterable
An iterable of edges in this graph.
Returns
-------
G : Graph
An edge-induced subgraph of this graph with the same edge
attributes.
Notes
-----
The graph, edge, and node attributes in the returned subgraph
view are references to the corresponding attributes in the original
graph. The view is read-only.
To create a full graph version of the subgraph with its own copy
of the edge or node attributes, use::
G.edge_subgraph(edges).copy()
Examples
--------
>>> G = nx.path_graph(5)
>>> H = G.edge_subgraph([(0, 1), (3, 4)])
>>> list(H.nodes)
[0, 1, 3, 4]
>>> list(H.edges)
[(0, 1), (3, 4)]
| def edge_subgraph(self, edges):
"""Returns the subgraph induced by the specified edges.
The induced subgraph contains each edge in `edges` and each
node incident to any one of those edges.
Parameters
----------
edges : iterable
An iterable of edges in this graph.
Returns
-------
G : Graph
An edge-induced subgraph of this graph with the same edge
attributes.
Notes
-----
The graph, edge, and node attributes in the returned subgraph
view are references to the corresponding attributes in the original
graph. The view is read-only.
To create a full graph version of the subgraph with its own copy
of the edge or node attributes, use::
G.edge_subgraph(edges).copy()
Examples
--------
>>> G = nx.path_graph(5)
>>> H = G.edge_subgraph([(0, 1), (3, 4)])
>>> list(H.nodes)
[0, 1, 3, 4]
>>> list(H.edges)
[(0, 1), (3, 4)]
"""
return nx.edge_subgraph(self, edges)
| (self, edges) |
30,082 | networkx.classes.graph | get_edge_data | Returns the attribute dictionary associated with edge (u, v).
This is identical to `G[u][v]` except the default is returned
instead of an exception if the edge doesn't exist.
Parameters
----------
u, v : nodes
default: any Python object (default=None)
Value to return if the edge (u, v) is not found.
Returns
-------
edge_dict : dictionary
The edge attribute dictionary.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G[0][1]
{}
Warning: Assigning to `G[u][v]` is not permitted.
But it is safe to assign attributes `G[u][v]['foo']`
>>> G[0][1]["weight"] = 7
>>> G[0][1]["weight"]
7
>>> G[1][0]["weight"]
7
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.get_edge_data(0, 1) # default edge data is {}
{}
>>> e = (0, 1)
>>> G.get_edge_data(*e) # tuple form
{}
>>> G.get_edge_data("a", "b", default=0) # edge not in graph, return 0
0
| def get_edge_data(self, u, v, default=None):
"""Returns the attribute dictionary associated with edge (u, v).
This is identical to `G[u][v]` except the default is returned
instead of an exception if the edge doesn't exist.
Parameters
----------
u, v : nodes
default: any Python object (default=None)
Value to return if the edge (u, v) is not found.
Returns
-------
edge_dict : dictionary
The edge attribute dictionary.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G[0][1]
{}
Warning: Assigning to `G[u][v]` is not permitted.
But it is safe to assign attributes `G[u][v]['foo']`
>>> G[0][1]["weight"] = 7
>>> G[0][1]["weight"]
7
>>> G[1][0]["weight"]
7
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.get_edge_data(0, 1) # default edge data is {}
{}
>>> e = (0, 1)
>>> G.get_edge_data(*e) # tuple form
{}
>>> G.get_edge_data("a", "b", default=0) # edge not in graph, return 0
0
"""
try:
return self._adj[u][v]
except KeyError:
return default
| (self, u, v, default=None) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.