Dataset schema (column: type / value statistics):
- repo: string (856 distinct values)
- pull_number: int64 (3 to 127k)
- instance_id: string (length 12 to 58)
- issue_numbers: sequence of strings (length 1 to 5)
- base_commit: string (length 40)
- patch: string (length 67 to 1.54M)
- test_patch: string (length 0 to 107M)
- problem_statement: string (length 3 to 307k)
- hints_text: string (length 0 to 908k)
- created_at: timestamp[s]
huggingface/trl
1,617
huggingface__trl-1617
[ "1543" ]
75de236c09bd5846f79c24d9bf371481b0b7582c
diff --git a/trl/models/utils.py b/trl/models/utils.py --- a/trl/models/utils.py +++ b/trl/models/utils.py @@ -1,3 +1,4 @@ +import itertools from contextlib import contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union @@ -118,6 +119,9 @@ def remove_hooks(model: "DeepSpeedEngine") -> None: elif model.optimizer is not None: optimizer_offload = model.optimizer + for param in iter_params(optimizer_offload.module, recurse=True): + param.ds_active_sub_modules.clear() + for hook in optimizer_offload.forward_hooks: hook.remove() for hook in optimizer_offload.backward_hooks: @@ -127,6 +131,14 @@ def remove_hooks(model: "DeepSpeedEngine") -> None: optimizer_offload.backward_hooks = [] +def get_all_parameters(sub_module, recurse=False): + return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters()) + + +def iter_params(module, recurse=False): + return [param for _, param in get_all_parameters(module, recurse)] + + def add_hooks(model: "DeepSpeedEngine") -> None: """Adds the optimizer hooks from a DeepSpeed ZeRO-3 model.""" if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"): @@ -141,7 +153,6 @@ def unwrap_model_for_generation( model: Union["DistributedDataParallel", "DeepSpeedEngine"], accelerator: "Accelerator", is_peft_model: bool = False ) -> Union["PreTrainedModelWrapper", "DeepSpeedEngine"]: """Context manager to unwrap a model for generation. - For ZeRO-3 models, we gather the weights once to speed up generation. """ unwrapped_model = accelerator.unwrap_model(model)
Speed up ZeRO-3 generation with DPO Hi, a [recent PR](https://github.com/huggingface/trl/pull/1483) brought large improvements (x10) to PPO generation with ZeRO-3. @lewtun, you mention on the PR that it can be adapted for other trainers. I gave it a quick shot, and it seems that naively applying the context manager to trainers like DPO does not work: ``` in remove_hooks if model.optimizer is not None and hasattr( ^^^^^^^^^^^^^^^^^^^^ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") AttributeError: 'GPTNeoXForCausalLM' object has no attribute 'optimizer' ``` There seems to be an inconsistency between the base classes. Is there a reason why DPO is based on Trainer from transformers and PPO on BaseTrainer? What would be the easy way to add this feature to other trainers? Thanks!
Passing **self.model_wrapped** to **unwrap_model_for_generation** instead gives: ``` deepspeed/runtime/zero/partitioned_param_coordinator.py", line 194, in record_parameters step_id = self.__step_id_module_fetched_for[sub_module.id].popleft() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ IndexError: pop from an empty deque ``` Is it related to the way the model removes/adds hooks?
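For context, a minimal usage sketch of the context manager under discussion (the signature is taken from the diff above; `model`, `accelerator`, and `input_ids` are assumed to come from an existing Accelerate/DeepSpeed training setup, not from this record):

```python
from trl.models.utils import unwrap_model_for_generation

# `model`, `accelerator`, and `input_ids` are assumed from the surrounding
# trainer; they are not defined in this record.
with unwrap_model_for_generation(model, accelerator) as unwrapped_model:
    # Under ZeRO-3 the weights are gathered once on entry, so generate()
    # runs without per-forward parameter-fetch hooks; hooks are restored on exit.
    output_ids = unwrapped_model.generate(input_ids, max_new_tokens=64)
```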
2024-05-03T09:51:40
huggingface/trl
1,713
huggingface__trl-1713
[ "1712" ]
b8b972fde183ec036885738e1439cd99877c2ad5
diff --git a/trl/commands/cli_utils.py b/trl/commands/cli_utils.py --- a/trl/commands/cli_utils.py +++ b/trl/commands/cli_utils.py @@ -75,7 +75,9 @@ def merge_dataclasses(self, dataclasses): field_name = data_class_field.name field_value = getattr(dataclass, field_name) - if not isinstance(dataclass, TrainingArguments): + if not isinstance(dataclass, TrainingArguments) or not hasattr( + self._dummy_training_args, field_name + ): default_value = data_class_field.default else: default_value = ( @@ -95,12 +97,13 @@ setattr(dataclasses_copy[i], field_name, value_to_replace) # Otherwise do nothing - # Re-init `TrainingArguments` to handle all post-processing correctly + # Re-init `TrainingArguments` or derived class to handle all post-processing correctly if is_hf_training_args: - init_signature = list(inspect.signature(TrainingArguments.__init__).parameters) + ArgCls = type(dataclass) + init_signature = list(inspect.signature(ArgCls.__init__).parameters) dict_dataclass = asdict(dataclasses_copy[i]) new_dict_dataclass = {k: v for k, v in dict_dataclass.items() if k in init_signature} - dataclasses_copy[i] = TrainingArguments(**new_dict_dataclass) + dataclasses_copy[i] = ArgCls(**new_dict_dataclass) return dataclasses_copy @@ -141,12 +144,16 @@ def warning_handler(message, category, filename, lineno, file=None, line=None): @dataclass class SFTScriptArguments: - dataset_name: str = field(default="timdettmers/openassistant-guanaco", metadata={"help": "the dataset name"}) + dataset_name: str = field( + default="timdettmers/openassistant-guanaco", + metadata={"help": "the dataset name"}, + ) dataset_train_split: str = field(default="train", metadata={"help": "The dataset split to train on"}) dataset_test_split: str = field(default="test", metadata={"help": "The dataset split to evaluate on"}) config: str = field(default=None, metadata={"help": "Path to the optional config file"}) gradient_checkpointing_use_reentrant: bool = field( - default=False, metadata={"help": "Whether to apply `use_reentrant` for gradient_checkpointing"} + default=False, + metadata={"help": "Whether to apply `use_reentrant` for gradient_checkpointing"}, ) @@ -166,7 +173,8 @@ class DPOScriptArguments: ) config: str = field(default=None, metadata={"help": "Path to the optional config file"}) gradient_checkpointing_use_reentrant: bool = field( - default=False, metadata={"help": "Whether to apply `use_reentrant` for gradient_checkpointing"} + default=False, + metadata={"help": "Whether to apply `use_reentrant` for gradient_checkpointing"}, ) @@ -229,10 +237,12 @@ class ChatArguments: }, ) load_in_8bit: bool = field( - default=False, metadata={"help": "use 8 bit precision for the base model - works only with LoRA"} + default=False, + metadata={"help": "use 8 bit precision for the base model - works only with LoRA"}, ) load_in_4bit: bool = field( - default=False, metadata={"help": "use 4 bit precision for the base model - works only with LoRA"} + default=False, + metadata={"help": "use 4 bit precision for the base model - works only with LoRA"}, ) bnb_4bit_quant_type: str = field(default="nf4", metadata={"help": "precise the quantization type (fp4 or nf4)"}) @@ -264,7 +274,10 @@ def post_process_dataclasses(self, dataclasses): if dataclass_obj.__class__.__name__ == "TrainingArguments": training_args = dataclass_obj training_args_index = i - elif dataclass_obj.__class__.__name__ in ("SFTScriptArguments", "DPOScriptArguments"): + elif dataclass_obj.__class__.__name__ in ( + "SFTScriptArguments", + "DPOScriptArguments", + ): trl_args = dataclass_obj else: ...
YamlConfigParser fails on RewardConfig, DPOConfig etc. Using the YamlConfigParser with derived classes of `TrainingArguments` throws an error, because it assumes that every object for which `isinstance(obj, TrainingArguments)` holds is exactly a `TrainingArguments` instance, not an instance of a derived class.
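The root cause is easy to reproduce. A minimal sketch, using a hypothetical stand-in subclass rather than trl's actual config classes:

```python
from dataclasses import dataclass, field
from transformers import TrainingArguments

@dataclass
class DPOConfig(TrainingArguments):  # stand-in for trl's real derived config
    beta: float = field(default=0.1)

args = DPOConfig(output_dir="out")
print(isinstance(args, TrainingArguments))  # True: the old check also matched subclasses
print(type(args) is TrainingArguments)      # False: re-initialization must use type(args)
```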
2024-06-06T22:57:04
watchdogpolska/feder
311
watchdogpolska__feder-311
[ "287", "286" ]
b42b305fb3868847f95e7183bcfe759edeceb0e9
diff --git a/config/settings/common.py b/config/settings/common.py --- a/config/settings/common.py +++ b/config/settings/common.py @@ -326,6 +326,8 @@ }, 'gilab': {'SCOPE': ['read_user', 'openid']} } +EMAIL_NOTIFICATION = env('EMAIL_NOTIFICATION', default='[email protected]') + EMAILLABS_APP_KEY = env('EMAILLABS_APP_KEY', default="Dummy") EMAILLABS_SECRET_KEY = env('EMAILLABS_SECRET_KEY', default="Dummy")
diff --git a/feder/alerts/tests.py b/feder/alerts/tests.py --- a/feder/alerts/tests.py +++ b/feder/alerts/tests.py @@ -37,6 +37,11 @@ class AlertCreateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): def get_url(self): return reverse('alerts:create', kwargs={"monitoring": self.monitoring.pk}) + def test_create(self): + self.grant_permission() + self.client.login(username='john', password='pass') + self.client.post(self.get_url(), {'reason': 'test'}) + class AlertUpdateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): permission = ['monitorings.change_alert', ] @@ -65,7 +70,7 @@ def test_status_action(self): self.alert.refresh_from_db() self.assertEqual(self.alert.solver, self.user) self.assertEqual(self.alert.status, True) - self.client.post(self.get_url(), {}) self.alert.refresh_from_db() self.assertEqual(self.alert.status, False) +
Page titles The titles (```<title>(…)</title>```) of most subpages in the service need to be updated so that they reflect the page the user is currently on. Something close to reversed breadcrumbs without the links would do. This requires knowledge of HTML only, and it is a chance to get familiar with the application. Django-debug-toolbar points out exactly which file needs modification. AttributeError: 'Settings' object has no attribute 'EMAIL_NOTIFICATION' ``` AttributeError: 'Settings' object has no attribute 'EMAIL_NOTIFICATION' File "django/core/handlers/exception.py", line 41, in inner response = get_response(request) File "django/core/handlers/base.py", line 249, in _legacy_get_response response = self._get_response(request) File "django/core/handlers/base.py", line 187, in _get_response response = self.process_exception_by_middleware(e, request) File "django/core/handlers/base.py", line 185, in _get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "django/views/generic/base.py", line 68, in view return self.dispatch(request, *args, **kwargs) File "django/views/generic/base.py", line 88, in dispatch return handler(request, *args, **kwargs) File "atom/views.py", line 81, in post response = super(ActionMessageMixin, self).post(request, *args, **kwargs) File "atom/views.py", line 55, in post self.action() File "feder/letters/views.py", line 234, in action link_object=self.object) File "django/db/models/manager.py", line 85, in manager_method return getattr(self.get_queryset(), name)(*args, **kwargs) File "django/db/models/query.py", line 394, in create obj.save(force_insert=True, using=self.db) File "django/db/models/base.py", line 806, in save force_update=force_update, update_fields=update_fields) File "django/db/models/base.py", line 846, in save_base update_fields=update_fields, raw=raw, using=using, File "django/dispatch/dispatcher.py", line 193, in send for receiver in self._live_receivers(sender) File "feder/alerts/models.py", line 74, in notify_users from_email=settings.EMAIL_NOTIFICATION, File "django/conf/__init__.py", line 57, in __getattr__ val = getattr(self._wrapped, name) ```
@ad-m I can take care of this. 1. I noticed that the "main" subpages have a correct title. I understand this is about pages of e.g. a single institution, case, monitoring, etc.? @jacekll , yes, so that the browser tab shows what the subpage is about. @ad-m is it enough to configure the sender's e-mail address? If so, what address should be set? In config/settings/common.py? @jacekll , yes, configuring it is enough; I suggest ``[email protected]`` as the default, implemented via [django-environ](https://github.com/joke2k/django-environ), so that it can be changed if needed, or so that anyone running the application can change it. BTW, I recommend [The Twelve-Factor App](https://12factor.net/pl/) if you don't know it. This error came up for me in practice and was not covered by any tests, so while you're at it you can first write a failing test and only then fix it (a regression guard for this issue), because it shows we have a coverage problem around ``File "feder/letters/views.py", line 234``.
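A minimal sketch of the django-environ pattern suggested above (the default address below is a hypothetical placeholder, since the real default is redacted in this record):

```python
# config/settings/common.py (sketch)
import environ

env = environ.Env()

# Overridable via the EMAIL_NOTIFICATION environment variable;
# 'notifications@example.com' is only a placeholder default.
EMAIL_NOTIFICATION = env('EMAIL_NOTIFICATION', default='notifications@example.com')
```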
2017-09-02T12:27:22
watchdogpolska/feder
312
watchdogpolska__feder-312
[ "290" ]
69d6aaed995c46fad3c7bbea9831d9d05ccf5106
diff --git a/feder/letters/models.py b/feder/letters/models.py --- a/feder/letters/models.py +++ b/feder/letters/models.py @@ -24,7 +24,6 @@ from django_mailbox.signals import message_received from model_utils import Choices from model_utils.models import TimeStampedModel - from feder.cases.models import Case from feder.institutions.models import Institution from .utils import email_wrapper, normalize_msg_id @@ -114,8 +113,16 @@ def is_incoming(self): def is_outgoing(self): return bool(self.author_user_id) + def get_title(self): + if self.title and self.title.strip(): + return self.title + return _("(no subject)") + def __str__(self): - return self.title + return unicode(self.get_title()) + + def __unicode__(self): + return unicode(self.get_title()) def get_absolute_url(self): return reverse('letters:details', kwargs={'pk': self.pk})
diff --git a/feder/letters/tests/test_model.py b/feder/letters/tests/test_model.py --- a/feder/letters/tests/test_model.py +++ b/feder/letters/tests/test_model.py @@ -26,6 +26,11 @@ def test_is_outgoing(self): self.assertTrue(OutgoingLetterFactory().is_outgoing) self.assertFalse(IncomingLetterFactory().is_outgoing) + def test_default_subject(self): + incoming = IncomingLetterFactory() + incoming.title = '' + self.assertGreater(len(str(incoming)), 0) + def test_author_for_user(self): obj = OutgoingLetterFactory() self.assertEqual(obj.author, obj.author_user)
Define a default message title https://fedrowanie.siecobywatelska.pl//listy/70 A ```get_*_display``` needs to be written.
I am afraid that get_*_display may not work. It will be necessary to override ``__str__``, or to build a property (getter) for this.
2017-09-02T13:12:53
watchdogpolska/feder
317
watchdogpolska__feder-317
[ "42" ]
ce84da2d5a250104ab01ab861f52e305851c525a
diff --git a/feder/letters/logs/models.py b/feder/letters/logs/models.py --- a/feder/letters/logs/models.py +++ b/feder/letters/logs/models.py @@ -33,7 +33,7 @@ def with_logrecord_count(self): class EmailLog(TimeStampedModel): status = models.CharField(choices=STATUS, default=STATUS.unknown, max_length=20) case = models.ForeignKey(Case, max_length=_("Case")) - letter = models.ForeignKey(Letter, max_length=_("Letter"), null=True, blank=True) + letter = models.OneToOneField(Letter, max_length=_("Letter"), null=True, blank=True) email_id = models.CharField(verbose_name=_("Message-ID"), max_length=255) to = models.CharField(verbose_name=_("To"), max_length=255) objects = EmailQuerySet.as_manager()
Add a tooltip with metadata on LetterDetailView ![zaznaczenie_418](https://cloud.githubusercontent.com/assets/3618479/9562630/74158af4-4e71-11e5-8c87-566c046f1840.png) http://giodo.gov.pl/1520221/id_art/8813/j/pl/
2017-09-08T14:53:10
watchdogpolska/feder
320
watchdogpolska__feder-320
[ "300" ]
7de47f52d69f254aa09d8fa782fc9c5edaab778d
diff --git a/feder/letters/models.py b/feder/letters/models.py --- a/feder/letters/models.py +++ b/feder/letters/models.py @@ -43,6 +43,9 @@ def with_author(self): def for_milestone(self): return self.prefetch_related('attachment_set').with_author() + def is_draft(self): + return self.is_outgoing().filter(eml='') + def is_outgoing(self): return self.filter(author_user__isnull=False) diff --git a/feder/monitorings/urls.py b/feder/monitorings/urls.py --- a/feder/monitorings/urls.py +++ b/feder/monitorings/urls.py @@ -24,6 +24,10 @@ name="letters"), url(_(r'^(?P<slug>[\w-]+)/letter/page-(?P<page>[\d]+)$'), views.LetterListMonitoringView.as_view(), name="letters"), + url(_(r'^(?P<slug>[\w-]+)/drafts'), views.DraftListMonitoringView.as_view(), + name="drafts"), + url(_(r'^(?P<slug>[\w-]+)/drafts/page-(?P<page>[\d]+)$'), views.DraftListMonitoringView.as_view(), + name="drafts"), url(_(r'^(?P<slug>[\w-]+)/~update$'), views.MonitoringUpdateView.as_view(), name="update"), url(_(r'^(?P<slug>[\w-]+)/~delete$'), views.MonitoringDeleteView.as_view(), diff --git a/feder/monitorings/views.py b/feder/monitorings/views.py --- a/feder/monitorings/views.py +++ b/feder/monitorings/views.py @@ -74,6 +74,23 @@ def get_object_list(self, obj): all()) +class DraftListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraListMixin, DetailView): + model = Monitoring + template_name_suffix = '_draft_list' + select_related = ['user', ] + prefetch_related = ['questionary_set', ] + paginate_by = 25 + + def get_object_list(self, obj): + return (Letter.objects.filter(case__monitoring=obj). + is_draft(). + select_related('case'). + with_author(). + attachment_count(). + order_by('-created'). + all()) + + class MonitoringCreateView(LoginRequiredMixin, PermissionRequiredMixin, UserFormKwargsMixin, CreateView): model = Monitoring
diff --git a/feder/monitorings/tests.py b/feder/monitorings/tests.py --- a/feder/monitorings/tests.py +++ b/feder/monitorings/tests.py @@ -6,6 +6,7 @@ from feder.cases.models import Case from feder.institutions.factories import InstitutionFactory from feder.letters.factories import IncomingLetterFactory +from feder.letters.factories import OutgoingLetterFactory from feder.main.mixins import PermissionStatusMixin from feder.users.factories import UserFactory from .factories import MonitoringFactory @@ -114,6 +115,27 @@ def test_display_letter(self): self.assertContains(response, letter.note) +class DraftListMonitoringViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): + status_anonymous = 200 + status_no_permission = 200 + permission = [] + + def get_url(self): + return reverse('monitorings:drafts', kwargs={'slug': self.monitoring}) + + def test_list_display(self): + response = self.client.get(self.get_url()) + self.assertEqual(response.status_code, 200) + self.assertContains(response, self.monitoring) + + def test_display_draft(self): + draft_letter = OutgoingLetterFactory(case__monitoring=self.monitoring) + incoming_letter = IncomingLetterFactory(case__monitoring=self.monitoring) + response = self.client.get(self.get_url()) + self.assertContains(response, draft_letter.body) + self.assertContains(response, draft_letter.note) + self.assertNotContains(response, incoming_letter.body) + class MonitoringUpdateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): permission = ['monitorings.change_monitoring', ]
Introduce a list of letter drafts ![zaznaczenie_0951](https://user-images.githubusercontent.com/3618479/29756710-40121212-8ba6-11e7-9c09-607e78e27067.png) As in the picture, visible only to people authorized to approve the drafts.
2017-09-15T18:21:29
watchdogpolska/feder
322
watchdogpolska__feder-322
[ "321" ]
2fe4fb6b1d73c442476387f5a9a03defcaca5b0d
diff --git a/config/settings/local.py b/config/settings/local.py --- a/config/settings/local.py +++ b/config/settings/local.py @@ -40,7 +40,7 @@ # django-debug-toolbar # ------------------------------------------------------------------------------ -MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',) +# MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',) INSTALLED_APPS += ('debug_toolbar', ) INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
No queries shown in the SQL tab of the Django debug toolbar ![image](https://user-images.githubusercontent.com/1237985/30499623-9559ddcc-9a5b-11e7-8225-0d7f6786960b.png)
2017-09-15T19:22:57
watchdogpolska/feder
328
watchdogpolska__feder-328
[ "291" ]
2f2b26701dd64a66712d0835909cdc38fc819d4a
diff --git a/feder/letters/logs/urls.py b/feder/letters/logs/urls.py --- a/feder/letters/logs/urls.py +++ b/feder/letters/logs/urls.py @@ -11,6 +11,8 @@ name="list"), url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)$'), views.EmailLogMonitoringListView.as_view(), name="list"), + url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)/export$'), views.EmailLogMonitoringCsvView.as_view(), + name="export"), url(_(r'^log-(?P<pk>[\d-]+)$'), views.EmailLogDetailView.as_view(), name="detail"), ] diff --git a/feder/letters/logs/views.py b/feder/letters/logs/views.py --- a/feder/letters/logs/views.py +++ b/feder/letters/logs/views.py @@ -1,8 +1,12 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals +from django.utils import timezone +import unicodecsv as csv + from braces.views import SelectRelatedMixin, PrefetchRelatedMixin from cached_property import cached_property +from django.http import HttpResponse from django.shortcuts import get_object_or_404 from django.views.generic import DetailView, ListView @@ -10,7 +14,7 @@ from feder.letters.logs.models import EmailLog from feder.main.mixins import AttrPermissionRequiredMixin from feder.monitorings.models import Monitoring - +from django.views.generic.list import ListView class ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin): select_related = ['case'] @@ -39,6 +43,61 @@ def monitoring(self): return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk']) +class EmailLogMonitoringCsvView(ListMonitoringMixin, ListView): + permission_required = 'monitorings.view_log' + + select_related = ['case', 'case__institution'] + + @cached_property + def monitoring(self): + return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk']) + + def get(self, *args, **kwargs): + response = self._get_csv_response() + self._write_rows(response, self.get_queryset()) + return response + + @staticmethod + def _get_base_model_field_names(queryset): + opts = queryset.model._meta + return [field.name for field in opts.fields if field.related_model is None] + + def _get_csv_response(self): + csv_response = HttpResponse(content_type='text/csv') + current_time = timezone.now() + filename = 'email_log_{0}-{1}-{2}.csv'.format(self.monitoring.id, + current_time.strftime('%Y_%m_%d-%H_%M_%S'), + current_time.tzname() + ) + csv_response['Content-Disposition'] = "attachment;filename={0}".format(filename) + return csv_response + + def _write_rows(self, response, queryset): + writer = csv.writer(response) + + # automatically add all fields from base table/model + base_field_names = self._get_base_model_field_names(queryset) + + # print header row + writer.writerow(base_field_names + + [ + 'case id', + 'case email', + 'institution', + 'institution id', + 'monitoring id'] + ) + + for obj in queryset: + writer.writerow( + [getattr(obj, field) for field in base_field_names] + [ + obj.case.id, + obj.case.email, + obj.case.institution.name, + obj.case.institution_id, + obj.case.monitoring_id, + ]) + class EmailLogCaseListView(ListMonitoringMixin, ListView): template_name_suffix = '_list_for_case'
diff --git a/feder/letters/logs/tests.py b/feder/letters/logs/tests.py --- a/feder/letters/logs/tests.py +++ b/feder/letters/logs/tests.py @@ -144,6 +144,25 @@ def get_url(self): return reverse('logs:list', kwargs={'monitoring_pk': self.monitoring.pk}) +class EmailLogMonitoringCsvViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): + permission = ['monitorings.view_log'] + + def get_url(self): + return reverse('logs:export', kwargs={'monitoring_pk': self.monitoring.pk}) + + def test_has_logs(self): + logrecord_for_another_monitoring = LogRecordFactory() + self.login_permitted_user() + response = self.client.get(self.get_url()) + self.assertTrue(response.get('Content-Disposition').startswith('attachment;filename=')) + self.assertContains(response, self.emaillog.case.institution) + self.assertNotContains(response, + logrecord_for_another_monitoring.email.case.institution.name, + 200, + ('Csv export for a monitoring should not ' + 'contain emaillogs for another monitoring')) + + class EmailLogCaseListViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): permission = ['monitorings.view_log']
CSV export of EmailLog We introduced message delivery statistics in ```feder.letters.logs```. A summary of all EmailLog data for a given monitoring should be added, so that statistics and the like can be produced from it.
1. Should the export require authorization? And which permission? 2. Which fields (from case, monitoring) should be included in the CSV? 1/ In general, the data in ```EmailLog``` is protected by ``monitorings.view_log``, so a permission no weaker than that ( https://github.com/watchdogpolska/feder/blob/ae89d4a93ebb2f75025305758e6b988787db04f2/feder/letters/logs/views.py#L20 ). There is also no point in introducing an additional permission just for the export itself. 2/ I think ```Case.id, Monitoring.id, Case.email, Case.institution.name, Case.institution.id``` + ```EmailLog``` will suffice.
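For reference, the generic Django pattern for returning CSV as a download, which the exporting view builds on. A minimal sketch with the stdlib csv module (the merged patch uses unicodecsv for Python 2 unicode support; the view name here is hypothetical):

```python
import csv
from django.http import HttpResponse

def export_email_log_csv(request):
    # The real view also checks the 'monitorings.view_log' permission
    # and derives a timestamped filename.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment;filename=email_log.csv'
    writer = csv.writer(response)
    writer.writerow(['case id', 'case email', 'institution', 'institution id', 'monitoring id'])
    # ... one writer.writerow([...]) per EmailLog row ...
    return response
```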
2017-09-20T19:53:34
watchdogpolska/feder
329
watchdogpolska__feder-329
[ "83" ]
9fe644ead82eba249259450ffaab970ab4c9dc43
diff --git a/feder/monitorings/filters.py b/feder/monitorings/filters.py --- a/feder/monitorings/filters.py +++ b/feder/monitorings/filters.py @@ -6,10 +6,22 @@ from django.utils.translation import ugettext_lazy as _ from .models import Monitoring +from teryt_tree.dal_ext.filters import VoivodeshipFilter, CountyFilter, CommunityFilter class MonitoringFilter(django_filters.FilterSet): created = django_filters.DateRangeFilter(label=_("Creation date")) + voivodeship = VoivodeshipFilter( + widget=autocomplete.ModelSelect2(url='teryt:voivodeship-autocomplete') + ) + county = CountyFilter( + widget=autocomplete.ModelSelect2(url='teryt:county-autocomplete', + forward=['voivodeship']) + ) + community = CommunityFilter( + widget=autocomplete.ModelSelect2(url='teryt:community-autocomplete', + forward=['county']) + ) def __init__(self, *args, **kwargs): super(MonitoringFilter, self).__init__(*args, **kwargs) diff --git a/feder/monitorings/models.py b/feder/monitorings/models.py --- a/feder/monitorings/models.py +++ b/feder/monitorings/models.py @@ -24,6 +24,9 @@ class MonitoringQuerySet(models.QuerySet): def with_case_count(self): return self.annotate(case_count=models.Count('case')) + def area(self, jst): + return self.filter(case__institution__jst__tree_id=jst.tree_id, + case__institution__jst__lft__range=(jst.lft, jst.rght)) @reversion.register() class Monitoring(TimeStampedModel):
diff --git a/feder/monitorings/tests.py b/feder/monitorings/tests.py --- a/feder/monitorings/tests.py +++ b/feder/monitorings/tests.py @@ -3,6 +3,7 @@ from django.test import TestCase from guardian.shortcuts import assign_perm +from feder.cases.factories import CaseFactory from feder.cases.models import Case from feder.institutions.factories import InstitutionFactory from feder.letters.factories import IncomingLetterFactory @@ -81,6 +82,14 @@ def test_list_display(self): self.assertEqual(response.status_code, 200) self.assertContains(response, self.monitoring) + def test_filter_by_voivodship(self): + self.case = CaseFactory() # creates a new monitoring (and institution, JST, too) + + response = self.client.get(reverse('monitorings:list') + + '?voivodeship={0}'.format(self.case.institution.jst.id)) + self.assertContains(response, self.case.monitoring) + self.assertNotContains(response, self.monitoring) + class MonitoringDetailViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): status_anonymous = 200
Autocomplete for JST (territorial units) in MonitoringFilter
```django-teryt-tree``` probably already has the appropriate widgets.
2017-09-21T14:20:25
watchdogpolska/feder
349
watchdogpolska__feder-349
[ "339" ]
b121c7fe6081f5deea8eeeed913f50b46d0ec548
diff --git a/feder/tasks/views.py b/feder/tasks/views.py --- a/feder/tasks/views.py +++ b/feder/tasks/views.py @@ -142,7 +142,10 @@ def action(self, *args, **kwargs): self.object.save() def get_success_message(self): - return _("Survey {object} selected!").format(object=self.object) + if self.direction == 'up': + return _("Survey credibility increased!") + else: + return _("Survey credibility decreased!") def get_success_url(self): return reverse('tasks:survey', kwargs={'pk': self.object.task_id})
A friendly message about the credibility update ![zaznaczenie_1027](https://user-images.githubusercontent.com/3618479/30726463-49ccc99c-9f4b-11e7-8b74-db3108ac4e45.png)
2017-09-25T14:16:01
watchdogpolska/feder
389
watchdogpolska__feder-389
[ "388" ]
282d50a3d258577ea541961b9a8736ca3c8cd4bc
diff --git a/feder/letters/views.py b/feder/letters/views.py --- a/feder/letters/views.py +++ b/feder/letters/views.py @@ -220,8 +220,12 @@ class ReportSpamView(ActionMessageMixin, ActionView): def get_queryset(self): return super(ReportSpamView, self).get_queryset().filter(is_spam=Letter.SPAM.unknown) + @property + def if_can_mark_spam(self): + return self.request.user.has_perm('spam_mark', self.object.case.monitoring) + def action(self): - if self.request.user.is_superuser: + if self.if_can_mark_spam: if 'valid' in self.request.POST: self.object.is_spam = Letter.SPAM.non_spam else: @@ -233,11 +237,11 @@ def action(self): author = None if self.request.user.is_anonymous() else self.request.user Alert.objects.create(monitoring=self.object.case.monitoring, reason=_("SPAM"), - author=None, + author=author, link_object=self.object) def get_success_message(self): - if self.request.user.is_superuser: + if self.if_can_mark_spam: if 'valid' in self.request.POST: return _("The letter {object} has been marked as valid.").format(object=self.object) return _("The message {object} has been marked " diff --git a/feder/monitorings/migrations/0010_auto_20180205_0811.py b/feder/monitorings/migrations/0010_auto_20180205_0811.py new file mode 100644 --- /dev/null +++ b/feder/monitorings/migrations/0010_auto_20180205_0811.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.10.7 on 2018-02-05 08:11 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('monitorings', '0009_monitoring_is_public'), + ] + + operations = [ + migrations.AlterModelOptions( + name='monitoring', + options={'ordering': ['created'], 'permissions': (('add_questionary', 'Can add questionary'), ('change_questionary', 'Can change questionary'), ('delete_questionary', 'Can delete questionary'), ('add_case', 'Can add case'), ('change_case', 'Can change case'), ('delete_case', 'Can delete case'), ('add_task', 'Can add task'), ('change_task', 'Can change task'), ('delete_task', 'Can delete task'), ('add_letter', 'Can add letter'), ('reply', 'Can reply'), ('add_draft', 'Add reply draft'), ('change_letter', 'Can change task'), ('delete_letter', 'Can delete letter'), ('view_alert', 'Can view alert'), ('change_alert', 'Can change alert'), ('delete_alert', 'Can delete alert'), ('manage_perm', 'Can manage perms'), ('select_survey', 'Can select answer'), ('view_log', 'Can view logs'), ('spam_mark', 'Can mark spam')), 'verbose_name': 'Monitoring', 'verbose_name_plural': 'Monitoring'}, + ), + ] diff --git a/feder/monitorings/models.py b/feder/monitorings/models.py --- a/feder/monitorings/models.py +++ b/feder/monitorings/models.py @@ -77,6 +77,7 @@ class Meta: ('manage_perm', _('Can manage perms')), ('select_survey', _('Can select answer')), ('view_log', _('Can view logs')), + ('spam_mark', _("Can mark spam")) ) def __unicode__(self):
diff --git a/feder/letters/tests/test_views.py b/feder/letters/tests/test_views.py --- a/feder/letters/tests/test_views.py +++ b/feder/letters/tests/test_views.py @@ -197,7 +197,7 @@ def test_letters(self): class ReportSpamViewTestCase (ObjectMixin, PermissionStatusMixin, TestCase): status_anonymous = 200 status_no_permission = 200 - permission = [] + permission = ['monitorings.spam_mark', ] def get_url(self): return reverse('letters:spam', kwargs={'pk': self.from_institution.pk}) @@ -207,17 +207,24 @@ def test_create_report_for_anonymous(self): self.assertEqual(Alert.objects.count(), 1) alert = Alert.objects.get() self.assertEqual(alert.link_object, self.from_institution) + self.assertEqual(alert.author, None) - def test_hide_by_admin(self): - self.client.login(username=UserFactory(is_superuser=True).username, - password='pass') + def test_create_report_for_user(self): + self.client.login(username='john', password='pass') + response = self.client.post(self.get_url()) + self.assertEqual(Alert.objects.count(), 1) + alert = Alert.objects.get() + self.assertEqual(alert.link_object, self.from_institution) + self.assertEqual(alert.author, self.user) + + def test_hide_by_staff(self): + self.login_permitted_user() response = self.client.post(self.get_url()) self.from_institution = Letter.objects_with_spam.get(pk=self.from_institution.pk) self.assertEqual(self.from_institution.is_spam, Letter.SPAM.spam) def test_mark_as_valid(self): - self.client.login(username=UserFactory(is_superuser=True).username, - password='pass') + self.login_permitted_user() response = self.client.post(self.get_url(), data={'valid': 'x'}) self.from_institution.refresh_from_db() self.assertEqual(self.from_institution.is_spam, Letter.SPAM.non_spam)
Document the spam review procedure Agnieszka Zdanowicz: > for now I am reporting spam such as holiday greetings etc. Karol, how does this spam reporting work? Do you get the info and delete it? Karol: > Yes, I receive information about "alerts" in a case as the person authorized for everything in the monitorings I created (in those created by Kamil, he receives them, because the point is to give as much responsibility as possible to the author of the monitoring, e.g. a befriended civil-society organization). > I will gladly hand this task over to someone else (the permission model allows it). Please give me a username, and I will grant you permissions to review spam and document the procedure for handling it.
2018-02-05T08:50:48
watchdogpolska/feder
433
watchdogpolska__feder-433
[ "428" ]
ee584ac403fb51fe9f525715fd6cc4c0373434ce
diff --git a/feder/letters/factories.py b/feder/letters/factories.py --- a/feder/letters/factories.py +++ b/feder/letters/factories.py @@ -1,15 +1,12 @@ from email.mime.text import MIMEText -import factory import factory.fuzzy -from django.core.mail import EmailMessage from factory.django import FileField -from feder.cases.factories import CaseFactory from feder.institutions.factories import InstitutionFactory from feder.records.factories import RecordFactory from feder.users.factories import UserFactory -from .models import Letter +from .models import Letter, Attachment class MailField(FileField): @@ -55,3 +52,11 @@ class SendOutgoingLetterFactory(LetterFactory): author_user = factory.SubFactory(UserFactory) is_send_yes = factory.PostGenerationMethodCall('send') + + +class AttachmentFactory(factory.django.DjangoModelFactory): + letter = factory.SubFactory(InstitutionFactory) + attachment = factory.django.FileField() + + class Meta: + model = Attachment
diff --git a/feder/letters/tests/test_views.py b/feder/letters/tests/test_views.py --- a/feder/letters/tests/test_views.py +++ b/feder/letters/tests/test_views.py @@ -15,7 +15,7 @@ from feder.main.mixins import PermissionStatusMixin from feder.monitorings.factories import MonitoringFactory from feder.users.factories import UserFactory -from ..factories import IncomingLetterFactory, OutgoingLetterFactory +from ..factories import IncomingLetterFactory, OutgoingLetterFactory, AttachmentFactory from django.utils.translation import ugettext_lazy as _ @@ -70,6 +70,11 @@ def test_contains_link_to_report_spam(self): self.assertContains(response, _("Report spam")) self.assertContains(response, reverse('letters:spam', kwargs={'pk': self.letter.pk})) + def test_contains_link_to_attachment(self): + attachment = AttachmentFactory(letter=self.letter) + response = self.client.get(self.get_url()) + self.assertContains(response, attachment.attachment.url) + class LetterCreateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): permission = ['monitorings.add_letter', ]
How attachments sent to us are displayed I do not know why it is that when I open the correspondence with a given municipality in a given monitoring, I see attachments next to the e-mails: ![image](https://user-images.githubusercontent.com/36925935/37775670-d1d18a68-2de3-11e8-9787-af1c19084734.png) But when I open a specific message, they are not there: ![image](https://user-images.githubusercontent.com/36925935/37775701-ea66de2a-2de3-11e8-931c-cf61c2e5c964.png) Can this be changed so that the attachments are also visible after opening a specific message?
Yes.
2018-03-26T23:14:18
watchdogpolska/feder
441
watchdogpolska__feder-441
[ "427" ]
aa31ca2463352c60365fdd003a92bd7b2d07ed95
diff --git a/feder/monitorings/views.py b/feder/monitorings/views.py --- a/feder/monitorings/views.py +++ b/feder/monitorings/views.py @@ -60,6 +60,10 @@ def get_queryset(self): return qs + def get_context_data(self, **kwargs): + kwargs['url_extra_kwargs'] = {'slug': self.object.slug} + return super(MonitoringDetailView, self).get_context_data(**kwargs) + def get_object_list(self, obj): return (Case.objects.filter(monitoring=obj). select_related('institution'). @@ -77,6 +81,10 @@ class LetterListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraLi prefetch_related = ['questionary_set', ] paginate_by = 25 + def get_context_data(self, **kwargs): + kwargs['url_extra_kwargs'] = {'slug': self.object.slug} + return super(LetterListMonitoringView, self).get_context_data(**kwargs) + def get_object_list(self, obj): return (Letter.objects.filter(record__case__monitoring=obj). select_related('record__case'). @@ -93,6 +101,10 @@ class DraftListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraLis prefetch_related = ['questionary_set', ] paginate_by = 25 + def get_context_data(self, **kwargs): + kwargs['url_extra_kwargs'] = {'slug': self.object.slug} + return super(DraftListMonitoringView, self).get_context_data(**kwargs) + def get_object_list(self, obj): return (Letter.objects.filter(record__case__monitoring=obj). is_draft().
Change how cases with e-mails in a monitoring are paged At the moment you can only jump one page forward or back, and the point is to be able to jump several pages ahead or straight to the end. It is like this: ![image](https://user-images.githubusercontent.com/36925935/37586415-8c8a566c-2b5c-11e8-9262-bccfc63bea59.png) It would be better if it were like this: ![image](https://user-images.githubusercontent.com/36925935/37586480-c96e5df8-2b5c-11e8-9090-487ecbd94ed6.png)
This is pagination. I will prepare it today.
2018-04-05T22:34:01
watchdogpolska/feder
932
watchdogpolska__feder-932
[ "930" ]
1374b32db20cc51d22eb31aa1bf0b7653ee6584a
diff --git a/feder/letters/views.py b/feder/letters/views.py --- a/feder/letters/views.py +++ b/feder/letters/views.py @@ -238,12 +238,12 @@ class LetterDeleteView( permission_required = "monitorings.delete_letter" def delete(self, request, *args, **kwargs): - result = super().delete(request, *args, **kwargs) - for x in self.object.attachment_set.all(): - x.attachment.delete() # Delete file - self.object.attachment_set.all().delete() # Delete objects - self.object.eml.delete() # Delete file - return result + obj = self.get_object() + # Manually deleting related files + for att_obj in obj.attachment_set.all(): + att_obj.attachment.delete() + obj.eml.delete() + return super().delete(request, *args, **kwargs) def get_success_url(self): return self.object.case.get_absolute_url()
diff --git a/feder/letters/tests/test_views.py b/feder/letters/tests/test_views.py --- a/feder/letters/tests/test_views.py +++ b/feder/letters/tests/test_views.py @@ -194,6 +194,13 @@ def test_remove_eml_file(self): self.client.post(self.get_url()) self.assertFalse(os.path.isfile(self.from_user.eml.file.name)) + def test_remove_letter_with_attachment(self): + self.login_permitted_user() + attachment = AttachmentFactory(letter=self.from_user) + self.assertTrue(os.path.isfile(attachment.attachment.file.name)) + self.client.post(self.get_url()) + self.assertFalse(os.path.isfile(attachment.attachment.file.name)) + class LetterReplyViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase): permission = ["monitorings.reply", "monitorings.add_draft"]
Error when deleting a message @dzejkobi I went into the monitoring "Samorządy - o liczbę wniosków" and saw that the Żarnów municipality keeps sending us automated messages about various trainings, holiday greetings, etc. At first I started marking those messages as spam, but I got worried that replies from this municipality would then automatically be treated as spam, so I decided to simply delete those messages using the "delete" button: ![image](https://user-images.githubusercontent.com/36925935/103994006-b47d8000-5196-11eb-8f33-d8d4c0fdf15b.png) And I got this error: ![image](https://user-images.githubusercontent.com/36925935/103994411-42596b00-5197-11eb-9f1f-3a71b10eff11.png) I set my permissions so that I can delete a letter, so the error does not come from missing permissions. Can you take a look at what went wrong here?
OK, I will check.
2021-01-08T12:11:49
watchdogpolska/feder
1,150
watchdogpolska__feder-1150
[ "1148" ]
857f0b28e34f0ced9f74cf35770e25b151f1c8d5
diff --git a/feder/letters/models.py b/feder/letters/models.py --- a/feder/letters/models.py +++ b/feder/letters/models.py @@ -238,7 +238,6 @@ def _email_context(self): "body": body, "footer": self.case.monitoring.email_footer, "quote": email_wrapper(self.quote), - "attachments": self.attachment_set.all(), } def body_with_footer(self): @@ -265,6 +264,10 @@ def _construct_message(self, msg_id=None): to=[self.case.institution.email], body=txt_content, headers=headers, + attachments=[ + (att.filename, att.attachment.file.read(), "application/octet-stream") + for att in self.attachment_set.all() + ], ) msg.attach_alternative(html_content, "text/html") return msg
diff --git a/feder/letters/tests/test_views.py b/feder/letters/tests/test_views.py --- a/feder/letters/tests/test_views.py +++ b/feder/letters/tests/test_views.py @@ -250,7 +250,7 @@ def test_send_reply(self): self.assertEqual(len(mail.outbox), 1) new_letter = Letter.objects.filter(title="Lorem").get() new_attachment = new_letter.attachment_set.get() - self.assertIn(new_attachment.get_full_url(), mail.outbox[0].body) + self.assertEqual(mail.outbox[0].attachments[0][0], new_attachment.filename) self.assertEqual(Record.objects.count(), 3) def test_no_send_drafts(self):
How attachments are sent via "Mass message" Lasy i Obywatele ran into a problem with requests sent to forest districts: the e-mail form of the request was challenged as not guaranteeing delivery. The requests were therefore re-sent with an electronically signed PDF and its compressed version attached. Unfortunately, this time the forest districts report that they see the attachments as links, and their IT staff forbid clicking such links for security reasons. This raises the following questions: 1. Is it really the case that message attachments leave Fedrowanie as links? 2. Would it be reasonable to add an optional way to send attachments as actual attachments? 3. Is there any way to force, ad hoc and one-off, via some configuration change, the sending of real attachments (it would be just under 300 KB to 321 forest districts)? This concerns the attachments added here: https://fedrowanie.siecobywatelska.pl/monitoringi/udzial-gmin-w-ochronie-lasow-2/~wiadomosc-masowa ![obraz](https://user-images.githubusercontent.com/33778688/155523417-fd23aebf-d7dc-4a15-a4f8-0280a7a6b2bc.png)
1/ Yes. Given #1069, you can download a copy of every message we send. 2/ Possible. 3/ There is currently no such option in Fedrowanie. It requires a change in the sending logic, more precisely in this file: https://github.com/watchdogpolska/feder/blob/857f0b28e34f0ced9f74cf35770e25b151f1c8d5/feder/letters/models.py#L253-L270 . And also in the files indicated here: https://github.com/watchdogpolska/feder/blob/857f0b28e34f0ced9f74cf35770e25b151f1c8d5/feder/letters/models.py#L249-L250
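For reference, the Django e-mail API pattern the fix relies on. A minimal sketch (recipient address and file bytes are hypothetical placeholders, not feder's actual code):

```python
from django.core.mail import EmailMultiAlternatives

pdf_bytes = b"%PDF-1.4 ..."  # placeholder attachment content

msg = EmailMultiAlternatives(
    subject="Wniosek",
    body="plain-text body",
    to=["office@example.gov"],  # hypothetical recipient
    # Each entry is a (filename, content, mimetype) triple, so recipients
    # get a real MIME attachment instead of a download link.
    attachments=[("wniosek.pdf", pdf_bytes, "application/octet-stream")],
)
msg.attach_alternative("<p>HTML body</p>", "text/html")
# msg.send()  # requires a configured e-mail backend
```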
2022-02-27T23:42:58
networkx/networkx
750
networkx__networkx-750
[ "749" ]
f2cac16be4212f94155d06d275d8f1da8dce9ffb
diff --git a/networkx/algorithms/flow/maxflow.py b/networkx/algorithms/flow/maxflow.py --- a/networkx/algorithms/flow/maxflow.py +++ b/networkx/algorithms/flow/maxflow.py @@ -216,7 +216,7 @@ def _create_flow_dict(G, H, capacity='capacity'): flow[u][v] = abs(inf_capacity_flows[(u, v)] - inf_capacity_flows[(v, u)]) else: - flow[u][v] = abs(G[u][v][capacity] - H[v][u][capacity]) + flow[u][v] = G[u][v][capacity] flow[v][u] = flow[u][v] return flow diff --git a/networkx/algorithms/flow/mincost.py b/networkx/algorithms/flow/mincost.py --- a/networkx/algorithms/flow/mincost.py +++ b/networkx/algorithms/flow/mincost.py @@ -137,7 +137,8 @@ def _find_entering_edge(H, c, capacity = 'capacity'): return newEdge -def _find_leaving_edge(H, T, cycle, newEdge, capacity = 'capacity'): +def _find_leaving_edge(H, T, cycle, newEdge, capacity = 'capacity', + reverse=False): """Find an edge that will leave the basis and the value by which we can increase or decrease the flow on that edge. @@ -151,14 +152,26 @@ def _find_leaving_edge(H, T, cycle, newEdge, capacity = 'capacity'): eps = False leavingEdge = () - # If cycle is a digon, newEdge is always a reverse edge (otherwise, - # there would be no leaving edge). + # If cycle is a digon. if len(cycle) == 3: u, v = newEdge - if H[u][v].get('flow', 0) > H[v][u].get('flow', 0): - return (v, u), H[v][u].get('flow', 0) + if capacity not in H[u][v] and capacity not in H[v][u]: + raise nx.NetworkXUnbounded( + "Negative cost cycle of infinite capacity found. " + + "Min cost flow unbounded below.") + + if reverse: + if H[u][v].get('flow', 0) > H[v][u].get('flow', 0): + return (v, u), H[v][u].get('flow', 0) + else: + return (u, v), H[u][v].get('flow', 0) else: - return (u, v), H[u][v].get('flow', 0) + uv_residual = H[u][v].get(capacity, 0) - H[u][v].get('flow', 0) + vu_residual = H[v][u].get(capacity, 0) - H[v][u].get('flow', 0) + if (uv_residual > vu_residual): + return (v, u), vu_residual + else: + return (u, v), uv_residual # Find the forward edge with the minimum value for capacity - 'flow' # and the reverse edge with the minimum value for 'flow'. @@ -409,24 +422,29 @@ def network_simplex(G, demand = 'demand', capacity = 'capacity', path2 = path2[path2.index(join):] cycle = [] if H[newEdge[0]][newEdge[1]].get('flow', 0) == 0: + reverse = False path2.reverse() cycle = path1 + path2 else: # newEdge is at capacity + reverse = True path1.reverse() cycle = path2 + path1 # Find the leaving edge. Will stop here if cycle is an infinite # capacity negative cost cycle. leavingEdge, eps = _find_leaving_edge(H, T, cycle, newEdge, - capacity = capacity) + capacity=capacity, + reverse=reverse) # Actual augmentation happens here. If eps = 0, don't bother. if eps: flowCost -= cycleCost * eps if len(cycle) == 3: + if reverse: + eps = -eps u, v = newEdge - H[u][v]['flow'] -= eps - H[v][u]['flow'] -= eps + H[u][v]['flow'] = H[u][v].get('flow', 0) + eps + H[v][u]['flow'] = H[v][u].get('flow', 0) + eps else: for index, u in enumerate(cycle[:-1]): v = cycle[index + 1]
diff --git a/networkx/algorithms/flow/tests/test_mincost.py b/networkx/algorithms/flow/tests/test_mincost.py --- a/networkx/algorithms/flow/tests/test_mincost.py +++ b/networkx/algorithms/flow/tests/test_mincost.py @@ -251,6 +251,31 @@ def test_digon(self): assert_equal(nx.min_cost_flow(G), soln) assert_equal(nx.cost_of_flow(G, H), 2857140) + def test_infinite_capacity_neg_digon(self): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), + (2, {'demand': -4}), + (3, {'demand': 4}), + ] + edges = [(1, 2, {'weight': -600}), + (2, 1, {'weight': 0}), + (2, 3, {'capacity': 5, 'weight': 714285}), + (3, 2, {'capacity': 2, 'weight': 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + def test_finite_capacity_neg_digon(self): + """The digon should receive the maximum amount of flow it can handle. + Taken from ticket #749 by @chuongdo.""" + G = nx.DiGraph() + G.add_edge('a', 'b', capacity=1, weight=-1) + G.add_edge('b', 'a', capacity=1, weight=-1) + min_cost = -2 + assert_equal(nx.min_cost_flow_cost(G), min_cost) + def test_multidigraph(self): """Raise an exception for multidigraph.""" G = nx.MultiDiGraph()
Min cost flow non-termination with negative cost cycle of finite capacity The current min cost flow implementation does not terminate on a simple problem involving a negative cycle with finite capacity: ``` import networkx G = networkx.DiGraph() G.add_edge('a', 'b', capacity=1, weight=-1) G.add_edge('b', 'a', capacity=1, weight=-1) print networkx.min_cost_flow_cost(G) ``` Am I misinterpreting the guarantees stated in the code documentation? (The documentation seems only to disallow negative cycles of infinite capacity, but seems to imply that negative cost cycles of finite capacity should be ok?)
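For reference, the behaviour the accompanying regression test pins down (values taken from the test_patch above): the digon saturates at its finite capacity instead of looping forever.

```python
import networkx as nx

G = nx.DiGraph()
G.add_edge('a', 'b', capacity=1, weight=-1)
G.add_edge('b', 'a', capacity=1, weight=-1)

# One unit of flow on each edge saturates the finite capacities,
# for a total cost of (-1) + (-1) = -2.
assert nx.min_cost_flow_cost(G) == -2
```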
2012-08-06T19:43:22
networkx/networkx
768
networkx__networkx-768
[ "765" ]
569928a8dc51e6b56be3c1941794304dfff8e1f9
diff --git a/networkx/readwrite/pajek.py b/networkx/readwrite/pajek.py --- a/networkx/readwrite/pajek.py +++ b/networkx/readwrite/pajek.py @@ -37,10 +37,12 @@ def generate_pajek(G): for format information. """ if G.name=='': - name='NetworkX' + name='NetworkX' else: - name=G.name - yield '*network %s'%name + name=G.name + # Apparently many Pajek format readers can't process this line + # So we'll leave it out for now. + # yield '*network %s'%name # write nodes with attributes yield '*vertices %s'%(G.order())
Pajek exporter should not write first line "*network NetworkX" Currently, the first line of a pajek file created by networkx looks like: ``` *network NetworkX ``` Many programs that import pajek files crash, because they expect the first line to be ``` *vertices 762 ``` Given that the `*network NetworkX` serves no purpose, could we modify the pajek writer to not create this first line?
That line is apparently allowed by the "specification" - see http://vlado.fmf.uni-lj.si/pub/networks/pajek/svganim/1.10.7.1/PajekToSvgAnim.pdf but clearly not necessary for functionality. So I think we can remove it without any harm. Great, I'm glad you agree. Just to summarize the motivation for this small change: there are many software tools which use a subset of the pajek specification, and this line causes problems for them. So while it may be technically correct to add that line (according to the full pajek specification), unless it's serving an important purpose, I argue we should leave it out.
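A quick way to observe the change through the public API. A minimal sketch:

```python
import networkx as nx

G = nx.path_graph(3)
lines = list(nx.generate_pajek(G))

# Before the change the first generated line was '*network NetworkX';
# afterwards the output starts directly with the vertex count.
print(lines[0])  # '*vertices 3'
```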
2012-09-19T22:39:58
networkx/networkx
778
networkx__networkx-778
[ "772" ]
3245337e6eed3af6c8d735338eba9e2d14f1c156
diff --git a/networkx/algorithms/traversal/breadth_first_search.py b/networkx/algorithms/traversal/breadth_first_search.py --- a/networkx/algorithms/traversal/breadth_first_search.py +++ b/networkx/algorithms/traversal/breadth_first_search.py @@ -30,11 +30,12 @@ def bfs_edges(G,source): except StopIteration: stack.pop(0) - def bfs_tree(G, source): """Return directed tree of breadth-first-search from source.""" - return nx.DiGraph(bfs_edges(G,source)) - + T = nx.DiGraph() + T.add_node(source) + T.add_edges_from(bfs_edges(G,source)) + return T def bfs_predecessors(G, source): """Return dictionary of predecessors in breadth-first-search from source.""" @@ -47,6 +48,3 @@ def bfs_successors(G, source): for s,t in bfs_edges(G,source): d[s].append(t) return dict(d) - - - diff --git a/networkx/algorithms/traversal/depth_first_search.py b/networkx/algorithms/traversal/depth_first_search.py --- a/networkx/algorithms/traversal/depth_first_search.py +++ b/networkx/algorithms/traversal/depth_first_search.py @@ -1,6 +1,6 @@ """ ================== -Depth-first search +Depth-first search ================== Basic algorithms for depth-first searching. @@ -45,11 +45,15 @@ def dfs_edges(G,source=None): except StopIteration: stack.pop() - -def dfs_tree(G, source=None): +def dfs_tree(G, source): """Return directed tree of depth-first-search from source.""" - return nx.DiGraph(dfs_edges(G,source=source)) - + T = nx.DiGraph() + if source is None: + T.add_nodes_from(G) + else: + T.add_node(source) + T.add_edges_from(dfs_edges(G,source)) + return T def dfs_predecessors(G, source=None): """Return dictionary of predecessors in depth-first-search from source.""" @@ -65,7 +69,7 @@ def dfs_successors(G, source=None): def dfs_postorder_nodes(G,source=None): - """Produce nodes in a depth-first-search post-ordering starting + """Produce nodes in a depth-first-search post-ordering starting from source. """ post=(v for u,v,d in nx.dfs_labeled_edges(G,source=source) @@ -77,7 +81,7 @@ def dfs_postorder_nodes(G,source=None): def dfs_preorder_nodes(G,source=None): """Produce nodes in a depth-first-search pre-ordering starting at source.""" - pre=(v for u,v,d in nx.dfs_labeled_edges(G,source=source) + pre=(v for u,v,d in nx.dfs_labeled_edges(G,source=source) if d['dir']=='forward') # chain source to beginning of pre-ordering # return chain([source],pre) @@ -118,4 +122,3 @@ def dfs_labeled_edges(G,source=None): if stack: yield stack[-1][0],parent,{'dir':'reverse'} yield start,start,{'dir':'reverse'} -
diff --git a/networkx/algorithms/traversal/tests/test_bfs.py b/networkx/algorithms/traversal/tests/test_bfs.py --- a/networkx/algorithms/traversal/tests/test_bfs.py +++ b/networkx/algorithms/traversal/tests/test_bfs.py @@ -27,3 +27,10 @@ def test_bfs_edges(self): edges=nx.bfs_edges(self.G,source=0) assert_equal(list(edges),[(0, 1), (1, 2), (1, 3), (2, 4)]) + def test_bfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T=nx.bfs_tree(G,source=1) + assert_equal(sorted(T.nodes()),[1]) + assert_equal(sorted(T.edges()),[]) diff --git a/networkx/algorithms/traversal/tests/test_dfs.py b/networkx/algorithms/traversal/tests/test_dfs.py --- a/networkx/algorithms/traversal/tests/test_dfs.py +++ b/networkx/algorithms/traversal/tests/test_dfs.py @@ -55,4 +55,14 @@ def test_dfs_labeled_disconnected_edges(self): edges=list(nx.dfs_labeled_edges(self.D)) forward=[(u,v) for (u,v,d) in edges if d['dir']=='forward'] assert_equal(forward,[(0, 0), (0, 1), (2, 2), (2, 3)]) - + + def test_dfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T=nx.dfs_tree(G,source=1) + assert_equal(sorted(T.nodes()),[1]) + assert_equal(sorted(T.edges()),[]) + T=nx.dfs_tree(G,source=None) + assert_equal(sorted(T.nodes()),[1, 2]) + assert_equal(sorted(T.edges()),[])
bug report - BFS_Tree function The source code only generates the list of edges of the BFS tree, from which the tree is then created; therefore the case of a tree with only one node is missing. So if I run: G = nx.Graph() G.add_node(0) print len(bfs_tree(G,0)) I get: 0, which is wrong. Please fix it. Barak.
How about we just change bfs_tree() (and dfs_tree()) to ``` def bfs_tree(G, source): """Return directed tree of breadth-first-search from source.""" T = nx.DiGraph() T.add_node(source) T.add_edges_from(bfs_edges(G,source)) return T ``` seems ok... Since we have the possibility to set source=None for the DFS algorithms, we need to add all isolated vertices separately, or they'll be omitted. Yes, for the DFS algorithms that is true (the bfs_edges() implementation doesn't work the same way). The simplest solution for dfs_tree() might be ``` def dfs_tree(G, source): T = nx.DiGraph() if source is None: T.add_nodes_from(G) else: T.add_node(source) T.add_edges_from(dfs_edges(G,source)) return T ``` In the case of source=None this might not return a tree (a forest instead)
2012-10-18T00:20:16
networkx/networkx
818
networkx__networkx-818
[ "627" ]
3710816683aa8c535d9f8bcfc7432bc9d4068585
diff --git a/networkx/algorithms/assortativity/connectivity.py b/networkx/algorithms/assortativity/connectivity.py --- a/networkx/algorithms/assortativity/connectivity.py +++ b/networkx/algorithms/assortativity/connectivity.py @@ -21,8 +21,15 @@ def _avg_deg_conn(G, neighbors, source_degree, target_degree, if weight is None: s = float(sum(nbrdeg.values())) else: # weight nbr degree by weight of (n,nbr) edge - s = float(sum((G[n][nbr].get(weight,1)*d - for nbr,d in nbrdeg.items()))) + if neighbors == G.neighbors: + s = float(sum((G[n][nbr].get(weight,1)*d + for nbr,d in nbrdeg.items()))) + elif neighbors == G.successors: + s = float(sum((G[n][nbr].get(weight,1)*d + for nbr,d in nbrdeg.items()))) + elif neighbors == G.predecessors: + s = float(sum((G[nbr][n].get(weight,1)*d + for nbr,d in nbrdeg.items()))) dnorm[k] += source_degree(n, weight=weight) dsum[k] += s
diff --git a/networkx/algorithms/assortativity/tests/test_connectivity.py b/networkx/algorithms/assortativity/tests/test_connectivity.py --- a/networkx/algorithms/assortativity/tests/test_connectivity.py +++ b/networkx/algorithms/assortativity/tests/test_connectivity.py @@ -108,3 +108,14 @@ def test_zero_deg(self): assert_equal(c,{0:0,3:1}) + def test_in_out_weight(self): + from itertools import permutations + G=nx.DiGraph() + G.add_edge(1,2,weight=1) + G.add_edge(1,3,weight=1) + G.add_edge(3,1,weight=1) + for s,t in permutations(['in','out','in+out'],2): + c = nx.average_degree_connectivity(G, source=s, target=t) + cw = nx.average_degree_connectivity(G,source=s, target=t, + weight='weight') + assert_equal(c,cw)
Mistake in computation of average neighbor degree for directed networks (migrated from Trac #639)
Original ticket https://networkx.lanl.gov/trac/ticket/639
Reported 2011-09-27 by Oleguer Sagarra <oleguer.sagarra, assigned to @hagberg.
There is a problem in the algorithm that computes the neighbor degree (and average neighbor degree) for directed networks. Because networkx is based on a dict of dicts, one may follow the neighbors of a node "forward" but not "backwards": with functions such as G.in_degree(G[n]) one lists the incoming degree of the neighbors of node n, where "neighbors" means the destinations of directed edges starting at node n. As a consequence, the normalization factor used is wrong. Additionally, there are bugs with division by zero.
These features need to be better defined, for various reasons:
a) For directed networks one can compute 4 types of features, divided into two subgroups:
a.1) For the neighbors of a node n (neighbors meaning the endpoints of directed edges starting at node n) one can compute the incoming or outgoing degree.
a.2) For all predecessors of node n (meaning all sources of edges ending at node n) one can compute the very same thing.
b) Additionally, when accounting for the average connectivity (the average neighbor degree as a function of the degree of a node) things get messy, because one may then compute 8 different features, divided into two groups providing different information:
b.1) Consider all the neighbors (defined earlier) of node n, for every node n in the net. For each node n compute the (in-degree, out-degree) of its neighbors, then average this over all nodes n of degree k.
b.2) The same for the predecessors of node n.
The current code can be adapted to do so, using the function G.reverse() to overcome the difficulty of quickly computing the predecessors of a node while keeping the code structure. But the documentation needs to be rewritten and the normalization of the averages redone.
Reference: Serrano, M.A., Maguitman, A., Boguna, M., Fortunato, S. & Vespignani, A. Decoding the structure of the WWW: facts versus sampling biases.
Main 10 (2005). http://arxiv.org/abs/cs/0511035
Simple example showing what I mean:

Case 1: Error in normalization (division by 0)
In [1]: import networkx as nx
In [2]: G=nx.DiGraph()
In [3]: G.add_edges_from(([1,2],[1,3],[1,4]))
In [6]: nx.average_in_degree_connectivity(G)
---------------------------------------------------------------------------
ZeroDivisionError                         Traceback (most recent call last)
/Users/ugalss/<ipython-input-6-e2e455d286f8> in <module>()
----> 1 nx.average_in_degree_connectivity(G)
/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx-1.5-py2.7.egg/networkx/algorithms/neighbor_degree.pyc in average_in_degree_connectivity(G, nodes, weighted)
    205         raise nx.NetworkXError("Not defined for undirected graphs.")
    206     degree_method = G.in_degree
--> 207     return _avg_deg_conn(G, degree_method, nodes=nodes, weighted=weighted)
    208 average_in_degree_connectivity.__doc__=average_degree_connectivity.__doc__
    209
/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx-1.5-py2.7.egg/networkx/algorithms/neighbor_degree.pyc in _avg_deg_conn(G, degree_method, nodes, weighted)
    141             dc[k]=avg
    142         if avg > 0:
--> 143             dc[k]/=dnorm[k]
    144     return dc
    145
ZeroDivisionError: float division by zero

Case 2: Wrong computation of neighbor degree (incoming):
In [7]: G.add_edges_from(([2,3],[3,4],[4,2]))
In [8]: nx.average_neighbor_in_degree(G)
Out[8]: {1: 6.0, 2: 1.0, 3: 1.0, 4: 1.0}
# This is wrong. The average in-degree should be 2 for neighbors of node 1.

I attach a new version of neighbor_degree.py with proposed (and commented) changes...
Attachment in Trac by Oleguer Sagarra <oleguer.sagarra, 2011-09-27: [neighbor_degree.py](https://networkx.lanl.gov/trac/attachment/ticket/639/neighbor_degree.py)
Comment in Trac by Oleguer Sagarra <oleguer.sagarra, 2011-09-27
I can add the feature to compute the different pairs (in-out, out-out, out-in, in-in) but then the structure of calling the functions should be adapted...
Comment in Trac by @hagberg, 2011-09-27
Comment in Trac by trac user ugalss, 2011-09-28
Well, I corrected the normalization issues and I added the option to compute the predecessor (mean) degree of each node. Now it is just a matter of adapting the k-nearest neighbors and predecessors to be computed using the 4 possible pairs, with further tests being done; I checked and it seems to work all right. The coding is not quite elegant, but it works, although the option G.reverse() might take a while for heavy networks (have not checked). See attachment neighbor&predecessor_degree.py
Comment in Trac by trac user ugalss, 2011-09-30
Added examples to the documentation. Just need to apply the dictionary solution used in #640 to the neighbor/predecessor connectivity for all possible pairs.
Attachment in Trac by trac user ugalss, 2011-09-30: [neighbor&predecessor_degree.py](https://networkx.lanl.gov/trac/attachment/ticket/639/neighbor&predecessor_degree.py)
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-02
In [f77d9a37a602713db4f493da3a20d39e89b86549/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="f77d9a37a602713db4f493da3a20d39e89b86549"
Move mixing.py and neighbor_degree.py and tests into assortativity module.
Addresses #640 and #639
'''
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-02
In [d2e03fca6b9e8e06cefb0c6925da8946d30c98cb/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="d2e03fca6b9e8e06cefb0c6925da8946d30c98cb"
Refactor assortativity into parts (pairs, mixing, correlation, ...)
Addresses #639 and #640
'''
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-02
In [4274d6966b89917efd983ad88c105447e2efacb1/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="4274d6966b89917efd983ad88c105447e2efacb1"
Refactor tests for assortativity. Addresses #640 and #639
'''
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-02
In [cc05a4fac66798418a8e45cd1ec360eed2756b8c/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="cc05a4fac66798418a8e45cd1ec360eed2756b8c"
Update docs. Simplify nbr_connectivity. Addresses #639
'''
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-02
In [1c9a21261f54a5ba029cebfdc25422679aecbed6/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="1c9a21261f54a5ba029cebfdc25422679aecbed6"
Allow all options for average neighbor degree (combinations of in and out degree). Addresses #639
'''
Comment in Trac by @hagberg, 2011-10-02
Replying to [comment:17 Aric Hagberg <aric.hagberg@…>]:
> In [1c9a21261f54a5ba029cebfdc25422679aecbed6/networkx]:
> '''
> #!CommitTicketReference repository="networkx" revision="1c9a21261f54a5ba029cebfdc25422679aecbed6"
> Allow all options for average neighbor degree (combinations of in and out degree). Addresses #639
> '''
Is this the right approach here? Needs tests and also definitions in the docs.
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-02
In [273ef2a637c15ebd7069d05d545a8103ffe5e9d7/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="273ef2a637c15ebd7069d05d545a8103ffe5e9d7"
Update documentation in neighbor degree.
Addresses #639
'''
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-02
In [0bf086792199676f116aefbf5ba9661831e70387/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="0bf086792199676f116aefbf5ba9661831e70387"
Allow in, out, and in+out degree in neighbor connectivity. Addresses #639
'''
Comment in Trac by @hagberg, 2011-10-02
Replying to [comment:20 Aric Hagberg <aric.hagberg@…>]:
> In [0bf086792199676f116aefbf5ba9661831e70387/networkx]:
> '''
> #!CommitTicketReference repository="networkx" revision="0bf086792199676f116aefbf5ba9661831e70387"
> Allow in, out, and in+out degree in neighbor connectivity. Addresses #639
> '''
Here is another approach to the various degree combinations using "in", "out" and "in+out". Is this better? Are the definitions right? Needs updated documentation and tests.
Comment in Trac by Oleguer Sagarra <oleguer.sagarra, 2011-10-03
Hi Aric, I just began working... let me take a look during the week and I'll get back to you on the weekend! Anyhow, I think the approach is ok but it needs to be corrected again for normalization issues. Also, I don't see how you implemented the proposed predecessor connectivity (see the first post; there are 4 sets of different data to be computed, grouped in two groups: neighbors and predecessors), but I need to take a deeper look. Will get back to you.
Comment in Trac by Aric Hagberg <aric.hagberg, 2011-10-07
In [9bc6654ed10644abea0e607618b34ef5432020e5/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="9bc6654ed10644abea0e607618b34ef5432020e5"
Fix == bug. Addresses #639
'''
Comment in Trac by @hagberg, 2011-10-10
Comment in Trac by trac user ugalss, 2011-11-01
Sorry for the delay. All seems to be ok now. For the predecessors calculation one just needs to reverse the net prior to running the function.
Comment in Trac by @hagberg, 2011-11-06
Actually I think that was a bug. I made a fix in #650 that I believe now handles the predecessors part correctly.
Comment in Trac by trac user ugalss, 2011-11-06
All good now then :) It seems to work all right.
Comment in Trac by @hagberg, 2011-11-14
Comment in Trac by trac user ugalss, 2012-01-24
Detected a minor bug. Just need to add 2 lines of code to solve it. I attach the new file here for version 1.7.
Comment in Trac by @hagberg, 2012-01-28
Can you add a comment about the bug and a test that shows the bug/fix?
Comment in Trac by unknown, 2012-01-29
When calling the function "average_degree_connectivity" with the option source="in", the function fails due to the call of _avg_deg_conn, which goes through the successors of each node i but in the weighted option looks up the edges of the directed links connecting i with its predecessors, and not the other way around. It is enough to add 2 lines of code in _avg_deg_conn, as done in the attached file.
######### Here I add an example of the bug ###################
'''
In [16]: import networkx as nx
In [17]: G=nx.DiGraph()
In [18]: G.add_edge(1,3,weight=3) # node 1 connects to 3, but 3 does not connect to 1
In [19]: G.add_edge(1,2,weight=3) # node 1 connects to 2, but 2 does not connect to 1.
In [21]: nx.assortativity.connectivity.average_degree_connectivity(G,source='in',target='out',weight='weight')
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
/Users/ugalss/<ipython-input-21-1a44745de4fa> in <module>()
----> 1 nx.assortativity.connectivity.average_degree_connectivity(G,source='in',target='out',weight='weight')
/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx/algorithms/assortativity/connectivity.pyc in average_degree_connectivity(G, source, target, nodes, weight)
    112         neighbors=G.successors
    113     return _avg_deg_conn(G, neighbors, source_degree, target_degree,
--> 114                          nodes=nodes, weight=weight)
    115
    116 k_nearest_neighbors=average_degree_connectivity
/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx/algorithms/assortativity/connectivity.pyc in _avg_deg_conn(G, neighbors, source_degree, target_degree, nodes, weight)
     23         else: # weight nbr degree by weight of (n,nbr) edge
     24             s = float(sum((G[n][nbr].get(weight,1)*d
---> 25                            for nbr,d in nbrdeg.items())))
     26         dnorm[k] += source_degree(n, weight=weight)
     27         dsum[k] += s
/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx/algorithms/assortativity/connectivity.pyc in <genexpr>((nbr, d))
     23         else: # weight nbr degree by weight of (n,nbr) edge
     24             s = float(sum((G[n][nbr].get(weight,1)*d
---> 25                            for nbr,d in nbrdeg.items())))
     26         dnorm[k] += source_degree(n, weight=weight)
     27         dsum[k] += s
KeyError: 1
'''
(It fails because it fetches the link between 3 and 1, whereas it should check the link between 1 and 3.)
Comment in Trac by @hagberg, 2012-04-08
I'm not sure that is the correct fix. Can you provide some test cases for directed weighted and unweighted graphs?
Comment in Trac by unknown, 2012-04-09
It does not seem to work with negative weights, but for the rest it seems ok:
In [19]: G=nx.DiGraph()
In [20]: G.add_edge(1,2,weight=1)
In [21]: G.add_edge(1,3,weight=2)
In [22]: G.add_edge(1,4,weight=3)
In [23]: G.add_edge(4,2,weight=3000)
In [24]: nx.assortativity.connectivity.average_degree_connectivity(G,source='in',target='out',weight=None)
Out[24]: {0: 0.0, 1: 3.0, 2: 2.0} # Ok (unweighted)
In [25]: nx.assortativity.connectivity.average_degree_connectivity(G,source='in',target='out',weight='weight')
Out[25]: {0: 0.0, 1: 3.0, 2: 1.0006664445184938} # Ok (weighted)
Comment in Trac by @hagberg, 2012-04-15
It still fails for the example above.
'''
In [11]: G=nx.DiGraph()
In [12]: G.add_edge(1,2,weight=3)
In [13]: G.add_edge(1,3,weight=3)
In [14]: nx.average_degree_connectivity(G,source='in',target='out',weight='weight')
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
/networkx-master/<ipython console> in <module>()
/home/aric/Software/networkx/networkx-master/networkx/algorithms/assortativity/connectivity.py in average_degree_connectivity(G, source, target, nodes, weight)
    112         neighbors=G.successors
    113     return _avg_deg_conn(G, neighbors, source_degree, target_degree,
--> 114                          nodes=nodes, weight=weight)
    115
    116 k_nearest_neighbors=average_degree_connectivity
/networkx-master/networkx/algorithms/assortativity/connectivity.py in _avg_deg_conn(G, neighbors, source_degree, target_degree, nodes, weight)
     23         else: # weight nbr degree by weight of (n,nbr) edge
     24             s = float(sum((G[n][nbr].get(weight,1)*d
---> 25                            for nbr,d in nbrdeg.items())))
     26         dnorm[k] += source_degree(n, weight=weight)
     27         dsum[k] += s
/networkx-master/networkx/algorithms/assortativity/connectivity.py in <genexpr>((nbr, d))
     23         else: # weight nbr degree by weight of (n,nbr) edge
     24             s = float(sum((G[n][nbr].get(weight,1)*d
---> 25                            for nbr,d in nbrdeg.items())))
     26         dnorm[k] += source_degree(n, weight=weight)
     27         dsum[k] += s
KeyError: 1
In [15]: nx.average_degree_connectivity(G,source='in',target='out')
Out[15]: {0: 0.0, 1: 2.0} # is this correct?
'''
Attachment in Trac by trac user Oleguer Sagarra, 2012-04-16: [connectivity.py](https://networkx.lanl.gov/trac/attachment/ticket/639/connectivity.py)
Comment in Trac by trac user Oleguer Sagarra, 2012-04-16
The problem is that the G.adjacency_iter iterator goes through all edges in a forward manner, hence in non-symmetric networks some links are missing. I added a fix for the problem, but I suggest that this routine should be rewritten using the function G.predecessors() if its performance is too slow (I don't know which way is faster). I uploaded the file connectivity.py.
Comment in Trac by @hagberg, 2012-05-27
I'm not sure that is the correct fix. And for sure we don't want to pass on a KeyError in the inner loop. Can you provide some test cases that show this bug and fix?
Comment in Trac by @hagberg, 2012-06-01
Comment in Trac by @hagberg, 2012-06-12
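To condense the thread: the crash happens because the weighted sum always reads edge data as G[n][nbr], which only exists for successor edges. A sketch of the direction-aware lookup that the merged patch above implements (the helper name and signature here are hypothetical):

```python
def weighted_nbr_degree_sum(G, n, nbrdeg, weight, predecessors=False):
    # For predecessors the edge runs nbr -> n, so its data lives at
    # G[nbr][n]; indexing G[n][nbr] there raises the KeyError seen above.
    if predecessors:
        return float(sum(G[nbr][n].get(weight, 1) * d
                         for nbr, d in nbrdeg.items()))
    return float(sum(G[n][nbr].get(weight, 1) * d
                     for nbr, d in nbrdeg.items()))
```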
2013-01-13T20:56:21
networkx/networkx
823
networkx__networkx-823
[ "1022" ]
2078f4b52f4ee98fa6a6069f6030b210c190332c
diff --git a/networkx/algorithms/clique.py b/networkx/algorithms/clique.py --- a/networkx/algorithms/clique.py +++ b/networkx/algorithms/clique.py @@ -26,6 +26,115 @@ 'number_of_cliques', 'cliques_containing_node', 'project_down', 'project_up'] +@not_implemented_for('directed') +def get_all_cliques(G): + """Returns all cliques in an undirected graph. + + This method returns cliques of size (cardinality) + k = 1, 2, 3, ..., maxDegree - 1. + + Where maxDegree is the maximal degree of any node in the graph. + + Keyword arguments + ----------------- + G: undirected graph + + Returns + ------- + generator of lists: generator of list for each clique. + + Notes + ----- + To obtain a list of all cliques, use list(get_all_cliques(G)). + + Based on the algorithm published by Zhang et al. (2005) [1]_ + and adapted to output all cliques discovered. + + This algorithm is not suitable for directed graphs. + + This algorithm ignores self-loops and parallel edges as + clique is not conventionally defined with such edges. + + There are often many cliques in graphs. + This algorithm however, hopefully, does not run out of memory + since it only keeps candidate sublists in memory and + continuously removes exhausted sublists. + + References + ---------- + .. [1] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J., + Langston, M.A., Samatova, N.F., + Genome-Scale Computational Approaches to Memory-Intensive + Applications in Systems Biology + Supercomputing, 2005. Proceedings of the ACM/IEEE SC 2005 + Conference , vol., no., pp. 12, 12-18 Nov. 2005 + doi: 10.1109/SC.2005.29 + http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129 + """ + + def greater_neighbors(G, a_node): + """Helper method used in get_all_cliques""" + nodes_sorted = sorted(G.nodes()) + a_node_index = nodes_sorted.index(a_node) + + neighbors_of_a_node = [] + + for another_node_index, another_node in enumerate(nodes_sorted): + if another_node_index > a_node_index and another_node in G.neighbors(a_node): + neighbors_of_a_node.append(another_node) + + return tuple(neighbors_of_a_node) + + # sorted list of nodes in graph + nodes_sorted = sorted(G.nodes()) + + # starting point: build all 2-clique sublists + clique_sublists = [] + for a_node_index, a_node in enumerate(nodes_sorted): + clique_sublist = {} + # sublist base, sb + clique_sublist['sb'] = [a_node] + # common neighbors, cn + clique_sublist['cn'] = greater_neighbors(G, a_node) + clique_sublists.append(clique_sublist) + + # output cliques of size k = 1 + for node in nodes_sorted: + yield [node] + + # output cliques of size k >= 2 + while clique_sublists: + a_sublist = clique_sublists.pop(0) + for node_added in a_sublist['cn']: + neighbors_of_node_added = greater_neighbors(G, node_added) + + current_sublist_base = [] + a_sublist['sb'] + [node_added] + current_sublist_cn = tuple(sorted(set(neighbors_of_node_added).intersection(a_sublist['cn']))) + + #print 'clique: '+str(current_sublist_base) + yield [node for node in current_sublist_base] + + for node in current_sublist_cn: + new_sublist_base = [] + current_sublist_base + new_sublist_base.append(node) + #print 'current_sublist_based =',str(current_sublist_base) + #print 'new_sublist_base =',str(new_sublist_base) + new_sublist_cn = tuple(sorted(set(current_sublist_cn).intersection(greater_neighbors(G, node)))) + + if len(new_sublist_cn) == 0: + #print 'clique: '+str(new_sublist_base) + yield [n for n in new_sublist_base] + elif len(new_sublist_cn) == 1: + #print 'clique: '+str(new_sublist_base) + #print 
'new_sublist_base + list(new_sublist_cn):',new_sublist_base+list(new_sublist_cn) + yield [n for n in new_sublist_base] + #print 'clique: '+str(new_sublist_base+new_sublist_cn) + + yield [n for n in new_sublist_base + list(new_sublist_cn)] + else: + #print 'candidate sublist: '+str([new_sublist_base, new_sublist_cn]) + clique_sublists.append({'sb': new_sublist_base, 'cn': new_sublist_cn}) + @not_implemented_for('directed') def find_cliques(G):
diff --git a/networkx/tests/test_cliques.py b/networkx/tests/test_cliques.py new file mode 100644 --- /dev/null +++ b/networkx/tests/test_cliques.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python + +"""Cliques +======= +""" + +from nose.tools import * +from networkx import * +from networkx.algorithms.clique import get_all_cliques + +class TestCliques(): + def test_paper_figure_4(self): + # Same graph as given in Fig. 4 of paper get_all_cliques is + # based on. + # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129 + G = Graph() + edges_fig_4 = [('a','b'),('a','c'),('a','d'),('a','e'), + ('b','c'),('b','d'),('b','e'), + ('c','d'),('c','e'), + ('d','e'), + ('f','b'),('f','c'),('f','g'), + ('g','f'),('g','c'),('g','d'),('g','e')] + G.add_edges_from(edges_fig_4) + + cliques = list(get_all_cliques(G)) + expected_cliques = [['a'], + ['b'], + ['c'], + ['d'], + ['e'], + ['f'], + ['g'], + ['a', 'b'], + ['a', 'b', 'd'], + ['a', 'b', 'd', 'e'], + ['a', 'b', 'e'], + ['a', 'c'], + ['a', 'c', 'd'], + ['a', 'c', 'd', 'e'], + ['a', 'c', 'e'], + ['a', 'd'], + ['a', 'd', 'e'], + ['a', 'e'], + ['b', 'c'], + ['b', 'c', 'd'], + ['b', 'c', 'd', 'e'], + ['b', 'c', 'e'], + ['b', 'c', 'f'], + ['b', 'd'], + ['b', 'd', 'e'], + ['b', 'e'], + ['b', 'f'], + ['c', 'd'], + ['c', 'd', 'e'], + ['c', 'd', 'e', 'g'], + ['c', 'd', 'g'], + ['c', 'e'], + ['c', 'e', 'g'], + ['c', 'f'], + ['c', 'f', 'g'], + ['c', 'g'], + ['d', 'e'], + ['d', 'e', 'g'], + ['d', 'g'], + ['e', 'g'], + ['f', 'g'], + ['a', 'b', 'c', 'd'], + ['a', 'b', 'c', 'd', 'e'], + ['a', 'b', 'c', 'e']] + + assert_equal(cliques, expected_cliques)
networkx.algorithms.maximal_matching does not always return the correct result.
networkx.algorithms.maximal_matching seems to sometimes generate bogus results. For example, the test case in https://gist.github.com/simonwagner/7679793#file-bug_matching-py returns set([(303, 312), (308, 312)]) as the matching, which is clearly wrong: both edges share node 312, and the edges of a matching must be node-disjoint. Interestingly enough, it returns the correct result for exactly the same graph if one recreates it from its edges. I would therefore conclude that the bug stems from incorrect handling of the vertex ordering within an edge [(a,b) vs. (b,a)] in an undirected graph.
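Since the gist is only linked here, below is a minimal stand-in check (the edges are made up) for the property the report says is violated, namely that the returned edges must be node-disjoint:

```python
import networkx as nx

G = nx.Graph()
G.add_edges_from([(303, 308), (303, 312), (308, 312)])  # stand-in edges

matching = nx.maximal_matching(G)
endpoints = [n for edge in matching for n in edge]
assert len(endpoints) == len(set(endpoints)), "matching edges must be node-disjoint"
```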
2013-01-22T12:26:27
networkx/networkx
828
networkx__networkx-828
[ "827" ]
8556a5226e64502e83b7695b93681407045e163b
diff --git a/networkx/readwrite/json_graph/adjacency.py b/networkx/readwrite/json_graph/adjacency.py --- a/networkx/readwrite/json_graph/adjacency.py +++ b/networkx/readwrite/json_graph/adjacency.py @@ -1,4 +1,4 @@ -# Copyright (C) 2011 by +# Copyright (C) 2011-2013 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> @@ -8,7 +8,7 @@ from itertools import count,repeat import json import networkx as nx -__author__ = """Aric Hagberg ([email protected]))""" +__author__ = """Aric Hagberg <[email protected]>""" __all__ = ['adjacency_data', 'adjacency_graph'] def adjacency_data(G): @@ -18,7 +18,7 @@ def adjacency_data(G): Parameters ---------- G : NetworkX graph - + Returns ------- data : dict @@ -34,47 +34,54 @@ def adjacency_data(G): >>> import json >>> s = json.dumps(data) - + Notes ----- - Graph, node, and link attributes are stored in this format but keys - for attributes must be strings if you want to serialize with JSON. + Graph, node, and link attributes will be written when using this format + but attribute keys must be strings if you want to serialize the resulting + data with JSON. See Also -------- adjacency_graph, node_link_data, tree_data """ + multigraph = G.is_multigraph() data = {} data['directed'] = G.is_directed() - data['multigraph'] = G.is_multigraph() + data['multigraph'] = multigraph data['graph'] = list(G.graph.items()) data['nodes'] = [] data['adjacency'] = [] for n,nbrdict in G.adjacency_iter(): data['nodes'].append(dict(id=n, **G.node[n])) adj = [] - for nbr,d in nbrdict.items(): - adj.append(dict(id=nbr, **d)) + if multigraph: + for nbr,key in nbrdict.items(): + for k,d in key.items(): + adj.append(dict(id=nbr, key=k, **d)) + else: + for nbr,d in nbrdict.items(): + adj.append(dict(id=nbr, **d)) data['adjacency'].append(adj) return data def adjacency_graph(data, directed=False, multigraph=True): - """Return graph from adjacency data format. + """Return graph from adjacency data format. Parameters ---------- data : dict Adjacency list formatted graph data - + Returns ------- G : NetworkX graph A NetworkX graph object - directed : bool + directed : bool If True, and direction not specified in data, return a directed graph. - multigraph : bool + multigraph : bool If True, and multigraph not specified in data, return a multigraph. 
Examples @@ -99,13 +106,18 @@ def adjacency_graph(data, directed=False, multigraph=True): graph.graph = dict(data.get('graph',[])) mapping=[] for d in data['nodes']: - node = d.pop('id') + node_data = d.copy() + node = node_data.pop('id') mapping.append(node) - graph.add_node(node, attr_dict=d) + graph.add_node(node, attr_dict=node_data) for i,d in enumerate(data['adjacency']): source = mapping[i] for tdata in d: - target=tdata.pop('id') - graph.add_edge(source,target,attr_dict=tdata) + target_data = tdata.copy() + target = target_data.pop('id') + key = target_data.pop('key', None) + if not multigraph or key is None: + graph.add_edge(source,target,attr_dict=tdata) + else: + graph.add_edge(source,target,key=key, attr_dict=tdata) return graph - diff --git a/networkx/readwrite/json_graph/node_link.py b/networkx/readwrite/json_graph/node_link.py --- a/networkx/readwrite/json_graph/node_link.py +++ b/networkx/readwrite/json_graph/node_link.py @@ -1,4 +1,4 @@ -# Copyright (C) 2011 by +# Copyright (C) 2011-2013 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> @@ -7,17 +7,17 @@ from itertools import count,repeat import json import networkx as nx -__author__ = """Aric Hagberg ([email protected]))""" +__author__ = """Aric Hagberg <[email protected]>""" __all__ = ['node_link_data', 'node_link_graph'] -def node_link_data(G): +def node_link_data(G): """Return data in node-link format that is suitable for JSON serialization and use in Javascript documents. Parameters ---------- G : NetworkX graph - + Returns ------- data : dict @@ -33,38 +33,45 @@ def node_link_data(G): >>> import json >>> s = json.dumps(data) - + Notes ----- - Graph, node, and link attributes are stored in this format but keys + Graph, node, and link attributes are stored in this format but keys for attributes must be strings if you want to serialize with JSON. See Also -------- node_link_graph, adjacency_data, tree_data """ + multigraph = G.is_multigraph() mapping = dict(zip(G,count())) data = {} data['directed'] = G.is_directed() - data['multigraph'] = G.is_multigraph() + data['multigraph'] = multigraph data['graph'] = list(G.graph.items()) data['nodes'] = [ dict(id=n, **G.node[n]) for n in G ] - data['links'] = [ dict(source=mapping[u], target=mapping[v], **d) - for u,v,d in G.edges(data=True) ] + if multigraph: + data['links'] = [ dict(source=mapping[u], target=mapping[v], key=k, **d) + for u,v,k,d in G.edges(keys=True, data=True) ] + else: + data['links'] = [ dict(source=mapping[u], target=mapping[v], **d) + for u,v,d in G.edges(data=True) ] + return data + def node_link_graph(data, directed=False, multigraph=True): - """Return graph from node-link data format. + """Return graph from node-link data format. Parameters ---------- data : dict node-link formatted graph data - - directed : bool + + directed : bool If True, and direction not specified in data, return a directed graph. - multigraph : bool + multigraph : bool If True, and multigraph not specified in data, return a multigraph. 
Returns @@ -100,10 +107,10 @@ def node_link_graph(data, directed=False, multigraph=True): nodedata = dict((str(k),v) for k,v in d.items() if k!='id') graph.add_node(node, **nodedata) for d in data['links']: - source = d.pop('source') - target = d.pop('target') - edgedata = dict((str(k),v) for k,v in d.items() + link_data = d.copy() + source = link_data.pop('source') + target = link_data.pop('target') + edgedata = dict((str(k),v) for k,v in d.items() if k!='source' and k!='target') graph.add_edge(mapping[source],mapping[target],**edgedata) return graph -
diff --git a/networkx/readwrite/json_graph/tests/test_adjacency.py b/networkx/readwrite/json_graph/tests/test_adjacency.py --- a/networkx/readwrite/json_graph/tests/test_adjacency.py +++ b/networkx/readwrite/json_graph/tests/test_adjacency.py @@ -28,14 +28,25 @@ def test_graph_attributes(self): assert_equal(H.graph[1],'one') assert_equal(H.node[1]['color'],'red') assert_equal(H[1][2]['width'],7) - + def test_digraph(self): G = nx.DiGraph() + G.add_path([1,2,3]) H = adjacency_graph(adjacency_data(G)) assert_true(H.is_directed()) + nx.is_isomorphic(G,H) def test_multidigraph(self): G = nx.MultiDiGraph() + G.add_path([1,2,3]) H = adjacency_graph(adjacency_data(G)) assert_true(H.is_directed()) assert_true(H.is_multigraph()) + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1,2,key='first') + G.add_edge(1,2,key='second',color='blue') + H = adjacency_graph(adjacency_data(G)) + nx.is_isomorphic(G,H) + assert_equal(H[1][2]['second']['color'],'blue') diff --git a/networkx/readwrite/json_graph/tests/test_node_link.py b/networkx/readwrite/json_graph/tests/test_node_link.py --- a/networkx/readwrite/json_graph/tests/test_node_link.py +++ b/networkx/readwrite/json_graph/tests/test_node_link.py @@ -34,8 +34,11 @@ def test_digraph(self): H = node_link_graph(node_link_data(G)) assert_true(H.is_directed()) - def test_multidigraph(self): - G = nx.MultiDiGraph() + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1,2,key='first') + G.add_edge(1,2,key='second',color='blue') H = node_link_graph(node_link_data(G)) - assert_true(H.is_directed()) - assert_true(H.is_multigraph()) + nx.is_isomorphic(G,H) + assert_equal(H[1][2]['second']['color'],'blue')
networkx.readwrite.json_graph bug in serializing MultiGraphs ``` python import networkx as nx from networkx.readwrite import json_graph edges = [(0, 1, {'name': 'a', 'value': 1}), (0, 1, {'name': 'b', 'value': 2}), (0, 2, {'name': 'a', 'value': 0})] G = nx.MultiGraph() for source, target, attr in edges: G.add_edge(source, target, key=attr['name'], attr_dict=attr) dct = json_graph.adjacency_data(G) H = json_graph.adjacency_graph(dct) assert sorted(H.nodes()) == sorted(G.nodes()) assert sorted(H.edges()) == sorted(G.edges()) ``` Produces: ``` python Traceback (most recent call last): File "nx_bug.py", line 16, in <module> assert sorted(H.edges()) == sorted(G.edges()) AssertionError ``` On version: ``` python >>> import networkx >>> print networkx.__version__ 1.8.dev_20130123112027 ```
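The round trip fails because the multigraph's parallel-edge keys are dropped on serialization. A minimal sketch (not the library's actual code, and assuming the 1.x adjacency_iter API) of recording the key per parallel edge, which is essentially what the patch above adds:

```python
def multigraph_adjacency(G):
    # For a MultiGraph, adjacency_iter() yields nbr -> key -> data,
    # so the key must be stored alongside each parallel edge.
    data = {'nodes': [], 'adjacency': []}
    for n, nbrdict in G.adjacency_iter():
        data['nodes'].append({'id': n})
        adj = [dict(id=nbr, key=key, **d)
               for nbr, keydict in nbrdict.items()
               for key, d in keydict.items()]
        data['adjacency'].append(adj)
    return data
```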
2013-01-26T15:53:43
networkx/networkx
902
networkx__networkx-902
[ "894" ]
7256e44da2f09b80264dd1d0502de935877fa307
diff --git a/networkx/generators/random_graphs.py b/networkx/generators/random_graphs.py --- a/networkx/generators/random_graphs.py +++ b/networkx/generators/random_graphs.py @@ -3,7 +3,7 @@ Generators for random graphs. """ -# Copyright (C) 2004-2011 by +# Copyright (C) 2004-2011 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> @@ -53,10 +53,10 @@ def fast_gnp_random_graph(n, p, seed=None, directed=False): p : float Probability for edge creation. seed : int, optional - Seed for random number generator (default=None). + Seed for random number generator (default=None). directed : bool, optional (default=False) - If True return a directed graph - + If True return a directed graph + Notes ----- The G_{n,p} graph algorithm chooses each of the [n(n-1)]/2 @@ -64,7 +64,7 @@ def fast_gnp_random_graph(n, p, seed=None, directed=False): This algorithm is O(n+m) where m is the expected number of edges m=p*n*(n-1)/2. - + It should be faster than gnp_random_graph when p is small and the expected number of edges is small (sparse graph). @@ -74,7 +74,7 @@ def fast_gnp_random_graph(n, p, seed=None, directed=False): References ---------- - .. [1] Vladimir Batagelj and Ulrik Brandes, + .. [1] Vladimir Batagelj and Ulrik Brandes, "Efficient generation of large random networks", Phys. Rev. E, 71, 036113, 2005. """ @@ -89,7 +89,7 @@ def fast_gnp_random_graph(n, p, seed=None, directed=False): v = 1 # Nodes in graph are from 0,n-1 (this is the second node index). w = -1 - lp = math.log(1.0 - p) + lp = math.log(1.0 - p) if directed: G=nx.DiGraph(G) @@ -122,7 +122,7 @@ def gnp_random_graph(n, p, seed=None, directed=False): Chooses each of the possible edges with probability p. - This is also called binomial_graph and erdos_renyi_graph. + This is also called binomial_graph and erdos_renyi_graph. Parameters ---------- @@ -131,11 +131,11 @@ def gnp_random_graph(n, p, seed=None, directed=False): p : float Probability for edge creation. seed : int, optional - Seed for random number generator (default=None). + Seed for random number generator (default=None). directed : bool, optional (default=False) - If True return a directed graph - - See Also + If True return a directed graph + + See Also -------- fast_gnp_random_graph @@ -169,7 +169,7 @@ def gnp_random_graph(n, p, seed=None, directed=False): edges=itertools.combinations(range(n),2) for e in edges: - if random.random() < p: + if random.random() < p: G.add_edge(*e) return G @@ -192,8 +192,8 @@ def dense_gnm_random_graph(n, m, seed=None): m : int The number of edges. seed : int, optional - Seed for random number generator (default=None). - + Seed for random number generator (default=None). + See Also -------- gnm_random_graph() @@ -215,10 +215,10 @@ def dense_gnm_random_graph(n, m, seed=None): else: G=empty_graph(n) G.name="dense_gnm_random_graph(%s,%s)"%(n,m) - + if n==1 or m>=mmax: return G - + if seed is not None: random.seed(seed) @@ -250,9 +250,9 @@ def gnm_random_graph(n, m, seed=None, directed=False): m : int The number of edges. seed : int, optional - Seed for random number generator (default=None). + Seed for random number generator (default=None). 
directed : bool, optional (default=False) - If True return a directed graph + If True return a directed graph """ if directed: G=nx.DiGraph() @@ -295,18 +295,18 @@ def newman_watts_strogatz_graph(n, k, p, seed=None): The number of nodes k : int Each node is connected to k nearest neighbors in ring topology - p : float + p : float The probability of adding a new edge for each edge - seed : int, optional + seed : int, optional seed for random number generator (default=None) Notes ----- First create a ring over n nodes. Then each node in the ring is - connected with its k nearest neighbors (k-1 neighbors if k is odd). - Then shortcuts are created by adding new edges as follows: - for each edge u-v in the underlying "n-ring with k nearest neighbors" - with probability p add a new edge u-w with randomly-chosen existing + connected with its k nearest neighbors (k-1 neighbors if k is odd). + Then shortcuts are created by adding new edges as follows: + for each edge u-v in the underlying "n-ring with k nearest neighbors" + with probability p add a new edge u-w with randomly-chosen existing node w. In contrast with watts_strogatz_graph(), no edges are removed. See Also @@ -322,29 +322,32 @@ def newman_watts_strogatz_graph(n, k, p, seed=None): """ if seed is not None: random.seed(seed) - if k>=n // 2: - raise nx.NetworkXError("k>=n/2, choose smaller k or larger n") + if k>=n: + raise nx.NetworkXError("k>=n, choose smaller k or larger n") G=empty_graph(n) G.name="newman_watts_strogatz_graph(%s,%s,%s)"%(n,k,p) nlist = G.nodes() fromv = nlist # connect the k/2 neighbors - for n in range(1, k // 2+1): - tov = fromv[n:] + fromv[0:n] # the first n are now last + for j in range(1, k // 2+1): + tov = fromv[j:] + fromv[0:j] # the first j are now last for i in range(len(fromv)): G.add_edge(fromv[i], tov[i]) # for each edge u-v, with probability p, randomly select existing - # node w and add new edge u-w - e = G.edges() + # node w and add new edge u-w + e = G.edges() for (u, v) in e: if random.random() < p: w = random.choice(nlist) # no self-loops and reject if edge u-w exists # is that the correct NWS model? - while w == u or G.has_edge(u, w): + while w == u or G.has_edge(u, w): w = random.choice(nlist) - G.add_edge(u,w) - return G + if G.degree(u) >= n-1: + break # skip this rewiring + else: + G.add_edge(u,w) + return G def watts_strogatz_graph(n, k, p, seed=None): @@ -357,9 +360,9 @@ def watts_strogatz_graph(n, k, p, seed=None): The number of nodes k : int Each node is connected to k nearest neighbors in ring topology - p : float - The probability of rewiring each edge - seed : int, optional + p : float + The probability of rewiring each edge + seed : int, optional Seed for random number generator (default=None) See Also @@ -370,11 +373,11 @@ def watts_strogatz_graph(n, k, p, seed=None): Notes ----- First create a ring over n nodes. Then each node in the ring is - connected with its k nearest neighbors (k-1 neighbors if k is odd). - Then shortcuts are created by replacing some edges as follows: - for each edge u-v in the underlying "n-ring with k nearest neighbors" - with probability p replace it with a new edge u-w with uniformly - random choice of existing node w. + connected with its k nearest neighbors (k-1 neighbors if k is odd). + Then shortcuts are created by replacing some edges as follows: + for each edge u-v in the underlying "n-ring with k nearest neighbors" + with probability p replace it with a new edge u-w with uniformly + random choice of existing node w. 
In contrast with newman_watts_strogatz_graph(), the random rewiring does not increase the number of edges. The rewired graph @@ -386,8 +389,8 @@ def watts_strogatz_graph(n, k, p, seed=None): Collective dynamics of small-world networks, Nature, 393, pp. 440--442, 1998. """ - if k>=n/2: - raise nx.NetworkXError("k>=n/2, choose smaller k or larger n") + if k>=n: + raise nx.NetworkXError("k>=n, choose smaller k or larger n") if seed is not None: random.seed(seed) @@ -404,21 +407,24 @@ def watts_strogatz_graph(n, k, p, seed=None): for j in range(1, k // 2+1): # outer loop is neighbors targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list # inner loop in node order - for u,v in zip(nodes,targets): + for u,v in zip(nodes,targets): if random.random() < p: w = random.choice(nodes) # Enforce no self-loops or multiple edges - while w == u or G.has_edge(u, w): + while w == u or G.has_edge(u, w): w = random.choice(nodes) - G.remove_edge(u,v) - G.add_edge(u,w) - return G + if G.degree(u) >= n-1: + break # skip this rewiring + else: + G.remove_edge(u,v) + G.add_edge(u,w) + return G def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None): """Return a connected Watts-Strogatz small-world graph. - Attempt to generate a connected realization by repeated - generation of Watts-Strogatz small-world graphs. + Attempt to generate a connected realization by repeated + generation of Watts-Strogatz small-world graphs. An exception is raised if the maximum number of tries is exceeded. Parameters @@ -427,10 +433,10 @@ def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None): The number of nodes k : int Each node is connected to k nearest neighbors in ring topology - p : float - The probability of rewiring each edge + p : float + The probability of rewiring each edge tries : int - Number of attempts to generate a connected graph. + Number of attempts to generate a connected graph. seed : int, optional The seed for random number generator. @@ -452,7 +458,7 @@ def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None): def random_regular_graph(d, n, seed=None): """Return a random regular graph of n nodes each with degree d. - + The resulting graph G has no self-loops or parallel edges. Parameters @@ -495,8 +501,8 @@ def random_regular_graph(d, n, seed=None): random.seed(seed) def _suitable(edges, potential_edges): - # Helper subroutine to check if there are suitable edges remaining - # If False, the generation of the graph has failed + # Helper subroutine to check if there are suitable edges remaining + # If False, the generation of the graph has failed if not potential_edges: return True for s1 in potential_edges: @@ -537,15 +543,15 @@ def _try_creation(): stubs = [node for node, potential in potential_edges.items() for _ in range(potential)] - return edges + return edges - # Even though a suitable edge set exists, - # the generation of such a set is not guaranteed. + # Even though a suitable edge set exists, + # the generation of such a set is not guaranteed. # Try repeatedly to find one. edges = _try_creation() while edges is None: edges = _try_creation() - + G = nx.Graph() G.name = "random_regular_graph(%s, %s)" % (d, n) G.add_edges_from(edges) @@ -563,14 +569,14 @@ def _random_subset(seq,m): x=random.choice(seq) targets.add(x) return targets - + def barabasi_albert_graph(n, m, seed=None): """Return random graph using Barabási-Albert preferential attachment model. 
- + A graph of n nodes is grown by attaching new nodes each with m edges that are preferentially attached to existing nodes with high degree. - + Parameters ---------- n : int @@ -578,12 +584,12 @@ def barabasi_albert_graph(n, m, seed=None): m : int Number of edges to attach from a new node to existing nodes seed : int, optional - Seed for random number generator (default=None). + Seed for random number generator (default=None). Returns ------- G : Graph - + Notes ----- The initialization is a graph with with m nodes and no edges. @@ -593,38 +599,38 @@ def barabasi_albert_graph(n, m, seed=None): .. [1] A. L. Barabási and R. Albert "Emergence of scaling in random networks", Science 286, pp 509-512, 1999. """ - + if m < 1 or m >=n: raise nx.NetworkXError(\ "Barabási-Albert network must have m>=1 and m<n, m=%d,n=%d"%(m,n)) if seed is not None: - random.seed(seed) + random.seed(seed) - # Add m initial nodes (m0 in barabasi-speak) + # Add m initial nodes (m0 in barabasi-speak) G=empty_graph(m) G.name="barabasi_albert_graph(%s,%s)"%(n,m) # Target nodes for new edges targets=list(range(m)) - # List of existing nodes, with nodes repeated once for each adjacent edge - repeated_nodes=[] + # List of existing nodes, with nodes repeated once for each adjacent edge + repeated_nodes=[] # Start adding the other n-m nodes. The first node is m. - source=m - while source<n: + source=m + while source<n: # Add edges to m nodes from the source. - G.add_edges_from(zip([source]*m,targets)) + G.add_edges_from(zip([source]*m,targets)) # Add one node to the list for each new edge just created. repeated_nodes.extend(targets) # And the new node "source" has m edges to add to the list. - repeated_nodes.extend([source]*m) - # Now choose m unique nodes from the existing nodes - # Pick uniformly from repeated_nodes (preferential attachement) + repeated_nodes.extend([source]*m) + # Now choose m unique nodes from the existing nodes + # Pick uniformly from repeated_nodes (preferential attachement) targets = _random_subset(repeated_nodes,m) source += 1 return G def powerlaw_cluster_graph(n, m, p, seed=None): """Holme and Kim algorithm for growing graphs with powerlaw - degree distribution and approximate average clustering. + degree distribution and approximate average clustering. Parameters ---------- @@ -635,21 +641,21 @@ def powerlaw_cluster_graph(n, m, p, seed=None): p : float, Probability of adding a triangle after adding a random edge seed : int, optional - Seed for random number generator (default=None). - + Seed for random number generator (default=None). + Notes ----- - The average clustering has a hard time getting above + The average clustering has a hard time getting above a certain cutoff that depends on m. This cutoff is often quite low. Note that the transitivity (fraction of triangles to possible - triangles) seems to go down with network size. + triangles) seems to go down with network size. It is essentially the Barabási-Albert (B-A) growth model with an extra step that each random edge is followed by a chance of making an edge to one of its neighbors too (and thus a triangle). - + This algorithm improves on B-A in the sense that it enables a - higher average clustering to be attained if desired. + higher average clustering to be attained if desired. 
It seems possible to have a disconnected graph with this algorithm since the initial m nodes may not be all linked to a new node @@ -670,35 +676,35 @@ def powerlaw_cluster_graph(n, m, p, seed=None): raise nx.NetworkXError(\ "NetworkXError p must be in [0,1], p=%f"%(p)) if seed is not None: - random.seed(seed) + random.seed(seed) G=empty_graph(m) # add m initial nodes (m0 in barabasi-speak) G.name="Powerlaw-Cluster Graph" repeated_nodes=G.nodes() # list of existing nodes to sample from - # with nodes repeated once for each adjacent edge + # with nodes repeated once for each adjacent edge source=m # next node is m while source<n: # Now add the other n-1 nodes possible_targets = _random_subset(repeated_nodes,m) # do one preferential attachment for new node target=possible_targets.pop() - G.add_edge(source,target) + G.add_edge(source,target) repeated_nodes.append(target) # add one node to list for each new link count=1 while count<m: # add m-1 more new links - if random.random()<p: # clustering step: add triangle + if random.random()<p: # clustering step: add triangle neighborhood=[nbr for nbr in G.neighbors(target) \ if not G.has_edge(source,nbr) \ and not nbr==source] if neighborhood: # if there is a neighbor without a link nbr=random.choice(neighborhood) G.add_edge(source,nbr) # add triangle - repeated_nodes.append(nbr) + repeated_nodes.append(nbr) count=count+1 continue # go to top of while loop # else do preferential attachment step if above fails target=possible_targets.pop() - G.add_edge(source,target) - repeated_nodes.append(target) + G.add_edge(source,target) + repeated_nodes.append(target) count=count+1 repeated_nodes.extend([source]*m) # add source node to list m times @@ -713,7 +719,7 @@ def random_lobster(n, p1, p2, seed=None): A caterpillar is a tree that reduces to a path graph when pruning all leaf nodes (p2=0). - + Parameters ---------- n : int @@ -723,7 +729,7 @@ def random_lobster(n, p1, p2, seed=None): p2 : float Probability of adding an edge one level beyond backbone seed : int, optional - Seed for random number generator (default=None). + Seed for random number generator (default=None). """ # a necessary ingredient in any self-respecting graph library if seed is not None: @@ -747,7 +753,7 @@ def random_shell_graph(constructor, seed=None): Parameters ---------- - constructor: a list of three-tuples + constructor: a list of three-tuples (n,m,d) for each shell starting at the center shell. n : int The number of nodes in the shell @@ -757,12 +763,12 @@ def random_shell_graph(constructor, seed=None): The ratio of inter-shell (next) edges to intra-shell edges. d=0 means no intra shell edges, d=1 for the last shell seed : int, optional - Seed for random number generator (default=None). - + Seed for random number generator (default=None). 
+ Examples -------- >>> constructor=[(10,20,0.8),(20,40,0.8)] - >>> G=nx.random_shell_graph(constructor) + >>> G=nx.random_shell_graph(constructor) """ G=empty_graph(0) @@ -771,7 +777,7 @@ def random_shell_graph(constructor, seed=None): if seed is not None: random.seed(seed) - glist=[] + glist=[] intra_edges=[] nnodes=0 # create gnm graphs for each shell @@ -782,10 +788,10 @@ def random_shell_graph(constructor, seed=None): gnm_random_graph(n,inter_edges), first_label=nnodes) glist.append(g) - nnodes+=n + nnodes+=n G=nx.operators.union(G,g) - # connect the shells randomly + # connect the shells randomly for gi in range(len(glist)-1): nlist1=glist[gi].nodes() nlist2=glist[gi+1].nodes() @@ -812,15 +818,15 @@ def random_powerlaw_tree(n, gamma=3, seed=None, tries=100): gamma : float Exponent of the power-law seed : int, optional - Seed for random number generator (default=None). + Seed for random number generator (default=None). tries : int - Number of attempts to adjust sequence to make a tree + Number of attempts to adjust sequence to make a tree Notes ----- A trial powerlaw degree sequence is chosen and then elements are swapped with new elements from a powerlaw distribution until - the sequence makes a tree (#edges=#nodes-1). + the sequence makes a tree (#edges=#nodes-1). """ from networkx.generators.degree_seq import degree_sequence_tree @@ -847,26 +853,26 @@ def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100): gamma : float Exponent of the power-law seed : int, optional - Seed for random number generator (default=None). + Seed for random number generator (default=None). tries : int - Number of attempts to adjust sequence to make a tree + Number of attempts to adjust sequence to make a tree Notes ----- A trial powerlaw degree sequence is chosen and then elements are swapped with new elements from a powerlaw distribution until - the sequence makes a tree (#edges=#nodes-1). + the sequence makes a tree (#edges=#nodes-1). """ if seed is not None: random.seed(seed) - # get trial sequence + # get trial sequence z=nx.utils.powerlaw_sequence(n,exponent=gamma) # round to integer values in the range [0,n] zseq=[min(n, max( int(round(s)),0 )) for s in z] - + # another sequence to swap values from z=nx.utils.powerlaw_sequence(tries,exponent=gamma) # round to integer values in the range [0,n] @@ -877,7 +883,7 @@ def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100): return zseq index=random.randint(0,n-1) zseq[index]=swap.pop() - + raise nx.NetworkXError(\ "Exceeded max (%d) attempts for a valid tree sequence."%tries) return False
diff --git a/networkx/generators/tests/test_random_graphs.py b/networkx/generators/tests/test_random_graphs.py --- a/networkx/generators/tests/test_random_graphs.py +++ b/networkx/generators/tests/test_random_graphs.py @@ -117,3 +117,13 @@ def test_gnm(self): G=gnm_random_graph(10,-1.1) assert_equal(len(G),10) assert_equal(len(G.edges()),0) + + def test_watts_strogatz_big_k(self): + assert_raises(networkx.exception.NetworkXError, + watts_strogatz_graph, 10, 10, 0.25) + assert_raises(networkx.exception.NetworkXError, + newman_watts_strogatz_graph, 10, 10, 0.25) + # could create an infinite loop, now doesn't + # infinite loop used to occur when a node has degree n-1 and needs to rewire + watts_strogatz_graph(10, 9, 0.25, seed=0) + newman_watts_strogatz_graph(10, 9, 0.5, seed=0)
Fixed a wrong check in watts_strogatz_graph
Dear networkx developers,
there was a wrong check in watts_strogatz_graph which made the following valid code break:

    import networkx as nx
    nx.watts_strogatz_graph(10, 6, 0.0)

This would give the following error:

    networkx.exception.NetworkXError: k>=n/2, choose smaller k or larger n

However, it should yield a ring network with every node attached to 6 neighbours. This patch fixes it. Could you please merge?
Best regards,
Andreas Amann
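Note that the merged patch does more than relax the check to k>=n: it also guards the rewiring loop, since a node already adjacent to all others has no valid new target and the search for one would never terminate. A condensed sketch of that guard (the helper name is hypothetical):

```python
import random

def try_rewire(G, u, v, nodes):
    n = len(nodes)
    w = random.choice(nodes)
    while w == u or G.has_edge(u, w):
        if G.degree(u) >= n - 1:
            return  # u is saturated: skip this rewiring
        w = random.choice(nodes)
    G.remove_edge(u, v)
    G.add_edge(u, w)
```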
2013-07-18T17:25:42
networkx/networkx
914
networkx__networkx-914
[ "912" ]
7d9682a07dcae30acab3c4841e33d31f727a3fb2
diff --git a/networkx/readwrite/gexf.py b/networkx/readwrite/gexf.py --- a/networkx/readwrite/gexf.py +++ b/networkx/readwrite/gexf.py @@ -395,7 +395,10 @@ def add_attributes(self, node_or_edge, xml_obj, data, default): # static data e=Element("attvalue") e.attrib['for']=attr_id - e.attrib['value']=make_str(v) + if type(v) == bool: + e.attrib['value']=make_str(v).lower() + else: + e.attrib['value']=make_str(v) attvalues.append(e) xml_obj.append(attvalues) return data
diff --git a/networkx/readwrite/tests/test_gexf.py b/networkx/readwrite/tests/test_gexf.py --- a/networkx/readwrite/tests/test_gexf.py +++ b/networkx/readwrite/tests/test_gexf.py @@ -303,4 +303,11 @@ def test_write_with_node_attributes(self): obtained = '\n'.join(nx.generate_gexf(G)) assert_equal( expected, obtained ) - + def test_bool(self): + G=nx.Graph() + G.add_node(1, testattr=True) + fh = io.BytesIO() + nx.write_gexf(G,fh) + fh.seek(0) + H=nx.read_gexf(fh,node_type=int) + assert_equal(H.node[1]['testattr'], True)
[NX1.7] Crash with read_gexf(g) if there are boolean attributes
Repro steps:
>>> import networkx as nx
>>> g = nx.Graph()
>>> g.add_node(1, attrib=True)
>>> nx.write_gexf(g, 'out.gexf')
>>> nx.read_gexf('out.gexf')
How to fix it:
In networkx/readwrite/gexf.py:
line 208: convert_bool={'true':True,'false':False}
edit into:
line 208: convert_bool={'True':True, 'false':False}
That probably isn't the correct fix. The file should have lower case "true" and "false" according to the GEXF spec. So it is the writer, not the reader, that needs to be fixed.
Oh. How about:
In networkx/utils/misc.py, add the following after line 70:

    if type(t) == bool:
        if t:
            return 'true'
        else:
            return 'false'

That will mess with other routines that expect the capitalization. Looks like make_str should just be defined for the gexf case explicitly in gexf.py instead of importing from utils. Seems to me that when the format has a specified need for lower case we should handle it specifically in that readwrite module.
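The fix that was eventually merged (see the patch above) follows this suggestion: make_str is left alone, and booleans are lowercased only where the GEXF writer serializes attribute values, roughly:

```python
def gexf_attr_value(v):
    # GEXF requires lowercase 'true'/'false', but Python's str(True) is 'True'.
    if isinstance(v, bool):
        return str(v).lower()
    return str(v)
```

With this in place, G.add_node(1, testattr=True) survives a write/read round trip, which is exactly what the added test checks.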
2013-07-24T23:20:07
networkx/networkx
926
networkx__networkx-926
[ "885" ]
771b9643cfe4876ce4a3cb1b5150abf34863612b
diff --git a/networkx/readwrite/gexf.py b/networkx/readwrite/gexf.py --- a/networkx/readwrite/gexf.py +++ b/networkx/readwrite/gexf.py @@ -358,6 +358,10 @@ def edge_key_data(G): source=source_id,target=target_id, **kw) default=G.graph.get('edge_default',{}) + if self.version == '1.1': + edge_data=self.add_slices(edge_element, edge_data) + else: + edge_data=self.add_spells(edge_element, edge_data) edge_data=self.add_viz(edge_element,edge_data) edge_data=self.add_attributes("edge", edge_element, edge_data, default) @@ -493,19 +497,19 @@ def add_parents(self,node_element,node_data): node_element.append(parents_element) return node_data - def add_slices(self,node_element,node_data): - slices=node_data.pop('slices',False) + def add_slices(self,node_or_edge_element,node_or_edge_data): + slices=node_or_edge_data.pop('slices',False) if slices: slices_element=Element('slices') for start,end in slices: e=Element('slice',start=str(start),end=str(end)) slices_element.append(e) - node_element.append(slices_element) - return node_data + node_or_edge_element.append(slices_element) + return node_or_edge_data - def add_spells(self,node_element,node_data): - spells=node_data.pop('spells',False) + def add_spells(self,node_or_edge_element,node_or_edge_data): + spells=node_or_edge_data.pop('spells',False) if spells: spells_element=Element('spells') for start,end in spells: @@ -515,8 +519,8 @@ def add_spells(self,node_element,node_data): if end is not None: e.attrib['end']=make_str(end) spells_element.append(e) - node_element.append(spells_element) - return node_data + node_or_edge_element.append(spells_element) + return node_or_edge_data def write(self, fh): @@ -739,8 +743,8 @@ def add_parents(self, data, node_xml): data['parents'].append(parent) return data - def add_slices(self, data, node_xml): - slices_element=node_xml.find("{%s}slices"%self.NS_GEXF) + def add_slices(self, data, node_or_edge_xml): + slices_element=node_or_edge_xml.find("{%s}slices"%self.NS_GEXF) if slices_element is not None: data['slices']=[] for s in slices_element.findall("{%s}slice"%self.NS_GEXF): @@ -749,8 +753,8 @@ def add_slices(self, data, node_xml): data['slices'].append((start,end)) return data - def add_spells(self, data, node_xml): - spells_element=node_xml.find("{%s}spells"%self.NS_GEXF) + def add_spells(self, data, node_or_edge_xml): + spells_element=node_or_edge_xml.find("{%s}spells"%self.NS_GEXF) if spells_element is not None: data['spells']=[] for s in spells_element.findall("{%s}spell"%self.NS_GEXF): @@ -782,6 +786,11 @@ def add_edge(self, G, edge_element, edge_attr): data = self.decode_attr_elements(edge_attr, edge_element) data = self.add_start_end(data,edge_element) + if self.version=='1.1': + data = self.add_slices(data, edge_element) # add slices + else: + data = self.add_spells(data, edge_element) # add spells + # GEXF stores edge ids as an attribute # NetworkX uses them as keys in multigraphs # if networkx_key is not specified as an attribute
Error writing edge spells to .gexf
Problem: I am trying to export a DYNAMIC graph to a .gexf file for use in Gephi, so I use spells as an added attribute on both nodes and edges (e.g. 'spells': [(start, end), ...]). When I write a .gexf file using nx.write_gexf(...), node spells are written just fine, but edges don't get the same treatment. Specifically, the call to add_spells(...) is missing from the add_edge(...) member of gexf.py.
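A small usage example of what the fix enables (the intervals are made up; 'spells' follows the attribute convention the reader and writer already use for nodes):

```python
import networkx as nx

G = nx.Graph()
G.add_node(1, spells=[(0, 5)])
G.add_node(2, spells=[(2, 8)])
G.add_edge(1, 2, spells=[(2, 5)])  # previously dropped; now serialized too

nx.write_gexf(G, 'dynamic.gexf')
```

Per the patch above, edge intervals are written as slices for GEXF 1.1 and as spells otherwise, matching the existing node behavior.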
2013-08-11T03:48:25
networkx/networkx
929
networkx__networkx-929
[ "928" ]
771b9643cfe4876ce4a3cb1b5150abf34863612b
diff --git a/networkx/readwrite/gexf.py b/networkx/readwrite/gexf.py --- a/networkx/readwrite/gexf.py +++ b/networkx/readwrite/gexf.py @@ -236,8 +236,7 @@ def set_version(self,version): class GEXFWriter(GEXF): # class for writing GEXF format files # use write_gexf() function - def __init__(self, graph=None, encoding="utf-8", - mode='static',prettyprint=True, + def __init__(self, graph=None, encoding="utf-8", prettyprint=True, version='1.1draft'): try: import xml.etree.ElementTree @@ -245,7 +244,6 @@ def __init__(self, graph=None, encoding="utf-8", raise ImportError('GEXF writer requires ' 'xml.elementtree.ElementTree') self.prettyprint=prettyprint - self.mode=mode self.encoding = encoding self.set_version(version) self.xml = Element("gexf", @@ -277,12 +275,18 @@ def __str__(self): return s def add_graph(self, G): + # set graph attributes + if G.graph.get('mode')=='dynamic': + mode='dynamic' + else: + mode='static' + # Add a graph element to the XML if G.is_directed(): default='directed' else: default='undirected' - graph_element = Element("graph",defaultedgetype=default,mode=self.mode) + graph_element = Element("graph",defaultedgetype=default,mode=mode) self.graph_element=graph_element self.add_nodes(G,graph_element) self.add_edges(G,graph_element) @@ -575,10 +579,6 @@ def __call__(self, stream): def make_graph(self, graph_xml): - # mode is "static" or "dynamic" - graph_mode = graph_xml.get("mode", "") - self.dynamic=(graph_mode=='dynamic') - # start with empty DiGraph or MultiDiGraph edgedefault = graph_xml.get("defaultedgetype", None) if edgedefault=='directed': @@ -593,6 +593,11 @@ def make_graph(self, graph_xml): graph_end=graph_xml.get('end') if graph_end is not None: G.graph['end']=graph_end + graph_mode=graph_xml.get("mode", "") + if graph_mode=='dynamic': + G.graph['mode']='dynamic' + else: + G.graph['mode']='static' # node and edge attributes attributes_elements=graph_xml.findall("{%s}attributes"%self.NS_GEXF)
diff --git a/networkx/readwrite/tests/test_gexf.py b/networkx/readwrite/tests/test_gexf.py --- a/networkx/readwrite/tests/test_gexf.py +++ b/networkx/readwrite/tests/test_gexf.py @@ -265,6 +265,10 @@ def test_default_attribute(self): assert_equal( sorted(sorted(e) for e in G.edges()), sorted(sorted(e) for e in H.edges())) + # Reading a gexf graph always sets mode attribute to either + # 'static' or 'dynamic'. Remove the mode attribute from the + # read graph for the sake of comparing remaining attributes. + del H.graph['mode'] assert_equal(G.graph,H.graph) def test_serialize_ints_to_strings(self):
Gephi writer outputs static graph even when graph is dynamic This pertains to serializing a networkx graph to the Gephi `.gexf` format. Calling `write_gexf` on a networkx graph will always output the graph `mode` as `static`, which is an issue when the graph mode is actually `dynamic`. The implementation of `write_gexf` (`networkx/networkx/readwrite/gexf.py`), minus any comments, is:

``` python
def write_gexf(G, path, encoding='utf-8',prettyprint=True,version='1.1draft'):
    writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
                        version=version)
    writer.add_graph(G)
    writer.write(path)
```

`GEXFWriter`'s `__init__` has an optional graph mode parameter; see the method signature below:

``` python
def __init__(self, graph=None, encoding="utf-8",
             mode='static',prettyprint=True,
             version='1.1draft'):
```

`write_gexf` does not specify the mode when instantiating `GEXFWriter`, so the default value of `static` is always used, and `__init__` sets `self.mode=mode`. The following call to `writer.add_graph(G)` ultimately uses `self.mode` to determine what gets written to the output XML, and so the `mode` attribute of the graph always shows as `static` in the output. A quick fix would be to i) add a `mode` parameter to `write_gexf` allowing the user to specify the graph type and ii) pass the argument through to the instantiation of `GEXFWriter`. The problem with this is that it leaves it up to the user to supply the correct graph type at the time `write_gexf` is called. A more appropriate fix is probably to store the mode as an attribute of the networkx graph and use this to determine the value of `mode` as it appears in the output XML. This also means `mode` should be removed as a parameter of `GEXFWriter`'s `__init__`.
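With that fix in place, the mode would travel with the graph object itself; a hedged sketch of the intended usage (the edge data here is illustrative):

``` python
import networkx as nx

G = nx.DiGraph()
G.add_edge(1, 2, start=0, end=5)   # illustrative dynamic edge data
G.graph['mode'] = 'dynamic'        # add_graph() now reads the mode from G.graph
nx.write_gexf(G, 'dynamic.gexf')   # <graph ... mode="dynamic"> in the output
```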
2013-08-11T13:12:28
networkx/networkx
942
networkx__networkx-942
[ "938" ]
eaa0c3436896ccef1db779fde4e17fd27a5ade6b
diff --git a/networkx/relabel.py b/networkx/relabel.py --- a/networkx/relabel.py +++ b/networkx/relabel.py @@ -72,25 +72,25 @@ def relabel_nodes(G, mapping, copy=True): # you can pass a function f(old_label)->new_label # but we'll just make a dictionary here regardless if not hasattr(mapping,"__getitem__"): - m = dict((n,mapping(n)) for n in G) + m = dict((n, mapping(n)) for n in G) else: - m=mapping + m = mapping if copy: - return _relabel_copy(G,m) + return _relabel_copy(G, m) else: - return _relabel_inplace(G,m) + return _relabel_inplace(G, m) def _relabel_inplace(G, mapping): - old_labels=set(mapping.keys()) - new_labels=set(mapping.values()) + old_labels = set(mapping.keys()) + new_labels = set(mapping.values()) if len(old_labels & new_labels) > 0: # labels sets overlap # can we topological sort and still do the relabeling? - D=nx.DiGraph(list(mapping.items())) + D = nx.DiGraph(list(mapping.items())) D.remove_edges_from(D.selfloop_edges()) try: - nodes=nx.topological_sort(D) + nodes = nx.topological_sort(D) except nx.NetworkXUnfeasible: raise nx.NetworkXUnfeasible('The node label sets are overlapping ' 'and no ordering can resolve the ' @@ -98,50 +98,50 @@ def _relabel_inplace(G, mapping): nodes.reverse() # reverse topological order else: # non-overlapping label sets - nodes=old_labels + nodes = old_labels multigraph = G.is_multigraph() directed = G.is_directed() for old in nodes: try: - new=mapping[old] + new = mapping[old] except KeyError: continue try: - G.add_node(new,attr_dict=G.node[old]) + G.add_node(new, attr_dict=G.node[old]) except KeyError: raise KeyError("Node %s is not in the graph"%old) if multigraph: - new_edges=[(new,old == target and new or target,key,data) - for (_,target,key,data) - in G.edges(old,data=True,keys=True)] + new_edges = [(new, old == target and new or target, key, data) + for (_,target,key,data) + in G.edges(old, data=True, keys=True)] if directed: - new_edges+=[(old == source and new or source,new,key,data) - for (source,_,key,data) - in G.in_edges(old,data=True,keys=True)] + new_edges += [(old == source and new or source, new, key, data) + for (source, _, key,data) + in G.in_edges(old, data=True, keys=True)] else: - new_edges=[(new,old == target and new or target,data) - for (_,target,data) in G.edges(old,data=True)] + new_edges = [(new, old == target and new or target, data) + for (_,target,data) in G.edges(old, data=True)] if directed: - new_edges+=[(old == source and new or source,new,data) - for (source,_,data) in G.in_edges(old,data=True)] + new_edges += [(old == source and new or source,new,data) + for (source,_,data) in G.in_edges(old, data=True)] G.remove_node(old) G.add_edges_from(new_edges) return G def _relabel_copy(G, mapping): - H=G.__class__() - H.name="(%s)" % G.name + H = G.__class__() + H.name = "(%s)" % G.name if G.is_multigraph(): - H.add_edges_from( (mapping.get(n1,n1),mapping.get(n2,n2),k,d.copy()) - for (n1,n2,k,d) in G.edges_iter(keys=True,data=True)) + H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),k,d.copy()) + for (n1,n2,k,d) in G.edges_iter(keys=True, data=True)) else: - H.add_edges_from( (mapping.get(n1,n1),mapping.get(n2,n2),d.copy()) - for (n1,n2,d) in G.edges_iter(data=True)) + H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),d.copy()) + for (n1, n2, d) in G.edges_iter(data=True)) - H.add_nodes_from(mapping.get(n,n) for n in G) - H.node.update(dict((mapping.get(n,n),d.copy()) for n,d in G.node.items())) + H.add_nodes_from(mapping.get(n, n) for n in G) + H.node.update(dict((mapping.get(n, n), 
d.copy()) for n,d in G.node.items())) H.graph.update(G.graph.copy()) return H @@ -149,7 +149,8 @@ def _relabel_copy(G, mapping): def convert_node_labels_to_integers(G, first_label=0, ordering="default", label_attribute=None): - """Return a copy of the graph G with the nodes relabeled with integers. + """Return a copy of the graph G with the nodes relabeled using + consecutive integers. Parameters ---------- @@ -157,8 +158,8 @@ def convert_node_labels_to_integers(G, first_label=0, ordering="default", A NetworkX graph first_label : int, optional (default=0) - An integer specifying the offset in numbering nodes. - The n new integer labels are numbered first_label, ..., n-1+first_label. + An integer specifying the starting offset in numbering nodes. + The new integer labels are numbered first_label, ..., n-1+first_label. ordering : string "default" : inherit node ordering from G.nodes() @@ -180,24 +181,24 @@ def convert_node_labels_to_integers(G, first_label=0, ordering="default", """ N = G.number_of_nodes()+first_label if ordering == "default": - mapping = dict(zip(G.nodes(),range(first_label,N))) + mapping = dict(zip(G.nodes(), range(first_label, N))) elif ordering == "sorted": nlist = G.nodes() nlist.sort() - mapping=dict(zip(nlist,range(first_label,N))) + mapping = dict(zip(nlist, range(first_label, N))) elif ordering == "increasing degree": - dv_pairs=[(d,n) for (n,d) in G.degree_iter()] + dv_pairs = [(d,n) for (n,d) in G.degree_iter()] dv_pairs.sort() # in-place sort from lowest to highest degree - mapping = dict(zip([n for d,n in dv_pairs],range(first_label,N))) + mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N))) elif ordering == "decreasing degree": dv_pairs = [(d,n) for (n,d) in G.degree_iter()] dv_pairs.sort() # in-place sort from lowest to highest degree dv_pairs.reverse() - mapping = dict(zip([n for d,n in dv_pairs],range(first_label,N))) + mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N))) else: raise nx.NetworkXError('Unknown node ordering: %s'%ordering) - H = relabel_nodes(G,mapping) - H.name="("+G.name+")_with_int_labels" + H = relabel_nodes(G, mapping) + H.name = "("+G.name+")_with_int_labels" # create node attribute with the old label if label_attribute is not None: nx.set_node_attributes(H, label_attribute,
[SUGGESTION] Change convert_node_labels_to_integers to behave nicer While there is nothing wrong with convert_node_labels_to_integers(), I suggest changing its name and/or description, for I find it a bit misleading right now. Right now it says: "Return a copy of the graph G with the nodes relabeled with integers.", without further explanation (magic integers?). I think it'd be nice if it said something like: "Return a copy of the graph G with the nodes relabeled to a succession of consecutive integers." I am aware this is a very subtle change, but I believe it will make the function easier to understand. Cheers. PS: the title of this issue is now misleading. Sorry about that.
Good idea. We'll update the documentation.
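For concreteness, a doctest-style sketch of the behavior the new wording describes:

``` python
>>> import networkx as nx
>>> G = nx.Graph([('a', 'b'), ('b', 'c')])
>>> H = nx.convert_node_labels_to_integers(G, first_label=0)
>>> sorted(H.nodes())
[0, 1, 2]
```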
2013-08-25T14:19:15
networkx/networkx
943
networkx__networkx-943
[ "920" ]
1509f2b84c5b6c9145625af0b20baf7ac095ac0f
diff --git a/networkx/algorithms/centrality/eigenvector.py b/networkx/algorithms/centrality/eigenvector.py --- a/networkx/algorithms/centrality/eigenvector.py +++ b/networkx/algorithms/centrality/eigenvector.py @@ -1,29 +1,30 @@ """ Eigenvector centrality. """ -# Copyright (C) 2004-2011 by +# Copyright (C) 2004-2013 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. import networkx as nx -__author__ = "\n".join(['Aric Hagberg ([email protected])', +__author__ = "\n".join(['Aric Hagberg ([email protected])', 'Pieter Swart ([email protected])', 'Sasha Gutfraind ([email protected])']) __all__ = ['eigenvector_centrality', 'eigenvector_centrality_numpy'] -def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None): +def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, + weight='weight'): """Compute the eigenvector centrality for the graph G. - Uses the power method to find the eigenvector for the + Uses the power method to find the eigenvector for the largest eigenvalue of the adjacency matrix of G. Parameters ---------- G : graph - A networkx graph + A networkx graph max_iter : interger, optional Maximum number of iterations in power method. @@ -32,7 +33,11 @@ def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None): Error tolerance used to check convergence in power method iteration. nstart : dictionary, optional - Starting value of eigenvector iteration for each node. + Starting value of eigenvector iteration for each node. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. Returns ------- @@ -41,8 +46,8 @@ def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None): Examples -------- - >>> G=nx.path_graph(4) - >>> centrality=nx.eigenvector_centrality(G) + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality(G) >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality]) ['0 0.37', '1 0.60', '2 0.60', '3 0.37'] @@ -67,49 +72,56 @@ def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None): if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph: raise nx.NetworkXException("Not defined for multigraphs.") - if len(G)==0: + if len(G) == 0: raise nx.NetworkXException("Empty graph.") if nstart is None: - # choose starting vector with entries of 1/len(G) - x=dict([(n,1.0/len(G)) for n in G]) + # choose starting vector with entries of 1/len(G) + x = dict([(n,1.0/len(G)) for n in G]) else: - x=nstart + x = nstart # normalize starting vector - s=1.0/sum(x.values()) - for k in x: x[k]*=s - nnodes=G.number_of_nodes() - # make up to max_iter iterations + s = 1.0/sum(x.values()) + for k in x: + x[k] *= s + nnodes = G.number_of_nodes() + # make up to max_iter iterations for i in range(max_iter): - xlast=x - x=dict.fromkeys(xlast, 0) + xlast = x + x = dict.fromkeys(xlast, 0) # do the multiplication y=Ax for n in x: for nbr in G[n]: - x[n]+=xlast[nbr]*G[n][nbr].get('weight',1) + x[n] += xlast[nbr]*G[n][nbr].get(weight, 1) # normalize vector try: - s=1.0/sqrt(sum(v**2 for v in x.values())) + s = 1.0/sqrt(sum(v**2 for v in x.values())) # this should never be zero? 
except ZeroDivisionError: - s=1.0 - for n in x: x[n]*=s - # check convergence - err=sum([abs(x[n]-xlast[n]) for n in x]) + s = 1.0 + for n in x: + x[n] *= s + # check convergence + err = sum([abs(x[n]-xlast[n]) for n in x]) if err < nnodes*tol: return x - raise nx.NetworkXError("""eigenvector_centrality(): + raise nx.NetworkXError("""eigenvector_centrality(): power iteration failed to converge in %d iterations."%(i+1))""") -def eigenvector_centrality_numpy(G): +def eigenvector_centrality_numpy(G, weight='weight'): """Compute the eigenvector centrality for the graph G. Parameters ---------- G : graph - A networkx graph + A networkx graph + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Returns ------- @@ -118,8 +130,8 @@ def eigenvector_centrality_numpy(G): Examples -------- - >>> G=nx.path_graph(4) - >>> centrality=nx.eigenvector_centrality_numpy(G) + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality_numpy(G) >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality]) ['0 0.37', '1 0.60', '2 0.60', '3 0.37'] @@ -145,17 +157,17 @@ def eigenvector_centrality_numpy(G): if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph: raise nx.NetworkXException('Not defined for multigraphs.') - if len(G)==0: + if len(G) == 0: raise nx.NetworkXException('Empty graph.') - A=nx.adj_matrix(G,nodelist=G.nodes()) - eigenvalues,eigenvectors=np.linalg.eig(A) + A = nx.adj_matrix(G, nodelist=G.nodes(), weight='weight') + eigenvalues,eigenvectors = np.linalg.eig(A) # eigenvalue indices in reverse sorted order - ind=eigenvalues.argsort()[::-1] + ind = eigenvalues.argsort()[::-1] # eigenvector of largest eigenvalue at ind[0], normalized - largest=np.array(eigenvectors[:,ind[0]]).flatten().real - norm=np.sign(largest.sum())*np.linalg.norm(largest) - centrality=dict(zip(G,map(float,largest/norm))) + largest = np.array(eigenvectors[:,ind[0]]).flatten().real + norm = np.sign(largest.sum())*np.linalg.norm(largest) + centrality = dict(zip(G,map(float,largest/norm))) return centrality diff --git a/networkx/algorithms/centrality/katz.py b/networkx/algorithms/centrality/katz.py --- a/networkx/algorithms/centrality/katz.py +++ b/networkx/algorithms/centrality/katz.py @@ -8,8 +8,8 @@ # All rights reserved. # BSD license. import networkx as nx -from networkx.utils import * -__author__ = "\n".join(['Aric Hagberg ([email protected])', +from networkx.utils import not_implemented_for +__author__ = "\n".join(['Aric Hagberg ([email protected])', 'Pieter Swart ([email protected])', 'Sasha Gutfraind ([email protected])', 'Vincent Gauthier ([email protected])']) @@ -19,7 +19,8 @@ @not_implemented_for('multigraph') def katz_centrality(G, alpha=0.1, beta=1.0, - max_iter=1000, tol=1.0e-6, nstart=None, normalized=True): + max_iter=1000, tol=1.0e-6, nstart=None, normalized=True, + weight = 'weight'): r"""Compute the Katz centrality for the nodes of the graph G. @@ -76,6 +77,10 @@ def katz_centrality(G, alpha=0.1, beta=1.0, normalized : bool, optional (default=True) If True normalize the resulting values. + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. 
+ Returns ------- nodes : dictionary @@ -121,16 +126,16 @@ def katz_centrality(G, alpha=0.1, beta=1.0, """ from math import sqrt - if len(G)==0: + if len(G) == 0: return {} - nnodes=G.number_of_nodes() + nnodes = G.number_of_nodes() if nstart is None: # choose starting vector with entries of 0 - x=dict([(n,0) for n in G]) + x = dict([(n,0) for n in G]) else: - x=nstart + x = nstart try: b = dict.fromkeys(G,float(beta)) @@ -142,35 +147,36 @@ def katz_centrality(G, alpha=0.1, beta=1.0, # make up to max_iter iterations for i in range(max_iter): - xlast=x - x=dict.fromkeys(xlast, 0) + xlast = x + x = dict.fromkeys(xlast, 0) # do the multiplication y = Alpha * Ax - Beta for n in x: for nbr in G[n]: - x[n] += xlast[nbr] * G[n][nbr].get('weight',1) + x[n] += xlast[nbr] * G[n][nbr].get(weight,1) x[n] = alpha*x[n] + b[n] # check convergence - err=sum([abs(x[n]-xlast[n]) for n in x]) + err = sum([abs(x[n]-xlast[n]) for n in x]) if err < nnodes*tol: if normalized: # normalize vector try: - s=1.0/sqrt(sum(v**2 for v in x.values())) + s = 1.0/sqrt(sum(v**2 for v in x.values())) # this should never be zero? except ZeroDivisionError: - s=1.0 + s = 1.0 else: s = 1 for n in x: - x[n]*=s + x[n] *= s return x raise nx.NetworkXError('Power iteration failed to converge in ', '%d iterations."%(i+1))') @not_implemented_for('multigraph') -def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True): +def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, + weight = 'weight'): r"""Compute the Katz centrality for the graph G. @@ -217,6 +223,10 @@ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True): normalized : bool If True normalize the resulting values. + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Returns ------- nodes : dictionary @@ -260,14 +270,14 @@ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True): import numpy as np except ImportError: raise ImportError('Requires NumPy: http://scipy.org/') - if len(G)==0: + if len(G) == 0: return {} try: nodelist = beta.keys() if set(nodelist) != set(G): raise nx.NetworkXError('beta dictionary ' 'must have a value for every node') - b = np.array(list(beta.values()),dtype=float) + b = np.array(list(beta.values()), dtype=float) except AttributeError: nodelist = G.nodes() try: @@ -275,14 +285,14 @@ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True): except (TypeError,ValueError): raise nx.NetworkXError('beta must be a number') - A=nx.adj_matrix(G, nodelist=nodelist) + A = nx.adj_matrix(G, nodelist=nodelist, weight=weight) n = np.array(A).shape[0] centrality = np.linalg.solve( np.eye(n,n) - (alpha * A) , b) if normalized: norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) else: norm = 1.0 - centrality=dict(zip(nodelist, map(float,centrality/norm))) + centrality = dict(zip(nodelist, map(float,centrality/norm))) return centrality
diff --git a/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py --- a/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py +++ b/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py @@ -13,7 +13,7 @@ def setupClass(cls): import numpy as np except ImportError: raise SkipTest('NumPy not available.') - + def test_K5(self): """Eigenvector centrality: K5""" G=networkx.complete_graph(5) @@ -22,7 +22,7 @@ def test_K5(self): b_answer=dict.fromkeys(G,v) for n in sorted(G): assert_almost_equal(b[n],b_answer[n]) - nstart = dict([(n,1) for n in G]) + nstart = dict([(n,1) for n in G]) b=networkx.eigenvector_centrality(G,nstart=nstart) for n in sorted(G): assert_almost_equal(b[n],b_answer[n]) @@ -42,6 +42,16 @@ def test_P3(self): assert_almost_equal(b[n],b_answer[n],places=4) + def test_P3_unweighted(self): + """Eigenvector centrality: P3""" + G=networkx.path_graph(3) + b_answer={0: 0.5, 1: 0.7071, 2: 0.5} + b=networkx.eigenvector_centrality_numpy(G, weight=None) + for n in sorted(G): + assert_almost_equal(b[n],b_answer[n],places=4) + + + @raises(networkx.NetworkXError) def test_maxiter(self): G=networkx.path_graph(3) @@ -58,7 +68,7 @@ def setupClass(cls): raise SkipTest('NumPy not available.') def setUp(self): - + G=networkx.DiGraph() edges=[(1,2),(1,3),(2,4),(3,2),(3,5),(4,2),(4,5),(4,6),\ @@ -67,7 +77,7 @@ def setUp(self): G.add_edges_from(edges,weight=2.0) self.G=G - self.G.evc=[0.25368793, 0.19576478, 0.32817092, 0.40430835, + self.G.evc=[0.25368793, 0.19576478, 0.32817092, 0.40430835, 0.48199885, 0.15724483, 0.51346196, 0.32475403] H=networkx.DiGraph() @@ -78,7 +88,7 @@ def setUp(self): G.add_edges_from(edges) self.H=G - self.H.evc=[0.25368793, 0.19576478, 0.32817092, 0.40430835, + self.H.evc=[0.25368793, 0.19576478, 0.32817092, 0.40430835, 0.48199885, 0.15724483, 0.51346196, 0.32475403] diff --git a/networkx/algorithms/centrality/tests/test_katz_centrality.py b/networkx/algorithms/centrality/tests/test_katz_centrality.py --- a/networkx/algorithms/centrality/tests/test_katz_centrality.py +++ b/networkx/algorithms/centrality/tests/test_katz_centrality.py @@ -194,6 +194,32 @@ def test_bad_beta_numbe(self): e = networkx.katz_centrality_numpy(G, 0.1,beta='foo') + def test_K5_unweighted(self): + """Katz centrality: K5""" + G = networkx.complete_graph(5) + alpha = 0.1 + b = networkx.katz_centrality(G, alpha, weight=None) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert_almost_equal(b[n], b_answer[n]) + nstart = dict([(n, 1) for n in G]) + b = networkx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert_almost_equal(b[n], b_answer[n], places=3) + + def test_P3_unweighted(self): + """Katz centrality: P3""" + alpha = 0.1 + G = networkx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, + 2: 0.5598852584152162} + b = networkx.katz_centrality_numpy(G, alpha, weight=None) + for n in sorted(G): + assert_almost_equal(b[n], b_answer[n], places=4) + + + class TestKatzCentralityDirected(object): def setUp(self): G = networkx.DiGraph()
Add 'weight' keyword option to eigenvector and Katz centrality Both the eigenvector_centrality() and katz_centrality() functions use edge weights whenever a 'weight' attribute is present on an edge, but the documentation does not mention this. Other NetworkX functions also let the user name the attribute via a weight= keyword, including the option weight=None to ignore weights entirely. These functions should behave the same way.
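A sketch of the proposed call pattern (assuming the keyword lands as described above; outputs omitted):

``` python
import networkx as nx

G = nx.Graph()
G.add_edge(0, 1, weight=10.0)
G.add_edge(1, 2, weight=1.0)

c_weighted = nx.eigenvector_centrality(G)                 # uses the 'weight' attribute
c_unweighted = nx.eigenvector_centrality(G, weight=None)  # treats all edges equally
```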
2013-08-25T14:50:35
networkx/networkx
944
networkx__networkx-944
[ "849" ]
df4626c85050faf1d5b5a6118753b0baac8703ea
diff --git a/networkx/algorithms/operators/all.py b/networkx/algorithms/operators/all.py --- a/networkx/algorithms/operators/all.py +++ b/networkx/algorithms/operators/all.py @@ -1,6 +1,6 @@ """Operations on many graphs. """ -# Copyright (C) 2012 by +# Copyright (C) 2013 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> @@ -11,7 +11,7 @@ except ImportError: # Python3 has zip_longest from itertools import zip_longest import networkx as nx -from networkx.utils import is_string_like +#from networkx.utils import is_string_like __author__ = """\n""".join([ 'Robert King <[email protected]>', 'Aric Hagberg <[email protected]>']) @@ -55,10 +55,10 @@ def union_all(graphs, rename=(None,) , name=None): union disjoint_union_all """ - graphs_names = zip_longest(graphs,rename) + graphs_names = zip_longest(graphs, rename) U, gname = next(graphs_names) - for H,hname in graphs_names: - U = nx.union(U, H, (gname,hname),name=name) + for H, hname in graphs_names: + U = nx.union(U, H, (gname, hname), name=name) gname = None return U diff --git a/networkx/algorithms/operators/binary.py b/networkx/algorithms/operators/binary.py --- a/networkx/algorithms/operators/binary.py +++ b/networkx/algorithms/operators/binary.py @@ -1,7 +1,7 @@ """ Operations on graphs including union, intersection, difference. """ -# Copyright (C) 2004-2012 by +# Copyright (C) 2004-2013 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> @@ -9,7 +9,7 @@ # BSD license. import networkx as nx from networkx.utils import is_string_like -__author__ = """\n""".join(['Aric Hagberg ([email protected])', +__author__ = """\n""".join(['Aric Hagberg <[email protected]>', 'Pieter Swart ([email protected])', 'Dan Schult([email protected])']) __all__ = ['union', 'compose', 'disjoint_union', 'intersection', @@ -53,6 +53,8 @@ def union(G, H, rename=(None, None), name=None): -------- disjoint_union """ + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError('G and H must both be graphs or multigraphs.') # Union is the same type as G R = G.__class__() if name is None: @@ -65,13 +67,13 @@ def add_prefix(graph, prefix): return graph def label(x): if is_string_like(x): - name=prefix+x + name = prefix+x else: - name=prefix+repr(x) + name = prefix+repr(x) return name return nx.relabel_nodes(graph, label) - G = add_prefix(G,rename[0]) - H = add_prefix(H,rename[1]) + G = add_prefix(G, rename[0]) + H = add_prefix(H, rename[1]) if set(G) & set(H): raise nx.NetworkXError('The node sets of G and H are not disjoint.', 'Use appropriate rename=(Gprefix,Hprefix)' @@ -101,7 +103,7 @@ def label(x): return R -def disjoint_union(G,H): +def disjoint_union(G, H): """ Return the disjoint union of graphs G and H. This algorithm forces distinct integer node labels. @@ -127,10 +129,10 @@ def disjoint_union(G,H): to the union graph. If a graph attribute is present in both G and H the value from H is used. 
""" - R1=nx.convert_node_labels_to_integers(G) - R2=nx.convert_node_labels_to_integers(H,first_label=len(R1)) - R=union(R1,R2) - R.name="disjoint_union( %s, %s )"%(G.name,H.name) + R1 = nx.convert_node_labels_to_integers(G) + R2 = nx.convert_node_labels_to_integers(H, first_label=len(R1)) + R = union(R1, R2) + R.name = "disjoint_union( %s, %s )"%(G.name, H.name) R.graph.update(G.graph) R.graph.update(H.graph) return R @@ -164,26 +166,27 @@ def intersection(G, H): >>> R.remove_nodes_from(n for n in G if n not in H) """ # create new graph - R=nx.create_empty_copy(G) + R = nx.create_empty_copy(G) - R.name="Intersection of (%s and %s)"%(G.name, H.name) - - if set(G)!=set(H): + R.name = "Intersection of (%s and %s)"%(G.name, H.name) + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError('G and H must both be graphs or multigraphs.') + if set(G) != set(H): raise nx.NetworkXError("Node sets of graphs are not equal") - if G.number_of_edges()<=H.number_of_edges(): + if G.number_of_edges() <= H.number_of_edges(): if G.is_multigraph(): - edges=G.edges_iter(keys=True) + edges = G.edges_iter(keys=True) else: - edges=G.edges_iter() + edges = G.edges_iter() for e in edges: if H.has_edge(*e): R.add_edge(*e) else: if H.is_multigraph(): - edges=H.edges_iter(keys=True) + edges = H.edges_iter(keys=True) else: - edges=H.edges_iter() + edges = H.edges_iter() for e in edges: if G.has_edge(*e): R.add_edge(*e) @@ -211,22 +214,24 @@ def difference(G, H): with the attributes (including edge data) from G use remove_nodes_from() as follows: - >>> G=nx.path_graph(3) - >>> H=nx.path_graph(5) - >>> R=G.copy() + >>> G = nx.path_graph(3) + >>> H = nx.path_graph(5) + >>> R = G.copy() >>> R.remove_nodes_from(n for n in G if n in H) """ # create new graph - R=nx.create_empty_copy(G) - R.name="Difference of (%s and %s)"%(G.name, H.name) + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError('G and H must both be graphs or multigraphs.') + R = nx.create_empty_copy(G) + R.name = "Difference of (%s and %s)"%(G.name, H.name) - if set(G)!=set(H): + if set(G) != set(H): raise nx.NetworkXError("Node sets of graphs not equal") if G.is_multigraph(): - edges=G.edges_iter(keys=True) + edges = G.edges_iter(keys=True) else: - edges=G.edges_iter() + edges = G.edges_iter() for e in edges: if not H.has_edge(*e): R.add_edge(*e) @@ -252,21 +257,23 @@ def symmetric_difference(G, H): graph. 
""" # create new graph - R=nx.create_empty_copy(G) - R.name="Symmetric difference of (%s and %s)"%(G.name, H.name) + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError('G and H must both be graphs or multigraphs.') + R = nx.create_empty_copy(G) + R.name = "Symmetric difference of (%s and %s)"%(G.name, H.name) - if set(G)!=set(H): + if set(G) != set(H): raise nx.NetworkXError("Node sets of graphs not equal") - gnodes=set(G) # set of nodes in G - hnodes=set(H) # set of nodes in H - nodes=gnodes.symmetric_difference(hnodes) + gnodes = set(G) # set of nodes in G + hnodes = set(H) # set of nodes in H + nodes = gnodes.symmetric_difference(hnodes) R.add_nodes_from(nodes) if G.is_multigraph(): - edges=G.edges_iter(keys=True) + edges = G.edges_iter(keys=True) else: - edges=G.edges_iter() + edges = G.edges_iter() # we could copy the data here but then this function doesn't # match intersection and difference for e in edges: @@ -274,9 +281,9 @@ def symmetric_difference(G, H): R.add_edge(*e) if H.is_multigraph(): - edges=H.edges_iter(keys=True) + edges = H.edges_iter(keys=True) else: - edges=H.edges_iter() + edges = H.edges_iter() for e in edges: if not G.has_edge(*e): R.add_edge(*e) @@ -305,18 +312,20 @@ def compose(G, H, name=None): It is recommended that G and H be either both directed or both undirected. Attributes from H take precedent over attributes from G. """ + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError('G and H must both be graphs or multigraphs.') if name is None: - name="compose( %s, %s )"%(G.name,H.name) - R=G.__class__() - R.name=name + name = "compose( %s, %s )"%(G.name,H.name) + R = G.__class__() + R.name = name R.add_nodes_from(H.nodes()) R.add_nodes_from(G.nodes()) if H.is_multigraph(): - R.add_edges_from(H.edges_iter(keys=True,data=True)) + R.add_edges_from(H.edges_iter(keys=True, data=True)) else: R.add_edges_from(H.edges_iter(data=True)) if G.is_multigraph(): - R.add_edges_from(G.edges_iter(keys=True,data=True)) + R.add_edges_from(G.edges_iter(keys=True, data=True)) else: R.add_edges_from(G.edges_iter(data=True)) diff --git a/networkx/algorithms/operators/unary.py b/networkx/algorithms/operators/unary.py --- a/networkx/algorithms/operators/unary.py +++ b/networkx/algorithms/operators/unary.py @@ -1,13 +1,12 @@ """Unary operations on graphs""" -# Copyright (C) 2004-2012 by +# Copyright (C) 2004-2013 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. import networkx as nx -from networkx.utils import is_string_like -__author__ = """\n""".join(['Aric Hagberg ([email protected])', +__author__ = """\n""".join(['Aric Hagberg <[email protected]>', 'Pieter Swart ([email protected])', 'Dan Schult([email protected])']) __all__ = ['complement', 'reverse'] @@ -35,11 +34,11 @@ def complement(G, name=None): Graph, node, and edge data are not propagated to the new graph. """ if name is None: - name="complement(%s)"%(G.name) - R=G.__class__() - R.name=name + name = "complement(%s)"%(G.name) + R = G.__class__() + R.name = name R.add_nodes_from(G) - R.add_edges_from( ((n,n2) + R.add_edges_from( ((n, n2) for n,nbrs in G.adjacency_iter() for n2 in G if n2 not in nbrs if n != n2) )
diff --git a/networkx/algorithms/operators/tests/test_all.py b/networkx/algorithms/operators/tests/test_all.py --- a/networkx/algorithms/operators/tests/test_all.py +++ b/networkx/algorithms/operators/tests/test_all.py @@ -165,3 +165,32 @@ def test_input_output(): l = [nx.Graph([(1,2)]),nx.Graph([(1,2)])] R = nx.intersection_all(l) assert_equal(len(l),2) + + +@raises(nx.NetworkXError) +def test_mixed_type_union(): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.union_all([G,H,I]) + +@raises(nx.NetworkXError) +def test_mixed_type_disjoint_union(): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.disjoint_union_all([G,H,I]) + +@raises(nx.NetworkXError) +def test_mixed_type_intersection(): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.intersection_all([G,H,I]) + +@raises(nx.NetworkXError) +def test_mixed_type_compose(): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.compose_all([G,H,I]) diff --git a/networkx/algorithms/operators/tests/test_binary.py b/networkx/algorithms/operators/tests/test_binary.py --- a/networkx/algorithms/operators/tests/test_binary.py +++ b/networkx/algorithms/operators/tests/test_binary.py @@ -268,3 +268,41 @@ def test_compose_multigraph(): assert_equal( set(GH) , set(G)|set(H)) assert_equal( set(GH.edges(keys=True)) , set(G.edges(keys=True))|set(H.edges(keys=True))) + + +@raises(nx.NetworkXError) +def test_mixed_type_union(): + G = nx.Graph() + H = nx.MultiGraph() + U = nx.union(G,H) + +@raises(nx.NetworkXError) +def test_mixed_type_disjoint_union(): + G = nx.Graph() + H = nx.MultiGraph() + U = nx.disjoint_union(G,H) + +@raises(nx.NetworkXError) +def test_mixed_type_intersection(): + G = nx.Graph() + H = nx.MultiGraph() + U = nx.intersection(G,H) + +@raises(nx.NetworkXError) +def test_mixed_type_difference(): + G = nx.Graph() + H = nx.MultiGraph() + U = nx.difference(G,H) + + +@raises(nx.NetworkXError) +def test_mixed_type_symmetric_difference(): + G = nx.Graph() + H = nx.MultiGraph() + U = nx.symmetric_difference(G,H) + +@raises(nx.NetworkXError) +def test_mixed_type_compose(): + G = nx.Graph() + H = nx.MultiGraph() + U = nx.compose(G,H)
difference fails when applied to MultiDiGraph and Graph Take G as a MultiDiGraph and get a spanning tree, treating it as undirected:

``` python
T = nx.minimum_spanning_tree (nx.MultiGraph (G))
```

Take the difference to (almost) get the chords of G:

``` python
nx.difference (G,T)
```

The following error is produced:

``` python
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-77-8d39870ba2cc> in <module>()
----> 1 nx.difference(G,T)

/usr/lib/pymodules/python2.7/networkx/algorithms/operators/binary.pyc in difference(G, H)
    223         edges=G.edges_iter()
    224     for e in edges:
--> 225         if not H.has_edge(*e):
    226             R.add_edge(*e)
    227     return R

TypeError: has_edge() takes exactly 3 arguments (4 given)
```

This happens because the edge iterator is created with keys=True, but since T is just a Graph (it should be a MultiGraph) the call to has_edge fails.
The following patch fixes the problem. I think most binary operations have to be fixed this way.

``` patch
# HG changeset patch
# User Juan Pablo Carbajal <[email protected]>
# Date 1361641591 -3600
# Node ID 0f47bd8643caab842eed98e5fda6fadfe65c7ef0
# Parent 94139db0a8fe39c832ba1779b1c17088c7440e1d
difference: compatibility between graphs and multigraphs

diff --git a/networkx/algorithms/operators/binary.py b/networkx/algorithms/operators/binary.py
--- a/networkx/algorithms/operators/binary.py
+++ b/networkx/algorithms/operators/binary.py
@@ -225,6 +225,7 @@
     if G.is_multigraph():
         edges=G.edges_iter(keys=True)
+        H = nx.MultiGraph (H)
     else:
         edges=G.edges_iter()
     for e in edges:
```

That fix might work. But you could also in your case call the function with

```
nx.difference (G,nx.MultiGraph(T))
```

to have the same effect. I wonder if it might be safer instead to forbid comparisons between graphs and multigraphs. That way there wouldn't be ambiguity in the case, e.g., when the multigraph G has two edges between a pair of nodes and the graph H only one. I could see an argument for the difference() to be either 1 edge or 0 edges. If we force the user to "cast" one of the graphs to the same type then it would be the user's choice. Since there is an if for multigraph inputs, and the idea is that an edge of a multigraph is uniquely identified by its key (that is why keys=True), the difference is well defined. That seems to be the original intention of the if; it seems the cast was simply missing. But maybe I am wrong. Question: Can two edges of a multigraph joining the same nodes have the same key? It seems to me they can't. The keys used for MultiGraph() edges differentiate the edges, so they can't be the same (if you try to add another edge with the same key, the existing edge is simply updated):

``` python
In [1]: import networkx as nx

In [2]: G = nx.MultiGraph()

In [3]: G.add_edge(1,2,key='a')

In [4]: G.edges(keys=True)
Out[4]: [(1, 2, 'a')]

In [5]: G.add_edge(1,2,key='a')

In [6]: G.edges(keys=True)
Out[6]: [(1, 2, 'a')]

In [7]: G.add_edge(1,2,key='b')

In [8]: G.edges(keys=True)
Out[8]: [(1, 2, 'a'), (1, 2, 'b')]
```

I'm not sure we thought very carefully about how the binary operators work with graphs and multigraphs when we wrote them. When I look at it now it seems there could be some ambiguity (as above) in some cases when mixing graphs and multigraphs. I was suggesting maybe we resolve that by disallowing the mixed case. But I'm open to other suggestions too. Hi, yes, that was what I understood. I would say that whenever the operator is well defined (no ambiguities, as you pointed out) then the operator should be allowed. Looking at the code I thought the multigraph case was simply not complete. In the mixed case I am using (a multigraph and a tree derived from it) the keys of the edges are preserved, so the difference is well defined. In a general case it might indeed be ambiguous, but I can't put my finger on it. Do you have an example where the difference between a multigraph and a graph would give an ambiguous result? Or maybe one could just issue a warning for the mixed case! "Warning: difference between different types of graphs may give unexpected results."
I'm thinking of something like this:

``` python
In [1]: import networkx as nx

In [2]: G = nx.MultiGraph()

In [3]: G.add_edge(1,2,key='a')

In [4]: G.add_edge(1,2,key='b')

In [5]: H = nx.Graph()

In [6]: H.add_edge(1,2)
```

These two results might be confusing:

```
In [8]: nx.difference(H,G).edges()
Out[8]: []
```

(and if we use your suggested fix)

```
In [9]: nx.difference(G,nx.MultiGraph(H)).edges()
Out[9]: [(1, 2), (1, 2)]
```

You could argue that it is the right answer, but I think it could be simpler (for sure in the code) and safer to not allow mixed-type graphs. Indeed. As I see it, when you cast H to a multigraph the edge receives the default key (a "0"), therefore the answer is right, especially when you check the keys (that is, an edge of a multigraph is identified by its nodes and its key). If you do

``` python
import networkx as nx

G = nx.MultiGraph()
G.add_edge(1,2,key=0)
G.add_edge(1,2,key='a')
H = nx.Graph()
H.add_edge(1,2)
nx.difference(G,nx.MultiGraph(H)).edges(keys=True)
```

you get the right answer. So if not forbidden, one could issue a warning. If forbidden, one should report the right error and not just let the function fail (in this case the if would need another if checking whether H is a multigraph, and if it is not, issue the corresponding error). Any solution is fine with me. Given all of the logic/errors/warnings that would need to be put in the code, I'm still in favor of raising an error if the two graphs don't match types. That way the user will have to make a decision about what it means to compare edges of graphs and multigraphs with edge keys. But I think it is better that the user do that than for us to try to guess or catch all of the possible cases. Sounds good. I am going through some hectic time right now, but I could send you a patch in the following weeks.
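For the record, a sketch of the behavior once the type check is adopted (graphs made up for illustration):

``` python
import networkx as nx

G = nx.MultiGraph([(1, 2), (1, 2)])   # parallel edges, keys 0 and 1
H = nx.Graph([(1, 2)])

# nx.difference(G, H) now raises nx.NetworkXError (mixed types).
# The caller resolves the ambiguity with an explicit cast:
D = nx.difference(G, nx.MultiGraph(H))
print(D.edges(keys=True))   # [(1, 2, 1)]: the cast gives H's edge key 0, so only key 1 survives
```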
2013-08-25T16:24:06
networkx/networkx
954
networkx__networkx-954
[ "954" ]
a0ccc009eb95a70e3668e08f3ce5f72f156db215
diff --git a/networkx/drawing/nx_pylab.py b/networkx/drawing/nx_pylab.py --- a/networkx/drawing/nx_pylab.py +++ b/networkx/drawing/nx_pylab.py @@ -292,8 +292,7 @@ def draw_networkx_nodes(G, pos, pos : dictionary A dictionary with nodes as keys and positions as values. - If not specified a spring layout positioning will be computed. - See networkx.layout for functions that compute node positions. + Positions should be sequences of length 2. ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes. @@ -416,8 +415,7 @@ def draw_networkx_edges(G, pos, pos : dictionary A dictionary with nodes as keys and positions as values. - If not specified a spring layout positioning will be computed. - See networkx.layout for functions that compute node positions. + Positions should be sequences of length 2. edgelist : collection of edge tuples Draw only specified edges(default=G.edges()) @@ -642,10 +640,9 @@ def draw_networkx_labels(G, pos, G : graph A networkx graph - pos : dictionary, optional + pos : dictionary A dictionary with nodes as keys and positions as values. - If not specified a spring layout positioning will be computed. - See networkx.layout for functions that compute node positions. + Positions should be sequences of length 2. labels : dictionary, optional (default=None) Node labels in a dictionary keyed by node of text labels @@ -749,10 +746,9 @@ def draw_networkx_edge_labels(G, pos, G : graph A networkx graph - pos : dictionary, optional + pos : dictionary A dictionary with nodes as keys and positions as values. - If not specified a spring layout positioning will be computed. - See networkx.layout for functions that compute node positions. + Positions should be sequences of length 2. ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes.
Documentation for draw_networkx_edge_labels is wrong The documentation for `draw_networkx_edge_labels` says that the `pos` argument is optional and that a spring layout is used by default. However, I can't call the function with only the graph as its argument: I get an error saying that it takes two arguments.
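For reference, a minimal working call (pos must be supplied explicitly, e.g. from a layout function):

``` python
import networkx as nx
import matplotlib.pyplot as plt

G = nx.path_graph(4)
pos = nx.spring_layout(G)              # positions computed explicitly
nx.draw_networkx(G, pos)
nx.draw_networkx_edge_labels(G, pos)   # pos is required, contrary to the old docs
plt.show()
```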
2013-09-14T19:39:45
networkx/networkx
1038
networkx__networkx-1038
[ "1037" ]
db43c87081a3bd2b8cd92665110c2a4736cbf09b
diff --git a/networkx/algorithms/traversal/breadth_first_search.py b/networkx/algorithms/traversal/breadth_first_search.py --- a/networkx/algorithms/traversal/breadth_first_search.py +++ b/networkx/algorithms/traversal/breadth_first_search.py @@ -5,23 +5,50 @@ Basic algorithms for breadth-first searching. """ -__author__ = """\n""".join(['Aric Hagberg <[email protected]>']) - -__all__ = ['bfs_edges', 'bfs_tree', - 'bfs_predecessors', 'bfs_successors'] - import networkx as nx from collections import defaultdict, deque +__author__ = """\n""".join(['Aric Hagberg <[email protected]>']) +__all__ = ['bfs_edges', 'bfs_tree', 'bfs_predecessors', 'bfs_successors'] def bfs_edges(G, source, reverse=False): - """Produce edges in a breadth-first-search starting at source.""" - # Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py - # by D. Eppstein, July 2004. + """Produce edges in a breadth-first-search starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for breadth-first search and return edges in + the component reachable from source. + + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + Returns + ------- + edges: generator + A generator of edges in the breadth-first-search. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(list(nx.bfs_edges(G,0))) + [(0, 1), (1, 2)] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ if reverse and isinstance(G, nx.DiGraph): neighbors = G.predecessors_iter else: neighbors = G.neighbors_iter - visited=set([source]) + visited = set([source]) queue = deque([(source, neighbors(source))]) while queue: parent, children = queue[0] @@ -35,19 +62,110 @@ def bfs_edges(G, source, reverse=False): queue.popleft() def bfs_tree(G, source, reverse=False): - """Return directed tree of breadth-first-search from source.""" + """Return an oriented tree constructed from of a breadth-first-search + starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for breadth-first search and return edges in + the component reachable from source. + + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + Returns + ------- + T: NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(list(nx.bfs_edges(G,0))) + [(0, 1), (1, 2)] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ T = nx.DiGraph() T.add_node(source) T.add_edges_from(bfs_edges(G,source,reverse=reverse)) return T def bfs_predecessors(G, source): - """Return dictionary of predecessors in breadth-first-search from source.""" + """Return dictionary of predecessors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for breadth-first search and return edges in + the component reachable from source. + + Returns + ------- + pred: dict + A dictionary with nodes as keys and predecessor nodes as values. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(nx.bfs_predecessors(G,0)) + {1: 0, 2: 1} + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ return dict((t,s) for s,t in bfs_edges(G,source)) def bfs_successors(G, source): - """Return dictionary of successors in breadth-first-search from source.""" - d=defaultdict(list) + """Return dictionary of successors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for breadth-first search and return edges in + the component reachable from source. + + Returns + ------- + succ: dict + A dictionary with nodes as keys and list of succssors nodes as values. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(nx.bfs_successors(G,0)) + {0: [1], 1: [2]} + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ + d = defaultdict(list) for s,t in bfs_edges(G,source): d[s].append(t) return dict(d) diff --git a/networkx/algorithms/traversal/depth_first_search.py b/networkx/algorithms/traversal/depth_first_search.py --- a/networkx/algorithms/traversal/depth_first_search.py +++ b/networkx/algorithms/traversal/depth_first_search.py @@ -8,26 +8,51 @@ Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py by D. Eppstein, July 2004. """ -__author__ = """\n""".join(['Aric Hagberg <[email protected]>']) - +import networkx as nx +from collections import defaultdict +__author__ = """\n""".join(['Aric Hagberg <[email protected]>']) __all__ = ['dfs_edges', 'dfs_tree', 'dfs_predecessors', 'dfs_successors', 'dfs_preorder_nodes','dfs_postorder_nodes', 'dfs_labeled_edges'] -import networkx as nx -from collections import defaultdict +def dfs_edges(G, source=None): + """Produce edges in a depth-first-search (DFS). -def dfs_edges(G,source=None): - """Produce edges in a depth-first-search starting at source.""" - # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - # by D. Eppstein, July 2004. + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + Returns + ------- + edges: generator + A generator of edges in the depth-first-search. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(list(nx.dfs_edges(G,0))) + [(0, 1), (1, 2)] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ if source is None: # produce edges for all components - nodes=G + nodes = G else: # produce edges for components with source - nodes=[source] + nodes = [source] visited=set() for start in nodes: if start in visited: @@ -46,7 +71,28 @@ def dfs_edges(G,source=None): stack.pop() def dfs_tree(G, source): - """Return directed tree of depth-first-search from source.""" + """Return oriented tree constructed from a depth-first-search from source. 
+ + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + Returns + ------- + T : NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> T = nx.dfs_tree(G,0) + >>> print(T.edges()) + [(0, 1), (1, 2)] + """ T = nx.DiGraph() if source is None: T.add_nodes_from(G) @@ -56,13 +102,71 @@ def dfs_tree(G, source): return T def dfs_predecessors(G, source=None): - """Return dictionary of predecessors in depth-first-search from source.""" + """Return dictionary of predecessors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + Returns + ------- + pred: dict + A dictionary with nodes as keys and predecessor nodes as values. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(nx.dfs_predecessors(G,0)) + {1: 0, 2: 1} + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ return dict((t,s) for s,t in dfs_edges(G,source=source)) def dfs_successors(G, source=None): - """Return dictionary of successors in depth-first-search from source.""" - d=defaultdict(list) + """Return dictionary of successors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + Returns + ------- + succ: dict + A dictionary with nodes as keys and list of successor nodes as values. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(nx.dfs_successors(G,0)) + {0: [1], 1: [2]} + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ + d = defaultdict(list) for s,t in dfs_edges(G,source=source): d[s].append(t) return dict(d) @@ -71,36 +175,121 @@ def dfs_successors(G, source=None): def dfs_postorder_nodes(G,source=None): """Produce nodes in a depth-first-search post-ordering starting from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search post-ordering. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(list(nx.dfs_postorder_nodes(G,0))) + [2, 1, 0] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. 
""" post=(v for u,v,d in nx.dfs_labeled_edges(G,source=source) if d['dir']=='reverse') - # chain source to end of pre-ordering -# return chain(post,[source]) + # potential modification: chain source to end of post-ordering + # return chain(post,[source]) return post -def dfs_preorder_nodes(G,source=None): - """Produce nodes in a depth-first-search pre-ordering starting at source.""" +def dfs_preorder_nodes(G, source=None): + """Produce nodes in a depth-first-search pre-ordering starting + from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search pre-ordering. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> print(list(nx.dfs_preorder_nodes(G,0))) + [0, 1, 2] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + """ pre=(v for u,v,d in nx.dfs_labeled_edges(G,source=source) if d['dir']=='forward') - # chain source to beginning of pre-ordering -# return chain([source],pre) + # potential modification: chain source to beginning of pre-ordering + # return chain([source],pre) return pre -def dfs_labeled_edges(G,source=None): - """Produce edges in a depth-first-search starting at source and - labeled by direction type (forward, reverse, nontree). +def dfs_labeled_edges(G, source=None): + """Produce edges in a depth-first-search (DFS) labeled by type. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + Returns + ------- + edges: generator + A generator of edges in the depth-first-search labeled with 'forward', + 'nontree', and 'reverse'. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_path([0,1,2]) + >>> edges = (list(nx.dfs_labeled_edges(G,0))) + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + by D. Eppstein, July 2004. + + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. """ # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py # by D. Eppstein, July 2004. if source is None: # produce edges for all components - nodes=G + nodes = G else: # produce edges for components with source - nodes=[source] - visited=set() + nodes = [source] + visited = set() for start in nodes: if start in visited: continue
dfs_predecessors not working as expected Hi, I have the following simple program (NetworkX 1.8.1 installed via pip on ubuntu 12.04, python 2.7.3):

``` python
G = nx.DiGraph()
G.add_edge('a','b')
G.add_edge('b','c')
G.add_edge('c','d')
print nx.dfs_successors(G,'a')
print nx.dfs_predecessors(G,'a')
print nx.dfs_successors(G,'d')
print nx.dfs_predecessors(G,'d')
```

I expect as output

```
{'a': ['b'], 'c': ['d'], 'b': ['c']}
{}
{}
{'a': ['b'], 'c': ['d'], 'b': ['c']}
```

but the outcome is

```
{'a': ['b'], 'c': ['d'], 'b': ['c']}
{'c': 'b', 'b': 'a', 'd': 'c'}
{}
{}
```

It seems that something is wrong with the dfs_predecessors function.
The algorithm runs a DFS from the node you specify and outputs dictionaries of predecessors and successors. When you start with node 'a' the DFS is A->B->C->D - you get the successors by following the head of each arrow and the predecessors by following the tail. When you start with node 'd' the DFS is empty since there are no nodes reachable from 'd'. Ok, now I understand how it works. Maybe you can add this example to the documentation; I really thought "predecessors" meant that the DFS was run in reverse, like with the ancestors function. (I can do it if you tell me how.) I still have one question though: why does dfs_successors return lists as values while dfs_predecessors returns single nodes? P.S.: Thanks for the quick answer! Good idea to add an example. It might make sense to have predecessors return a list to match successors - it's not a list because there can be only one predecessor in a DFS.
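If a reversed traversal is what's wanted (as the reporter expected), one option is to run the search on the reversed graph; a sketch:

``` python
import networkx as nx

G = nx.DiGraph([('a', 'b'), ('b', 'c'), ('c', 'd')])
# Walk the arrows backwards by reversing the graph first:
print(nx.dfs_successors(G.reverse(), 'd'))
# {'d': ['c'], 'c': ['b'], 'b': ['a']}  (dict ordering may vary)
```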
2013-12-22T16:26:41
networkx/networkx
1045
networkx__networkx-1045
[ "848" ]
add3c3a88a6744cd834aa16ce9cd60120e67adda
diff --git a/networkx/readwrite/json_graph/__init__.py b/networkx/readwrite/json_graph/__init__.py --- a/networkx/readwrite/json_graph/__init__.py +++ b/networkx/readwrite/json_graph/__init__.py @@ -1,8 +1,16 @@ """ ********* -JSON data +JSON data ********* Generate and parse JSON serializable data for NetworkX graphs. + +These formats are suitable for use with the d3.js examples http://d3js.org/ + +The three formats that you can generate with NetworkX are: + + - node-link like in the d3.js example http://bl.ocks.org/mbostock/4062045 + - tree like in the d3.js example http://bl.ocks.org/mbostock/4063550 + - adjacency like in the d3.js example http://bost.ocks.org/mike/miserables/ """ from networkx.readwrite.json_graph.node_link import * from networkx.readwrite.json_graph.adjacency import *
Need JSON format description The page on JSON serialization lacks information about the actual structure of produced data. This make it hard to see if networkx is a suitable tool for a backend of already existing JavaScript front. http://networkx.lanl.gov/reference/readwrite.json_graph.html
They are the formats that the D3.js package uses. And you are right, there should be some documentation.

Can you clarify? This answer says that D3.js doesn't use any specific format - http://stackoverflow.com/a/11089330

That is true. There are a few different formats of JSON serialization that are used in the d3.js examples and you can, as @mbostock points out, parse CSV data or whatever you want. The three formats that you can generate with NetworkX are

- node-link like in the d3.js example http://bl.ocks.org/mbostock/4062045
- tree like in the d3.js example http://bl.ocks.org/mbostock/4063550
- adjacency like in the d3.js example http://bost.ocks.org/mike/miserables/

Perhaps some mention of the three available types, with outbound links to the blocks listed, would suffice in the docs. I had a real easy time building a force-directed d3 graph using XML parsed into networkx for modeling, for a free-hack day at work.
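As a pointer for anyone landing here, a short sketch (assuming only the standard library plus networkx) of producing the node-link flavour consumed by the first d3.js example above:

``` python
import json
import networkx as nx
from networkx.readwrite import json_graph

G = nx.path_graph(3)
data = json_graph.node_link_data(G)  # dict with 'nodes' and 'links' keys
print(json.dumps(data))              # ready to feed to a d3.js force layout
```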
2014-01-04T22:06:13
networkx/networkx
1,064
networkx__networkx-1064
[ "1063" ]
bfb1dae3a9fda611b5e5e2c635d234a026052487
diff --git a/networkx/readwrite/gexf.py b/networkx/readwrite/gexf.py
--- a/networkx/readwrite/gexf.py
+++ b/networkx/readwrite/gexf.py
@@ -212,7 +212,13 @@ class GEXF(object):
     xml_type = dict(types)
     python_type = dict(reversed(a) for a in types)
-    convert_bool={'false': False, 'False': False, 'true': True, 'True': True}
+    # http://www.w3.org/TR/xmlschema-2/#boolean
+    convert_bool = {
+        'true': True, 'false': False,
+        'True': True, 'False': False,
+        '0': False, 0: False,
+        '1': True, 1: True
+    }
 
 #    try:
 #        register_namespace = ET.register_namespace
@@ -643,7 +649,7 @@ def make_graph(self, graph_xml):
             G.graph['mode']='dynamic'
         else:
             G.graph['mode']='static'
-            
+
         # timeformat
         self.timeformat=graph_xml.get('timeformat')
         if self.timeformat == 'date':
diff --git a/networkx/readwrite/graphml.py b/networkx/readwrite/graphml.py
--- a/networkx/readwrite/graphml.py
+++ b/networkx/readwrite/graphml.py
@@ -221,10 +221,13 @@ class GraphML(object):
     xml_type = dict(types)
     python_type = dict(reversed(a) for a in types)
-    convert_bool={'true':True,'false':False,
-                  'True': True, 'False': False}
-
-
+    # http://www.w3.org/TR/xmlschema-2/#boolean
+    convert_bool = {
+        'true': True, 'false': False,
+        'True': True, 'False': False,
+        '0': False, 0: False,
+        '1': True, 1: True
+    }
 
 class GraphMLWriter(GraphML):
     def __init__(self, graph=None, encoding="utf-8",prettyprint=True):
extend convert_bool in gexf.py and graphml.py to all valid booleans
Follow-up to pull request #971 - recommend extending `convert_bool` to include true/false support for the literals 0/1 and their string variants.

1. [graphml.py:224](https://github.com/networkx/networkx/blob/7d9682a07dcae30acab3c4841e33d31f727a3fb2/networkx/readwrite/graphml.py#L224)
2. [gexf.py:215](https://github.com/networkx/networkx/blob/87764469941cfa52a5aefc1b1b381a614b484c8d/networkx/readwrite/gexf.py#L215)

E.g., in [graphml.py:224](https://github.com/networkx/networkx/blob/7d9682a07dcae30acab3c4841e33d31f727a3fb2/networkx/readwrite/graphml.py#L224):

Old:

``` python
convert_bool={'true':True,'false':False,
              'True': True, 'False': False}
```

New:

``` python
convert_bool={'true':True,'false':False,
              'True': True, 'False': False,
              '0': False, 0: False,
              '1': True, 1: True}
```

For reference, the [XML Schema Datatypes](http://www.w3.org/TR/xmlschema-2/#boolean) section on booleans.
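A quick standalone sketch (not from the thread) checking the proposed mapping against the full xs:boolean lexical space plus the literal ints:

``` python
convert_bool = {'true': True, 'false': False,
                'True': True, 'False': False,
                '0': False, 0: False,
                '1': True, 1: True}

# xs:boolean admits exactly {'true', 'false', '1', '0'} as lexical values
for raw, expected in [('true', True), ('false', False),
                      ('1', True), ('0', False),
                      (1, True), (0, False)]:
    assert convert_bool[raw] is expected
```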
2014-02-21T01:04:10
networkx/networkx
1,078
networkx__networkx-1078
[ "1075" ]
2efd327800076adac21eaca5b9b97bea0f18749f
diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py --- a/networkx/convert_matrix.py +++ b/networkx/convert_matrix.py @@ -1,27 +1,26 @@ -"""Functions to convert NetworkX graphs to and from other formats. +"""Functions to convert NetworkX graphs to and from numpy/scipy matrices. The preferred way of converting data to a NetworkX graph is through the graph constuctor. The constructor calls the to_networkx_graph() function which attempts to guess the input type and convert it automatically. Examples - - +-------- Create a 10 node random graph from a numpy matrix >>> import numpy ->>> a=numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10)) ->>> D=nx.DiGraph(a) +>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10)) +>>> D = nx.DiGraph(a) or equivalently ->>> D=nx.to_networkx_graph(a,create_using=nx.DiGraph()) +>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph()) See Also -------- nx_pygraphviz, nx_pydot """ -# Copyright (C) 2006-2013 by +# Copyright (C) 2006-2014 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> @@ -80,7 +79,7 @@ def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, Returns ------- M : NumPy matrix - Graph adjacency matrix. + Graph adjacency matrix See Also -------- @@ -88,14 +87,30 @@ def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, Notes ----- - The matrix entries are assigned with weight edge attribute. When - an edge does not have the weight attribute, the value of the entry is 1. - For multiple edges, the values of the entries are the sums of the edge - attributes for each edge. + The matrix entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the 'multigraph_weight' paramter. The default is to + sum the weight attributes for each of the parallel edges. When `nodelist` does not contain every node in `G`, the matrix is built from the subgraph of `G` that is induced by the nodes in `nodelist`. + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attributr of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Numpy matrix can be modified as follows: + + >>> import numpy as np + >>> G = nx.Graph([(1,1)]) + >>> A = nx.to_numpy_matrix(G) + >>> A + matrix([[ 1.]]) + >>> A.A[np.diag_indices_from(A)] *= 2 + >>> A + matrix([[ 2.]]) + Examples -------- >>> G = nx.MultiDiGraph() @@ -394,6 +409,21 @@ def to_scipy_sparse_matrix(G, nodelist=None, dtype=None, Uses coo_matrix format. To convert to other formats specify the format= keyword. + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). 
If the + alternate convention of doubling the edge weight is desired the + resulting Scipy sparse matrix can be modified as follows: + + >>> import scipy as sp + >>> G = nx.Graph([(1,1)]) + >>> A = nx.to_scipy_sparse_matrix(G) + >>> print(A.todense()) + [[1]] + >>> A.setdiag(A.diagonal()*2) + >>> print(A.todense()) + [[2]] + Examples -------- >>> G = nx.MultiDiGraph() @@ -435,8 +465,20 @@ def to_scipy_sparse_matrix(G, nodelist=None, dtype=None, shape=(nlen,nlen), dtype=dtype) else: # symmetrize matrix - M = sparse.coo_matrix((data+data, (row+col,col+row)), - shape=(nlen,nlen), dtype=dtype) + d = data + data + r = row + col + c = col + row + # selfloop entries get double counted when symmetrizing + # so we subtract the data on the diagonal + selfloops = G.selfloop_edges(data=True) + if selfloops: + diag_index,diag_data = zip(*((index[u],-d.get(weight,1)) + for u,v,d in selfloops + if u in index and v in index)) + d += diag_data + r += diag_index + c += diag_index + M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype) try: return M.asformat(format) except AttributeError: diff --git a/networkx/linalg/graphmatrix.py b/networkx/linalg/graphmatrix.py --- a/networkx/linalg/graphmatrix.py +++ b/networkx/linalg/graphmatrix.py @@ -131,12 +131,28 @@ def adjacency_matrix(G, nodelist=None, weight='weight'): dictionary-of-dictionaries format that can be addressed as a sparse matrix. - For MultiGraph/MultiDiGraph, the edges weights are summed. + For MultiGraph/MultiDiGraph with parallel edges the weights are summed. See to_numpy_matrix for other options. + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the edge weight attribute + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Scipy sparse matrix can be modified as follows: + + >>> import scipy as sp + >>> G = nx.Graph([(1,1)]) + >>> A = nx.adjacency_matrix(G) + >>> print(A.todense()) + [[1]] + >>> A.setdiag(A.diagonal()*2) + >>> print(A.todense()) + [[2]] + See Also -------- to_numpy_matrix + to_scipy_sparse_matrix to_dict_of_dicts """ return nx.to_scipy_sparse_matrix(G,nodelist=nodelist,weight=weight) @@ -147,6 +163,6 @@ def adjacency_matrix(G, nodelist=None, weight='weight'): def setup_module(module): from nose import SkipTest try: - import numpy + import scipy except: - raise SkipTest("NumPy not available") + raise SkipTest("SciPy not available") diff --git a/networkx/linalg/laplacianmatrix.py b/networkx/linalg/laplacianmatrix.py --- a/networkx/linalg/laplacianmatrix.py +++ b/networkx/linalg/laplacianmatrix.py @@ -117,10 +117,6 @@ def normalized_laplacian_matrix(G, nodelist=None, weight='weight'): nodelist = G.nodes() A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, format='csr') - # the convention for normalized Laplacian is to not count self loops - # twice in the diagonal. So we remove one here. - for n,_ in G.selfloop_edges(): - A[n,n] -= 1 n,m = A.shape diags = A.sum(axis=1).flatten() D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')
diff --git a/networkx/tests/test_convert_scipy.py b/networkx/tests/test_convert_scipy.py --- a/networkx/tests/test_convert_scipy.py +++ b/networkx/tests/test_convert_scipy.py @@ -177,3 +177,13 @@ def test_ordering(self): G.add_edge(3,1) M = nx.to_scipy_sparse_matrix(G,nodelist=[3,2,1]) np_assert_equal(M.todense(), np.matrix([[0,0,1],[1,0,0],[0,1,0]])) + + def test_selfloop_graph(self): + G = nx.Graph([(1,1)]) + M = nx.to_scipy_sparse_matrix(G) + np_assert_equal(M.todense(), np.matrix([[1]])) + + def test_selfloop_digraph(self): + G = nx.DiGraph([(1,1)]) + M = nx.to_scipy_sparse_matrix(G) + np_assert_equal(M.todense(), np.matrix([[1]]))
sparse matrix todense changes diag I was adding in code for community detection, and all my tests started failing. Found this behaviour when roundtripping from_numpy_matrix, and adj_matrix: In [63]: jnk = np.eye(10) array([[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) In [64]: graph = networkx.from_numpy_matrix(jnk) In [65]: mat = networkx.adjacency_matrix(graph).todense() In [66]: mat Out[66]: matrix([[ 2., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 2., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 2., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 2., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 2., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 2., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 2., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 2., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0., 2., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 2.]]) In [67]: graph = networkx.from_numpy_matrix(mat) In [68]: mat2 = networkx.adjacency_matrix(graph).todense() In [69]: mat2 Out[69]: matrix([[ 4., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 4., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 4., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 4., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 4., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 4., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 4., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 4., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0., 4., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 4.]]) Thoughts?
Looks like self loops are getting handled incorrectly (doubled). Thanks for reporting this.
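A minimal regression sketch for this (assuming numpy and scipy are installed, and a NetworkX where adjacency_matrix returns a SciPy sparse matrix, as in the report above): a roundtrip through adjacency_matrix should leave the diagonal alone.

``` python
import numpy as np
import networkx as nx

A = np.eye(3)
G = nx.from_numpy_matrix(A)
M = nx.adjacency_matrix(G).todense()
assert (M == A).all()  # fails before the fix: the diagonal comes back doubled
```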
2014-03-15T14:20:46
networkx/networkx
1,098
networkx__networkx-1098
[ "1087" ]
c8ada350b11fc71e747d10f8ea93b4f43659b6ae
diff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py deleted file mode 100644 --- a/examples/drawing/atlas.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -""" -Atlas of all graphs of 6 nodes or less. - -""" -__author__ = """Aric Hagberg ([email protected])""" -# Copyright (C) 2004 by -# Aric Hagberg <[email protected]> -# Dan Schult <[email protected]> -# Pieter Swart <[email protected]> -# All rights reserved. -# BSD license. - -import networkx as nx -#from networkx import * -#from networkx.generators.atlas import * -from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic -import random - -def atlas6(): - """ Return the atlas of all connected graphs of 6 nodes or less. - Attempt to check for isomorphisms and remove. - """ - - Atlas=nx.graph_atlas_g()[0:208] # 208 - # remove isolated nodes, only connected graphs are left - U=nx.Graph() # graph for union of all graphs in atlas - for G in Atlas: - zerodegree=[n for n in G if G.degree(n)==0] - for n in zerodegree: - G.remove_node(n) - U=nx.disjoint_union(U,G) - - # list of graphs of all connected components - C=nx.connected_component_subgraphs(U) - - UU=nx.Graph() - # do quick isomorphic-like check, not a true isomorphism checker - nlist=[] # list of nonisomorphic graphs - for G in C: - # check against all nonisomorphic graphs so far - if not iso(G,nlist): - nlist.append(G) - UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs - return UU - -def iso(G1, glist): - """Quick and dirty nonisomorphism checker used to check isomorphisms.""" - for G2 in glist: - if isomorphic(G1,G2): - return True - return False - - -if __name__ == '__main__': - - import networkx as nx - - G=atlas6() - - print("graph has %d nodes with %d edges"\ - %(nx.number_of_nodes(G),nx.number_of_edges(G))) - print(nx.number_connected_components(G),"connected components") - - - try: - from networkx import graphviz_layout - except ImportError: - raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot") - - import matplotlib.pyplot as plt - plt.figure(1,figsize=(8,8)) - # layout graphs with positions using graphviz neato - pos=nx.graphviz_layout(G,prog="neato") - # color nodes the same in each connected subgraph - C=nx.connected_component_subgraphs(G) - for g in C: - c=[random.random()]*nx.number_of_nodes(g) # random color... - nx.draw(g, - pos, - node_size=40, - node_color=c, - vmin=0.0, - vmax=1.0, - with_labels=False - ) - plt.savefig("atlas.png",dpi=75) diff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py new file mode 120000 --- /dev/null +++ b/examples/drawing/atlas.py @@ -0,0 +1 @@ +../graph/atlas.py \ No newline at end of file
doc build broken From a clean checkout I ran `python setup.py install` and then I attempted to build a local copy of the docs via `make html` in `doc` and got the following error: ``` (py2k-base)tcaswell@tcaswellpc1:~/other_source/networkx/doc$ make html mkdir -p build ./make_gallery.py atlas.pyTraceback (most recent call last): File "./make_gallery.py", line 57, in <module> execfile(example) File "atlas.py", line 59, in <module> G=atlas6() File "atlas.py", line 25, in atlas6 Atlas=nx.graph_atlas_g()[0:208] # 208 AttributeError: 'module' object has no attribute 'graph_atlas_g' make: *** [build/generate-stamp] Error 1 ```
2014-04-08T16:01:47
networkx/networkx
1,276
networkx__networkx-1276
[ "1275" ]
5ff47ea7927ea45e88ca2a8fd4a13728298ca3e2
diff --git a/networkx/algorithms/cluster.py b/networkx/algorithms/cluster.py --- a/networkx/algorithms/cluster.py +++ b/networkx/algorithms/cluster.py @@ -136,7 +136,7 @@ def average_clustering(G, nodes=None, weight=None, count_zeros=True): The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. - count_zeros : bool (default=False) + count_zeros : bool If False include only the nodes with nonzero clustering in the average. Returns
average_clustering count_zeros parameter True by default but stated as False in docstring
Compare:

https://github.com/networkx/networkx/blob/a822b491c6b53a2819915a8c620e97a36ba6c060/networkx/algorithms/cluster.py#L117

with:

https://github.com/networkx/networkx/blob/a822b491c6b53a2819915a8c620e97a36ba6c060/networkx/algorithms/cluster.py#L139

The former correctly indicates that `count_zeros` is `True` by default, while the latter, which one might come across while browsing the documentation, incorrectly suggests the opposite, i.e. that `count_zeros` is `False` by default.

I'd suggest that documenting the default value manually in the docstring is redundant anyway, so it should be omitted completely rather than corrected to read `True`. I can fix this.
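A small sketch confirming the actual default (a triangle with a pendant node, so the two settings give visibly different averages):

``` python
import networkx as nx

G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3)])  # triangle plus pendant node 3
print(nx.average_clustering(G))                     # ~0.583: node 3's zero included
print(nx.average_clustering(G, count_zeros=False))  # ~0.778: zero values dropped
```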
2014-10-28T08:06:43
networkx/networkx
1,309
networkx__networkx-1309
[ "1302" ]
e9e3913981e6efa6e5b710a174067846b304f5fd
diff --git a/networkx/generators/geometric.py b/networkx/generators/geometric.py --- a/networkx/generators/geometric.py +++ b/networkx/generators/geometric.py @@ -242,8 +242,8 @@ def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0,0,1,1)): G.add_nodes_from(range(n)) (xmin,ymin,xmax,ymax)=domain for n in G: - G.node[n]['pos']=((xmin + (xmax-xmin))*random.random(), - (ymin + (ymax-ymin))*random.random()) + G.node[n]['pos']=(xmin + ((xmax-xmin)*random.random()), + ymin + ((ymax-ymin)*random.random())) if L is None: # find maximum distance L between two nodes l = 0
waxman_model's domain problem I think `waxman_model` doesn't process the domain correctly. networkx/geometric.py (version 1.9.1) --- Original version: ``` python def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0,0,1,1)): ... for n in G: G.node[n]['pos']=((xmin + (xmax-xmin))*random.random(), (ymin + (ymax-ymin))*random.random()) ``` Fixed version: ``` python def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0,0,1,1)): ... for n in G: G.node[n]['pos']=(xmin + ((xmax-xmin)*random.random()), ymin + ((ymax-ymin)*random.random())) ```
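A quick sketch of the two formulas with, say, domain=(2, 2, 3, 3); every sampled coordinate should fall in [2, 3), but the original expression does not:

``` python
import random

xmin, ymin, xmax, ymax = 2, 2, 3, 3
r = random.random()
old_x = (xmin + (xmax - xmin)) * r   # (2 + 1) * r, i.e. anywhere in [0, 3)
new_x = xmin + ((xmax - xmin) * r)   # 2 + r, i.e. correctly in [2, 3)
print(old_x, new_x)
```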
2014-12-16T23:27:34
networkx/networkx
1,317
networkx__networkx-1317
[ "1227" ]
653a11e39194b84f622bd46d4cff5ad8a63a7f1f
diff --git a/networkx/readwrite/nx_shp.py b/networkx/readwrite/nx_shp.py --- a/networkx/readwrite/nx_shp.py +++ b/networkx/readwrite/nx_shp.py @@ -55,16 +55,15 @@ def read_shp(path): except ImportError: raise ImportError("read_shp requires OGR: http://www.gdal.org/") - net = nx.DiGraph() - - def getfieldinfo(lyr, feature, flds): - f = feature - return [f.GetField(f.GetFieldIndex(x)) for x in flds] + if not isinstance(path, str): + return - def addlyr(lyr, fields): - for findex in range(lyr.GetFeatureCount()): - f = lyr.GetFeature(findex) - flddata = getfieldinfo(lyr, f, fields) + net = nx.DiGraph() + shp = ogr.Open(path) + for lyr in shp: + fields = [x.GetName() for x in lyr.schema] + for f in lyr: + flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields] g = f.geometry() attributes = dict(zip(fields, flddata)) attributes["ShpName"] = lyr.GetName() @@ -76,14 +75,6 @@ def addlyr(lyr, fields): attributes["Json"] = g.ExportToJson() last = g.GetPointCount() - 1 net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes) - - if isinstance(path, str): - shp = ogr.Open(path) - lyrcount = shp.GetLayerCount() # multiple layers indicate a directory - for lyrindex in range(lyrcount): - lyr = shp.GetLayerByIndex(lyrindex) - flds = [x.GetName() for x in lyr.schema] - addlyr(lyr, flds) return net @@ -168,7 +159,7 @@ def create_feature(geometry, lyr, attributes=None): pass nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint) for n in G: - data = G.node[n] or {} + data = G.node[n] g = netgeometry(n, data) create_feature(g, nodes) try:
diff --git a/networkx/readwrite/tests/test_shp.py b/networkx/readwrite/tests/test_shp.py --- a/networkx/readwrite/tests/test_shp.py +++ b/networkx/readwrite/tests/test_shp.py @@ -50,7 +50,8 @@ def createlayer(driver): for path, name in zip(self.paths, self.names): feat = ogr.Feature(lyr.GetLayerDefn()) g = ogr.Geometry(ogr.wkbLineString) - map(lambda xy: g.AddPoint_2D(*xy), path) + for p in path: + g.AddPoint_2D(*p) feat.SetGeometry(g) feat.SetField("Name", name) lyr.CreateFeature(feat) @@ -60,7 +61,8 @@ def createlayer(driver): def testload(self): expected = nx.DiGraph() - map(expected.add_path, self.paths) + for p in self.paths: + expected.add_path(p) G = nx.read_shp(self.shppath) assert_equal(sorted(expected.node), sorted(G.node)) assert_equal(sorted(expected.edges()), sorted(G.edges())) @@ -101,7 +103,7 @@ def testattributes(lyr, graph): while feature: coords = [] ref = feature.GetGeometryRef() - for i in xrange(ref.GetPointCount()): + for i in range(ref.GetPointCount()): coords.append(ref.GetPoint_2D(i)) name = feature.GetFieldAsString('Name') assert_equal(graph.get_edge_data(*coords)['Name'], name)
xrange vs python 3 Searching the networkx code for `xrange` I see it's used in some "Shapefile" related code and tests. Should this be updated for python 3 compatibility, and is it not tested in the TravisCI testing?
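For context, a sketch of the Python-3-safe replacements involved (generic Python, nothing shapefile-specific):

``` python
d = {'a': 1, 'b': 2}

for k, v in d.items():    # was: d.iteritems()
    pass

for i in range(len(d)):   # was: xrange(len(d))
    pass

# map() is lazy on Python 3, so side-effect-only maps silently do nothing;
# an explicit loop works on both versions:
points = [(0, 0), (1, 1)]
for p in points:          # was: map(lambda xy: g.AddPoint_2D(*xy), points)
    pass
```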
There is also an 'iteritems' in there and maybe some other Python2isms. I suggest we write some proper tests for that module or else consider removing it.

Here is a nice example that uses read_shp(), http://ipython-books.github.io/featured-03.html

From that link: "At the time of this writing, NetworkX's support of Shapefile doesn't seem to be compatible with Python 3.x."

Yes, but it is an indication that there are users and making it Python3 compatible would be good. I wonder if the osgeo library this uses is Python3 ready?

> I wonder if the osgeo library this uses is Python3 ready?

Yes according to http://trac.osgeo.org/gdal/wiki/GdalOgrInPython it's been ready since version https://pypi.python.org/pypi/GDAL/1.7.0 which was uploaded in early 2010.

The .shp files are binary (http://en.wikipedia.org/wiki/Shapefile#Shapefile_shape_format_.28.shp.29), so not easy to paste directly into python unit testing code. Maybe @bwreilly (original author) can help us update this and figure out how to test it?

I think testing it should be about the [same as it is now](https://github.com/networkx/networkx/blob/master/networkx/readwrite/tests/test_shp.py), which is to say a pair of shapefiles are created temporarily as part of `setUp`, used for testing, and then eliminated in the `tearDown` method. As for updating to Python 3, I could take a look at it over the weekend. But I'll admit I haven't done much Python 3 in general.

Ah yes, there are tests. I completely missed that. Updating to Python3 should be pretty simple (xrange->range, iteritems()->items() etc). I'll take a look and see if there are other issues.

I tried fixing those two issues (xrange, iteritems) and the tests fail - maybe the file isn't getting loaded correctly? So any help you can give would be great.

The tests may not need to change much, but unless the environment has `osgeo` they will be skipped. For Ubuntu it is packaged under `python3-gdal` if that would help a Python3 TravisCI configuration find it.

Yes, that is the correct package (python-gdal, python3-gdal).

Added Travis testing under Python 2.7 in dda332e. Had to work around travis-ci/travis-ci#2683 and force `pip` to install an older version of GDAL because the latest is too new for the `libgdal-dev` from `apt-get` on Travis.

For Python 3.x, the necessary packages do install, but `f.geometry()` in the code returns `None` instead of something meaningful and breaks the tests as a result. This needs someone familiar with the GDAL bindings to take a look (at minimum the `f.geometry()` issue).
2015-01-05T03:56:55
networkx/networkx
1,321
networkx__networkx-1321
[ "1292" ]
7763740355c9685b819aa8a7840c6c1bbbf3105f
diff --git a/networkx/convert.py b/networkx/convert.py --- a/networkx/convert.py +++ b/networkx/convert.py @@ -125,6 +125,19 @@ def to_networkx_graph(data,create_using=None,multigraph_input=False): except: raise nx.NetworkXError("Input is not a valid edge list") + # Pandas DataFrame + try: + import pandas as pd + if isinstance(data, pd.DataFrame): + try: + return nx.from_pandas_dataframe(data, create_using=create_using) + except: + msg = "Input is not a correct Pandas DataFrame." + raise nx.NetworkXError(msg) + except ImportError: + msg = 'pandas not found, skipping conversion test.' + warnings.warn(msg, ImportWarning) + # numpy matrix or ndarray try: import numpy diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py --- a/networkx/convert_matrix.py +++ b/networkx/convert_matrix.py @@ -34,9 +34,138 @@ 'Pieter Swart ([email protected])', 'Dan Schult([email protected])']) __all__ = ['from_numpy_matrix', 'to_numpy_matrix', + 'from_pandas_dataframe', 'to_pandas_dataframe', 'to_numpy_recarray', 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix'] +def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0): + """Return the graph adjacency matrix as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None, optional + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float, optional + The matrix values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are matrix values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + df : Pandas DataFrame + Graph adjacency matrix + + Notes + ----- + The DataFrame entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the 'multigraph_weight' parameter. The default is to + sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). 
If the + alternate convention of doubling the edge weight is desired the + resulting Pandas DataFrame can be modified as follows: + + >>> import pandas as pd + >>> import numpy as np + >>> G = nx.Graph([(1,1)]) + >>> df = nx.to_pandas_dataframe(G) + >>> df + 1 + 1 1 + >>> df.values[np.diag_indices_from(df)] *= 2 + >>> df + 1 + 1 2 + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0,1,weight=2) + >>> G.add_edge(1,0) + >>> G.add_edge(2,2,weight=3) + >>> G.add_edge(2,2) + >>> nx.to_pandas_dataframe(G, nodelist=[0,1,2]) + 0 1 2 + 0 0 2 0 + 1 1 0 0 + 2 0 0 4 + """ + import pandas as pd + M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge) + if nodelist is None: + nodelist = G.nodes() + nodeset = set(nodelist) + df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist) + return df + +def from_pandas_dataframe(df,create_using=None): + """Return a graph from Pandas DataFrame. + + The Pandas DataFrame is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + df : Pandas DataFrame + An adjacency matrix representation of a graph + + create_using : NetworkX graph + Use specified graph for result. The default is Graph() + + Notes + ----- + If the numpy matrix has a single data type for each matrix entry it + will be converted to an appropriate Python data type. + + If the numpy matrix has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + from_pandas_dataframe + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> df=pd.DataFrame([[1,1],[2,1]]) + >>> G=nx.from_pandas_dataframe(df) + """ + + import pandas as pd + A = df.values + G = from_numpy_matrix(A, create_using) + try: + df = df[df.index] + except: + raise nx.NetworkXError("Columns must match Indices.", + "%s not in columns"%list(set(df.index).difference(set(df.columns)))) + nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False) + return G + def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, multigraph_weight=sum, weight='weight', nonedge=0.0): """Return the graph adjacency matrix as a NumPy matrix. @@ -90,14 +219,14 @@ def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, The matrix entries are assigned to the weight edge attribute. When an edge does not have a weight attribute, the value of the entry is set to the number 1. For multiple (parallel) edges, the values of the entries - are determined by the 'multigraph_weight' paramter. The default is to + are determined by the 'multigraph_weight' parameter. The default is to sum the weight attributes for each of the parallel edges. When `nodelist` does not contain every node in `G`, the matrix is built from the subgraph of `G` that is induced by the nodes in `nodelist`. The convention used for self-loop edges in graphs is to assign the - diagonal matrix entry value to the weight attributr of the edge + diagonal matrix entry value to the weight attribute of the edge (or the number 1 if the edge has no weight attribute). If the alternate convention of doubling the edge weight is desired the resulting Numpy matrix can be modified as follows:
diff --git a/networkx/classes/tests/test_timing.py b/networkx/classes/tests/test_timing.py --- a/networkx/classes/tests/test_timing.py +++ b/networkx/classes/tests/test_timing.py @@ -59,7 +59,7 @@ class Benchmark(object): - """ + """ Class to benchmark (time) various Graph routines. Parameters @@ -67,7 +67,7 @@ class Benchmark(object): graph_classes : List of classes to test. tests : List of tests to run on each class. - Format for tests: + Format for tests: (name, (test_string, setup_string, runs, repeats, [cutoff_ratio])) name: A string used to identify this test when reporting results. @@ -82,13 +82,13 @@ class Benchmark(object): Notes ----- - Benchmark uses the timeit package and timeit.Timer class. + Benchmark uses the timeit package and timeit.Timer class. """ def __init__(self, graph_classes, tests=all_tests): self.gc = graph_classes self.tests = tests - def run(self, verbose=False, cutoff_default=2.0): + def run(self, verbose=False, cutoff_default=3): errors='' headers=list(self.gc) if verbose: diff --git a/networkx/drawing/tests/test_pylab.py b/networkx/drawing/tests/test_pylab.py --- a/networkx/drawing/tests/test_pylab.py +++ b/networkx/drawing/tests/test_pylab.py @@ -16,6 +16,7 @@ def setupClass(cls): import matplotlib as mpl mpl.use('PS',warn=False) import matplotlib.pyplot as plt + plt.rcParams['text.usetex'] = False except ImportError: raise SkipTest('matplotlib not available.') except RuntimeError: @@ -26,15 +27,20 @@ def setUp(self): def test_draw(self): - N=self.G - nx.draw_spring(N) - plt.savefig("test.ps") - nx.draw_random(N) - plt.savefig("test.ps") - nx.draw_circular(N) - plt.savefig("test.ps") - nx.draw_spectral(N) - plt.savefig("test.ps") - nx.draw_spring(N.to_directed()) - plt.savefig("test.ps") - os.unlink('test.ps') + try: + N=self.G + nx.draw_spring(N) + plt.savefig("test.ps") + nx.draw_random(N) + plt.savefig("test.ps") + nx.draw_circular(N) + plt.savefig("test.ps") + nx.draw_spectral(N) + plt.savefig("test.ps") + nx.draw_spring(N.to_directed()) + plt.savefig("test.ps") + finally: + try: + os.unlink('test.ps') + except OSError: + pass
added to_pandas_dataframe and from_pandas_dataframe
closes issue #1174
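A roundtrip sketch of the two new helpers (assuming pandas is installed; node labels come back from the DataFrame's columns):

``` python
import networkx as nx

G = nx.cycle_graph(4)
df = nx.to_pandas_dataframe(G)    # adjacency matrix, labelled by node
H = nx.from_pandas_dataframe(df)  # back to a Graph via the labelled matrix
assert sorted(G.edges()) == sorted(H.edges())
```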
2015-01-06T06:09:22
networkx/networkx
1,328
networkx__networkx-1328
[ "1327" ]
c00cf03675a605dd687ef28f7ae312b90f83aedd
diff --git a/networkx/classes/digraph.py b/networkx/classes/digraph.py --- a/networkx/classes/digraph.py +++ b/networkx/classes/digraph.py @@ -399,8 +399,16 @@ def add_nodes_from(self, nodes, **attr): """ for n in nodes: + # keep all this inside try/except because + # CPython throws TypeError on n not in self.succ, + # while pre-2.7.5 ironpython throws on self.succ[n] try: - newnode=n not in self.succ + if n not in self.succ: + self.succ[n] = self.adjlist_dict_factory() + self.pred[n] = self.adjlist_dict_factory() + self.node[n] = attr.copy() + else: + self.node[n].update(attr) except TypeError: nn,ndict = n if nn not in self.succ: @@ -413,13 +421,6 @@ def add_nodes_from(self, nodes, **attr): olddict = self.node[nn] olddict.update(attr) olddict.update(ndict) - continue - if newnode: - self.succ[n] = self.adjlist_dict_factory() - self.pred[n] = self.adjlist_dict_factory() - self.node[n] = attr.copy() - else: - self.node[n].update(attr) def remove_node(self, n): """Remove node n. diff --git a/networkx/classes/graph.py b/networkx/classes/graph.py --- a/networkx/classes/graph.py +++ b/networkx/classes/graph.py @@ -507,8 +507,15 @@ def add_nodes_from(self, nodes, **attr): """ for n in nodes: + # keep all this inside try/except because + # CPython throws TypeError on n not in self.succ, + # while pre-2.7.5 ironpython throws on self.succ[n] try: - newnode=n not in self.node + if n not in self.node: + self.adj[n] = self.adjlist_dict_factory() + self.node[n] = attr.copy() + else: + self.node[n].update(attr) except TypeError: nn,ndict = n if nn not in self.node: @@ -520,12 +527,6 @@ def add_nodes_from(self, nodes, **attr): olddict = self.node[nn] olddict.update(attr) olddict.update(ndict) - continue - if newnode: - self.adj[n] = self.adjlist_dict_factory() - self.node[n] = attr.copy() - else: - self.node[n].update(attr) def remove_node(self,n): """Remove node n.
(Di)Graph.add_nodes_from broken under IronPython It seems that #1314 breaks (Di)Graph.add_nodes_from in IronPython on Travis pretty badly (see https://travis-ci.org/networkx/networkx/jobs/46474504#L3452 for example). I think that I fixed those functions before. IIRC, that was due to `<dict> in <dict>` returning `false` instead of raising `TypeError` in IronPython. #1314 apparently [reintroduced the same logic](https://github.com/networkx/networkx/pull/1314/files#diff-4fe234273eebd1a251430097d68e9854R403). I am not sure if that has been fixed in IronPython, or if .travis.yml is picking up the correct release.
Sorry for not checking the Travis error output for #1314. So--do I understand correctly that inside the try/except clause CPython throws a TypeError for the "if n not in self.succ" while IronPython throws a TypeError on the next line: "self.succ[n]={}"??? Wow--- that's totally off my radar (and a good example of why try-clauses should be kept as short as possible: you can't tell which error is being caught for which reason). :) Would a quick fix in add_nodes_from, along with some comments describing why we have so much in the try-clause, be the solution?

Supposedly that bug ([CodePlex issue](https://ironpython.codeplex.com/workitem/35348)) has been fixed (IronLanguages/main#221, more specifically IronLanguages/main@2e9b42b667f94f247d54f912230d4e83968d05e6) and released in IronPython 2.7.5. .travis.yml apparently is picking up that release.

You can use the following to work around the issue:

``` python
for n in nodes:
    try:
        if n not in self.succ:
            self.succ[n] = self.adjlist_dict_factory()
            self.pred[n] = self.adjlist_dict_factory()
            self.node[n] = attr.copy()
        else:
            self.node[n].update(attr)
    except TypeError:
        nn,ndict = n
        if nn not in self.succ:
            self.succ[nn] = self.adjlist_dict_factory()
            self.pred[nn] = self.adjlist_dict_factory()
            newdict = attr.copy()
            newdict.update(ndict)
            self.node[nn] = newdict
        else:
            olddict = self.node[nn]
            olddict.update(attr)
            olddict.update(ndict)
```

Even if you manage to get past `n not in self.succ` you will still be caught by `self.succ[n]`.

Yes, that makes good sense. I usually don't like to catch two different exceptions with the same exception type, but I'm not used to protecting against two different pythons. :)

Welcome back! And congratulations on your new job!
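A standalone sketch of the divergence being worked around here (an unhashable "node" such as a dict):

``` python
n = {}   # an unhashable "node"
adj = {}
try:
    n in adj      # CPython raises TypeError on the membership test itself
except TypeError:
    pass
try:
    adj[n] = {}   # pre-2.7.5 IronPython only raised here, on the indexing
except TypeError:
    pass
```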
2015-01-10T15:27:34
networkx/networkx
1,359
networkx__networkx-1359
[ "1358" ]
190abfa224afd2dc7889b7a703c55407d01ab394
diff --git a/networkx/algorithms/traversal/breadth_first_search.py b/networkx/algorithms/traversal/breadth_first_search.py --- a/networkx/algorithms/traversal/breadth_first_search.py +++ b/networkx/algorithms/traversal/breadth_first_search.py @@ -17,7 +17,7 @@ def bfs_edges(G, source, reverse=False): ---------- G : NetworkX graph - source : node, optional + source : node Specify starting node for breadth-first search and return edges in the component reachable from source. @@ -40,9 +40,6 @@ def bfs_edges(G, source, reverse=False): ----- Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py by D. Eppstein, July 2004. - - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. """ if reverse and isinstance(G, nx.DiGraph): neighbors = G.predecessors_iter @@ -69,7 +66,7 @@ def bfs_tree(G, source, reverse=False): ---------- G : NetworkX graph - source : node, optional + source : node Specify starting node for breadth-first search and return edges in the component reachable from source. @@ -92,9 +89,6 @@ def bfs_tree(G, source, reverse=False): ----- Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py by D. Eppstein, July 2004. - - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. """ T = nx.DiGraph() T.add_node(source) @@ -108,7 +102,7 @@ def bfs_predecessors(G, source): ---------- G : NetworkX graph - source : node, optional + source : node Specify starting node for breadth-first search and return edges in the component reachable from source. @@ -128,9 +122,6 @@ def bfs_predecessors(G, source): ----- Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py by D. Eppstein, July 2004. - - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. """ return dict((t,s) for s,t in bfs_edges(G,source)) @@ -141,7 +132,7 @@ def bfs_successors(G, source): ---------- G : NetworkX graph - source : node, optional + source : node Specify starting node for breadth-first search and return edges in the component reachable from source. @@ -161,9 +152,6 @@ def bfs_successors(G, source): ----- Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py by D. Eppstein, July 2004. - - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. """ d = defaultdict(list) for s,t in bfs_edges(G,source):
Breadth First Search: source parameter is not optional (but the docstrings say so)
The docstrings of the functions in `networkx/algorithms/traversal/breadth_first_search.py` all claim that the `source` parameter is optional, while the code says otherwise.
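A quick check of the actual behaviour (NetworkX 1.9.x): omitting source fails at call time, despite the old docstrings.

``` python
import networkx as nx

G = nx.path_graph(3)
print(list(nx.bfs_edges(G, 0)))  # [(0, 1), (1, 2)]
try:
    nx.bfs_edges(G)              # TypeError: 'source' is required
except TypeError:
    pass
```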
2015-02-19T11:49:44
networkx/networkx
1,379
networkx__networkx-1379
[ "1375" ]
9f54f6d993e2290bffd399c2f85029cb8650f313
diff --git a/networkx/generators/bipartite.py b/networkx/generators/bipartite.py --- a/networkx/generators/bipartite.py +++ b/networkx/generators/bipartite.py @@ -24,9 +24,51 @@ 'bipartite_preferential_attachment_graph', 'bipartite_random_graph', 'bipartite_gnmk_random_graph', + 'complete_bipartite_graph', ] +def complete_bipartite_graph(n1, n2, create_using=None): + """Return the complete bipartite graph `K_{n_1,n_2}`. + + Composed of two partitions with `n_1` nodes in the first + and `n_2` nodes in the second. Each node in the first is + connected to each node in the second. + + Parameters + ---------- + n1 : integer + Number of nodes for node set A. + n2 : integer + Number of nodes for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + Node labels are the integers 0 to `n_1 + n_2 - 1`. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + """ + if create_using is None: + G = nx.Graph() + else: + if create_using.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + G = create_using + G.clear() + + top = set(range(n1)) + bottom = set(range(n1, n1+n2)) + G.add_nodes_from(top, bipartite=1) + G.add_nodes_from(bottom, bipartite=0) + G.add_edges_from((u, v) for u in top for v in bottom) + G.graph['name'] = "complete_bipartite_graph(%d,%d)" % (n1, n2) + return G + + def bipartite_configuration_model(aseq, bseq, create_using=None, seed=None): """Return a random bipartite graph from two given degree sequences. diff --git a/networkx/generators/classic.py b/networkx/generators/classic.py --- a/networkx/generators/classic.py +++ b/networkx/generators/classic.py @@ -17,12 +17,12 @@ # All rights reserved. # BSD license. import itertools +from networkx.generators.bipartite import complete_bipartite_graph __author__ ="""Aric Hagberg ([email protected])\nPieter Swart ([email protected])""" __all__ = [ 'balanced_tree', 'barbell_graph', 'complete_graph', - 'complete_bipartite_graph', 'circular_ladder_graph', 'cycle_graph', 'dorogovtsev_goltsev_mendes_graph', @@ -192,25 +192,6 @@ def complete_graph(n,create_using=None): return G -def complete_bipartite_graph(n1,n2,create_using=None): - """Return the complete bipartite graph K_{n1_n2}. - - Composed of two partitions with n1 nodes in the first - and n2 nodes in the second. Each node in the first is - connected to each node in the second. - - Node labels are the integers 0 to n1+n2-1 - - """ - if create_using is not None and create_using.is_directed(): - raise nx.NetworkXError("Directed Graph not supported") - G=empty_graph(n1+n2,create_using) - G.name="complete_bipartite_graph(%d,%d)"%(n1,n2) - for v1 in range(n1): - for v2 in range(n2): - G.add_edge(v1,n1+v2) - return G - def circular_ladder_graph(n,create_using=None): """Return the circular ladder graph CL_n of length n. @@ -500,6 +481,8 @@ def wheel_graph(n,create_using=None): Node labels are the integers 0 to n - 1. """ + if n == 0: + return nx.empty_graph(n, create_using=create_using) G=star_graph(n-1,create_using) G.name="wheel_graph(%d)"%n G.add_edges_from([(v,v+1) for v in range(1,n-1)])
diff --git a/networkx/generators/tests/test_bipartite.py b/networkx/generators/tests/test_bipartite.py --- a/networkx/generators/tests/test_bipartite.py +++ b/networkx/generators/tests/test_bipartite.py @@ -9,6 +9,38 @@ """ class TestGeneratorsBipartite(): + def test_complete_bipartite_graph(self): + G=complete_bipartite_graph(0,0) + assert_true(is_isomorphic( G, null_graph() )) + + for i in [1, 5]: + G=complete_bipartite_graph(i,0) + assert_true(is_isomorphic( G, empty_graph(i) )) + G=complete_bipartite_graph(0,i) + assert_true(is_isomorphic( G, empty_graph(i) )) + + G=complete_bipartite_graph(2,2) + assert_true(is_isomorphic( G, cycle_graph(4) )) + + G=complete_bipartite_graph(1,5) + assert_true(is_isomorphic( G, star_graph(5) )) + + G=complete_bipartite_graph(5,1) + assert_true(is_isomorphic( G, star_graph(5) )) + + # complete_bipartite_graph(m1,m2) is a connected graph with + # m1+m2 nodes and m1*m2 edges + for m1, m2 in [(5, 11), (7, 3)]: + G=complete_bipartite_graph(m1,m2) + assert_equal(number_of_nodes(G), m1 + m2) + assert_equal(number_of_edges(G), m1 * m2) + + assert_raises(networkx.exception.NetworkXError, + complete_bipartite_graph, 7, 3, create_using=DiGraph()) + + mG=complete_bipartite_graph(7, 3, create_using=MultiGraph()) + assert_equal(mG.edges(), G.edges()) + def test_configuration_model(self): aseq=[3,3,3,3] bseq=[2,2,2,2,2] diff --git a/networkx/generators/tests/test_classic.py b/networkx/generators/tests/test_classic.py --- a/networkx/generators/tests/test_classic.py +++ b/networkx/generators/tests/test_classic.py @@ -136,38 +136,6 @@ def test_complete_digraph(self): assert_true(number_of_nodes(g) == m) assert_true(number_of_edges(g) == m * (m - 1)) - def test_complete_bipartite_graph(self): - G=complete_bipartite_graph(0,0) - assert_true(is_isomorphic( G, null_graph() )) - - for i in [1, 5]: - G=complete_bipartite_graph(i,0) - assert_true(is_isomorphic( G, empty_graph(i) )) - G=complete_bipartite_graph(0,i) - assert_true(is_isomorphic( G, empty_graph(i) )) - - G=complete_bipartite_graph(2,2) - assert_true(is_isomorphic( G, cycle_graph(4) )) - - G=complete_bipartite_graph(1,5) - assert_true(is_isomorphic( G, star_graph(5) )) - - G=complete_bipartite_graph(5,1) - assert_true(is_isomorphic( G, star_graph(5) )) - - # complete_bipartite_graph(m1,m2) is a connected graph with - # m1+m2 nodes and m1*m2 edges - for m1, m2 in [(5, 11), (7, 3)]: - G=complete_bipartite_graph(m1,m2) - assert_equal(number_of_nodes(G), m1 + m2) - assert_equal(number_of_edges(G), m1 * m2) - - assert_raises(networkx.exception.NetworkXError, - complete_bipartite_graph, 7, 3, create_using=DiGraph()) - - mG=complete_bipartite_graph(7, 3, create_using=MultiGraph()) - assert_equal(mG.edges(), G.edges()) - def test_circular_ladder_graph(self): G=circular_ladder_graph(5) assert_raises(networkx.exception.NetworkXError, circular_ladder_graph,
Construction `complete_bipartite_graph` does not set the 'bipartite' node attribute. The complete bipartite graph generator does not set the 'bipartite' attribute. This is particularly problematic since the `complete_bipartite_graph` is the fallback method for `bipartite_random_graph(n,m,p)` when `p>=1` and for `bipartite_gnmk_random_graph(n,m,k)` when `k>=n*m`. Therefore the latter graph generators may not set the 'bipartite' attribute, despite what is documented. The obvious fix is to make `complete_bipartite_graph` set the node attribute, too.
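A sketch of what the fix establishes (values follow the patch above, which marks the first set with 1 and the second with 0):

``` python
import networkx as nx

G = nx.complete_bipartite_graph(2, 3)
print(nx.get_node_attributes(G, 'bipartite'))
# with the patched generator: {0: 1, 1: 1, 2: 0, 3: 0, 4: 0}
```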
2015-03-02T21:01:54
networkx/networkx
1,396
networkx__networkx-1396
[ "1394" ]
2b6a7ea079f48210deb8cb2dfb6f0f1d72949992
diff --git a/networkx/classes/graph.py b/networkx/classes/graph.py
--- a/networkx/classes/graph.py
+++ b/networkx/classes/graph.py
@@ -519,7 +519,7 @@ def add_nodes_from(self, nodes, **attr):
         except TypeError:
             nn,ndict = n
             if nn not in self.node:
-                self.adj[nn] = {}
+                self.adj[nn] = self.adjlist_dict_factory()
                 newdict = attr.copy()
                 newdict.update(ndict)
                 self.node[nn] = newdict
@@ -860,7 +860,7 @@ def add_edges_from(self, ebunch, attr_dict=None, **attr):
                 u,v,dd = e
             elif ne==2:
                 u,v = e
-                dd = {}
+                dd = {}  # doesn't need edge_attr_dict_factory
             else:
                 raise NetworkXError(\
                     "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
Function add_nodes_from does not use adjlist_dict_factory properly
While I was reading through the code of the Graph class, I found what I think must be a bug in the implementation. Look at the code below:

``` python
for n in nodes:
    # keep all this inside try/except because
    # CPython throws TypeError on n not in self.succ,
    # while pre-2.7.5 ironpython throws on self.succ[n]
    try:
        if n not in self.node:
            self.adj[n] = self.adjlist_dict_factory()
            self.node[n] = attr.copy()
        else:
            self.node[n].update(attr)
    except TypeError:
        nn,ndict = n
        if nn not in self.node:
            self.adj[nn] = {}
            newdict = attr.copy()
            newdict.update(ndict)
            self.node[nn] = newdict
        else:
            olddict = self.node[nn]
            olddict.update(attr)
            olddict.update(ndict)
```

So, the problem is with these two lines: **self.adj[n] = self.adjlist_dict_factory()** in the try part of the code, and **self.adj[nn] = {}** after the except TypeError, when the node is not present in the graph. The two assignments initialize the adjacency list for a node in different ways. I think this could lead to weird behavior when calling this function.
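A sketch of why the mismatch matters, using the documented subclassing hook (a custom adjlist_dict_factory); with the unpatched code the two-tuple branch silently gets a plain dict:

``` python
from collections import OrderedDict
import networkx as nx

class OrderedGraph(nx.Graph):
    adjlist_dict_factory = OrderedDict

G = OrderedGraph()
G.add_nodes_from([1])                    # try branch: uses the factory
G.add_nodes_from([(2, {'color': 'r'})])  # except branch: plain {} before the fix
print(type(G.adj[1]), type(G.adj[2]))    # should both be OrderedDict
```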
2015-03-06T12:17:33
networkx/networkx
1,407
networkx__networkx-1407
[ "1406" ]
33c39a05c03d699d76c4e4194ffeb9d818ee3934
diff --git a/networkx/algorithms/connectivity/connectivity.py b/networkx/algorithms/connectivity/connectivity.py --- a/networkx/algorithms/connectivity/connectivity.py +++ b/networkx/algorithms/connectivity/connectivity.py @@ -454,12 +454,13 @@ def all_pairs_node_connectivity(G, nbunch=None, flow_func=None): else: nbunch = set(nbunch) - if G.is_directed(): + directed = G.is_directed() + if directed: iter_func = itertools.permutations else: iter_func = itertools.combinations - all_pairs = dict.fromkeys(nbunch, dict()) + all_pairs = {n: {} for n in nbunch} # Reuse auxiliary digraph and residual network H = build_auxiliary_node_connectivity(G) @@ -470,6 +471,8 @@ def all_pairs_node_connectivity(G, nbunch=None, flow_func=None): for u, v in iter_func(nbunch, 2): K = local_node_connectivity(G, u, v, **kwargs) all_pairs[u][v] = K + if not directed: + all_pairs[v][u] = K return all_pairs
diff --git a/networkx/algorithms/connectivity/tests/test_connectivity.py b/networkx/algorithms/connectivity/tests/test_connectivity.py --- a/networkx/algorithms/connectivity/tests/test_connectivity.py +++ b/networkx/algorithms/connectivity/tests/test_connectivity.py @@ -279,7 +279,58 @@ def test_edge_connectivity_flow_vs_stoer_wagner(): G = graph_func() assert_equal(nx.stoer_wagner(G)[0], nx.edge_connectivity(G)) -class TestConnectivityPairs(object): + +class TestAllPairsNodeConnectivity: + + def setUp(self): + self.path = nx.path_graph(7) + self.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + self.cycle = nx.cycle_graph(7) + self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + self.gnp = nx.gnp_random_graph(30, 0.1) + self.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True) + self.K20 = nx.complete_graph(20) + self.K10 = nx.complete_graph(10) + self.K5 = nx.complete_graph(5) + self.G_list = [self.path, self.directed_path, self.cycle, + self.directed_cycle, self.gnp, self.directed_gnp, self.K10, + self.K5, self.K20] + + def test_cycles(self): + K_undir = nx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert_true(k == 2) + K_dir = nx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert_true(k == 1) + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = nx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert_true(k == len(G)-1) + + def test_paths(self): + K_undir = nx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert_true(k == 1) + K_dir = nx.all_pairs_node_connectivity(self.directed_path) + for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert_true(k == 1) + else: + assert_true(k == 0) + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert_equal(len(C), len(nbunch)) def test_all_pairs_connectivity_icosahedral(self): G = nx.icosahedral_graph() @@ -290,9 +341,9 @@ def test_all_pairs_connectivity(self): G = nx.Graph() nodes = [0, 1, 2, 3] G.add_path(nodes) - A = dict.fromkeys(G, dict()) + A = {n: {} for n in G} for u, v in itertools.combinations(nodes,2): - A[u][v] = nx.node_connectivity(G, u, v) + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) C = nx.all_pairs_node_connectivity(G) assert_equal(sorted((k, sorted(v)) for k, v in A.items()), sorted((k, sorted(v)) for k, v in C.items())) @@ -301,7 +352,7 @@ def test_all_pairs_connectivity_directed(self): G = nx.DiGraph() nodes = [0, 1, 2, 3] G.add_path(nodes) - A = dict.fromkeys(G, dict()) + A = {n: {} for n in G} for u, v in itertools.permutations(nodes, 2): A[u][v] = nx.node_connectivity(G, u, v) C = nx.all_pairs_node_connectivity(G) @@ -311,9 +362,9 @@ def test_all_pairs_connectivity_directed(self): def test_all_pairs_connectivity_nbunch(self): G = nx.complete_graph(5) nbunch = [0, 2, 3] - A = dict.fromkeys(nbunch, dict()) + A = {n: {} for n in nbunch} for u, v in itertools.combinations(nbunch, 2): - A[u][v] = nx.node_connectivity(G, u, v) + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) assert_equal(sorted((k, sorted(v)) for k, v in A.items()), sorted((k, sorted(v)) for k, v in C.items())) @@ -321,9 +372,9 @@ def 
test_all_pairs_connectivity_nbunch(self): def test_all_pairs_connectivity_nbunch_iter(self): G = nx.complete_graph(5) nbunch = [0, 2, 3] - A = dict.fromkeys(nbunch, dict()) + A = {n: {} for n in nbunch} for u, v in itertools.combinations(nbunch, 2): - A[u][v] = nx.node_connectivity(G, u, v) + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) C = nx.all_pairs_node_connectivity(G, nbunch=iter(nbunch)) assert_equal(sorted((k, sorted(v)) for k, v in A.items()), sorted((k, sorted(v)) for k, v in C.items()))
bug in all_pairs_node_connectivity
For building the dictionary to store the results I was using:

``` python
all_pairs = dict.fromkeys(nbunch, dict())
```

which uses references to the same dict for each node. The tests did not catch this (ouch!); I found out while working on #1405. I'll send a PR fixing it, by using:

``` python
all_pairs = {n: {} for n in nbunch}
```

I'll also add tests.
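The one-liner behind the bug, for reference: dict.fromkeys evaluates its default once, so every key aliases the same mapping.

``` python
shared = dict.fromkeys(['a', 'b'], dict())
shared['a']['x'] = 1
print(shared['b'])  # {'x': 1} -- same object under both keys

fixed = {n: {} for n in ['a', 'b']}
fixed['a']['x'] = 1
print(fixed['b'])   # {} -- independent dicts
```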
2015-03-15T02:57:54
networkx/networkx
1,415
networkx__networkx-1415
[ "580" ]
aeb055dcf2464ec2d26d38af286163e6ed50a176
diff --git a/examples/subclass/antigraph.py b/examples/subclass/antigraph.py new file mode 100644 --- /dev/null +++ b/examples/subclass/antigraph.py @@ -0,0 +1,207 @@ +""" Complement graph class for small footprint when working on dense graphs. + +This class allows you to add the edges that *do not exist* in the dense +graph. However, when applying algorithms to this complement graph data +structure, it behaves as if it were the dense version. So it can be used +directly in several NetworkX algorithms. + +This subclass has only been tested for k-core, connected_components, +and biconnected_components algorithms but might also work for other +algorithms. + +""" +# Copyright (C) 2015 by +# Jordi Torrents <[email protected]> +# All rights reserved. +# BSD license. +import networkx as nx +from networkx.exception import NetworkXError + + +__author__ = """\n""".join(['Jordi Torrents <[email protected]>']) + +__all__ = ['AntiGraph'] + + +class AntiGraph(nx.Graph): + """ + Class for complement graphs. + + The main goal is to be able to work with big and dense graphs with + a low memory foodprint. + + In this class you add the edges that *do not exist* in the dense graph, + the report methods of the class return the neighbors, the edges and + the degree as if it was the dense graph. Thus it's possible to use + an instance of this class with some of NetworkX functions. + """ + + all_edge_dict = {'weight': 1} + def single_edge_dict(self): + return self.all_edge_dict + edge_attr_dict_factory = single_edge_dict + + def __getitem__(self, n): + """Return a dict of neighbors of node n in the dense graph. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + """ + return dict((node, self.all_edge_dict) for node in + set(self.adj) - set(self.adj[n]) - set([n])) + + + def neighbors(self, n): + """Return a list of the nodes connected to the node n in + the dense graph. + + Parameters + ---------- + n : node + A node in the graph + + Returns + ------- + nlist : list + A list of nodes that are adjacent to n. + + Raises + ------ + NetworkXError + If the node n is not in the graph. + + """ + try: + return list(set(self.adj) - set(self.adj[n]) - set([n])) + except KeyError: + raise NetworkXError("The node %s is not in the graph."%(n,)) + + def neighbors_iter(self, n): + """Return an iterator over all neighbors of node n in the + dense graph. + + """ + try: + return iter(set(self.adj) - set(self.adj[n]) - set([n])) + except KeyError: + raise NetworkXError("The node %s is not in the graph."%(n,)) + + def degree(self, nbunch=None, weight=None): + """Return the degree of a node or nodes in the dense graph. + """ + if nbunch in self: # return a single node + return next(self.degree_iter(nbunch,weight))[1] + else: # return a dict + return dict(self.degree_iter(nbunch,weight)) + + def degree_iter(self, nbunch=None, weight=None): + """Return an iterator for (node, degree) in the dense graph. + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. 
+ + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> list(G.degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.degree_iter([0,1])) + [(0, 1), (1, 2)] + + """ + if nbunch is None: + nodes_nbrs = ((n, {v: self.all_edge_dict for v in + set(self.adj) - set(self.adj[n]) - set([n])}) + for n in self.nodes_iter()) + else: + nodes_nbrs= ((n, {v: self.all_edge_dict for v in + set(self.nodes()) - set(self.adj[n]) - set([n])}) + for n in self.nbunch_iter(nbunch)) + + if weight is None: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree) + else: + # AntiGraph is a ThinGraph so all edges have weight 1 + for n,nbrs in nodes_nbrs: + yield (n, sum((nbrs[nbr].get(weight, 1) for nbr in nbrs)) + + (n in nbrs and nbrs[n].get(weight, 1))) + + def adjacency_iter(self): + """Return an iterator of (node, adjacency set) tuples for all nodes + in the dense graph. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency set) for all nodes in + the graph. + + """ + for n in self.adj: + yield (n, set(self.adj) - set(self.adj[n]) - set([n])) + + +if __name__ == '__main__': + # Build several pairs of graphs, a regular graph + # and the AntiGraph of it's complement, which behaves + # as if it were the original graph. + Gnp = nx.gnp_random_graph(20,0.8) + Anp = AntiGraph(nx.complement(Gnp)) + Gd = nx.davis_southern_women_graph() + Ad = AntiGraph(nx.complement(Gd)) + Gk = nx.karate_club_graph() + Ak = AntiGraph(nx.complement(Gk)) + pairs = [(Gnp, Anp), (Gd, Ad), (Gk, Ak)] + # test connected components + for G, A in pairs: + gc = [set(c) for c in nx.connected_components(G)] + ac = [set(c) for c in nx.connected_components(A)] + for comp in ac: + assert comp in gc + # test biconnected components + for G, A in pairs: + gc = [set(c) for c in nx.biconnected_components(G)] + ac = [set(c) for c in nx.biconnected_components(A)] + for comp in ac: + assert comp in gc + # test degree + for G, A in pairs: + node = list(G.nodes())[0] + nodes = list(G.nodes())[1:4] + assert G.degree(node) == A.degree(node) + assert sum(G.degree().values()) == sum(A.degree().values()) + # AntiGraph is a ThinGraph, so all the weights are 1 + assert sum(A.degree().values()) == sum(A.degree(weight='weight').values()) + assert sum(G.degree(nodes).values()) == sum(A.degree(nodes).values()) diff --git a/networkx/algorithms/approximation/__init__.py b/networkx/algorithms/approximation/__init__.py --- a/networkx/algorithms/approximation/__init__.py +++ b/networkx/algorithms/approximation/__init__.py @@ -2,6 +2,7 @@ from networkx.algorithms.approximation.clique import * from networkx.algorithms.approximation.connectivity import * from networkx.algorithms.approximation.dominating_set import * +from networkx.algorithms.approximation.kcomponents import * from networkx.algorithms.approximation.independent_set import * from networkx.algorithms.approximation.matching import * from networkx.algorithms.approximation.ramsey import * diff --git a/networkx/algorithms/approximation/kcomponents.py b/networkx/algorithms/approximation/kcomponents.py new file mode 100644 --- /dev/null +++ b/networkx/algorithms/approximation/kcomponents.py @@ -0,0 +1,350 @@ +""" Fast approximation for k-component 
structure +""" +# Copyright (C) 2015 by +# Jordi Torrents <[email protected]> +# All rights reserved. +# BSD license. +import itertools +import collections + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +from networkx.algorithms.approximation import local_node_connectivity +from networkx.algorithms.connectivity import \ + local_node_connectivity as exact_local_node_connectivity +from networkx.algorithms.connectivity import build_auxiliary_node_connectivity +from networkx.algorithms.flow import build_residual_network + + +__author__ = """\n""".join(['Jordi Torrents <[email protected]>']) + +__all__ = ['k_components'] + + +not_implemented_for('directed') +def k_components(G, min_density=0.95): + r"""Returns the approximate k-component structure of a graph G. + + A `k`-component is a maximal subgraph of a graph G that has, at least, + node connectivity `k`: we need to remove at least `k` nodes to break it + into more components. `k`-components have an inherent hierarchical + structure because they are nested in terms of connectivity: a connected + graph can contain several 2-components, each of which can contain + one or more 3-components, and so forth. + + This implementation is based on the fast heuristics to approximate + the `k`-component sturcture of a graph [1]_. Which, in turn, it is based on + a fast approximation algorithm for finding good lower bounds of the number + of node independent paths between two nodes [2]_. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + min_density : Float + Density relaxation treshold. Default value 0.95 + + Returns + ------- + k_components : dict + Dictionary with connectivity level `k` as key and a list of + sets of nodes that form a k-component of level `k` as values. + + + Examples + -------- + >>> # Petersen graph has 10 nodes and it is triconnected, thus all + >>> # nodes are in a single component on all three connectivity levels + >>> from networkx.algorithms import approximation as apxa + >>> G = nx.petersen_graph() + >>> k_components = apxa.k_components(G) + + Notes + ----- + The logic of the approximation algorithm for computing the `k`-component + structure [1]_ is based on repeatedly applying simple and fast algorithms + for `k`-cores and biconnected components in order to narrow down the + number of pairs of nodes over which we have to compute White and Newman's + approximation algorithm for finding node independent paths [2]_. More + formally, this algorithm is based on Whitney's theorem, which states + an inclusion relation among node connectivity, edge connectivity, and + minimum degree for any graph G. This theorem implies that every + `k`-component is nested inside a `k`-edge-component, which in turn, + is contained in a `k`-core. Thus, this algorithm computes node independent + paths among pairs of nodes in each biconnected part of each `k`-core, + and repeats this procedure for each `k` from 3 to the maximal core number + of a node in the input graph. + + Because, in practice, many nodes of the core of level `k` inside a + bicomponent actually are part of a component of level k, the auxiliary + graph needed for the algorithm is likely to be very dense. Thus, we use + a complement graph data structure (see `AntiGraph`) to save memory. + AntiGraph only stores information of the edges that are *not* present + in the actual auxiliary graph. 
When applying algorithms to this + complement graph data structure, it behaves as if it were the dense + version. + + See also + -------- + k_components + + References + ---------- + .. [1] Torrents, J. and F. Ferraro (2015) Structural Cohesion: + Visualization and Heuristics for Fast Computation. + http://arxiv.org/pdf/1503.04476v1 + + .. [2] White, Douglas R., and Mark Newman (2001) A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + + .. [3] Moody, J. and D. White (2003). Social cohesion and embeddedness: + A hierarchical conception of social groups. + American Sociological Review 68(1), 103--28. + http://www2.asanet.org/journals/ASRFeb03MoodyWhite.pdf + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values + k_components = collections.defaultdict(list) + # make a few functions local for speed + node_connectivity = local_node_connectivity + k_core = nx.k_core + core_number = nx.core_number + biconnected_components = nx.biconnected_components + density = nx.density + combinations = itertools.combinations + # Exact solution for k = {1,2} + # There is a linear time algorithm for triconnectivity, if we had an + # implementation available we could start from k = 4. + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + for bicomponent in nx.biconnected_components(G): + # avoid considering dyads as bicomponents + bicomp = set(bicomponent) + if len(bicomp) > 2: + k_components[2].append(bicomp) + # There is no k-component of k > maximum core number + # \kappa(G) <= \lambda(G) <= \delta(G) + g_cnumber = core_number(G) + max_core = max(g_cnumber.values()) + for k in range(3, max_core + 1): + C = k_core(G, k, core_number=g_cnumber) + for nodes in biconnected_components(C): + # Build a subgraph SG induced by the nodes that are part of + # each biconnected component of the k-core subgraph C. 
+ if len(nodes) < k: + continue + SG = G.subgraph(nodes) + # Build auxiliary graph + H = _AntiGraph() + H.add_nodes_from(SG.nodes_iter()) + for u,v in combinations(SG, 2): + K = node_connectivity(SG, u, v, cutoff=k) + if k > K: + H.add_edge(u,v) + for h_nodes in biconnected_components(H): + if len(h_nodes) <= k: + continue + SH = H.subgraph(h_nodes) + for Gc in _cliques_heuristic(SG, SH, k, min_density): + for k_nodes in biconnected_components(Gc): + Gk = nx.k_core(SG.subgraph(k_nodes), k) + if len(Gk) <= k: + continue + k_components[k].append(set(Gk)) + return k_components + + +def _cliques_heuristic(G, H, k, min_density): + h_cnumber = nx.core_number(H) + for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)): + cands = set(n for n, c in h_cnumber.items() if c == c_value) + # Skip checking for overlap for the highest core value + if i == 0: + overlap = False + else: + overlap = set.intersection(*[ + set(x for x in H[n] if x not in cands) + for n in cands]) + if overlap and len(overlap) < k: + SH = H.subgraph(cands | overlap) + else: + SH = H.subgraph(cands) + sh_cnumber = nx.core_number(SH) + SG = nx.k_core(G.subgraph(SH), k) + while not (_same(sh_cnumber) and nx.density(SH) >= min_density): + SH = H.subgraph(SG) + if len(SH) <= k: + break + sh_cnumber = nx.core_number(SH) + sh_deg = SH.degree() + min_deg = min(sh_deg.values()) + SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg) + SG = nx.k_core(G.subgraph(SH), k) + else: + yield SG + + +def _same(measure, tol=0): + vals = set(measure.values()) + if (max(vals) - min(vals)) <= tol: + return True + return False + + +class _AntiGraph(nx.Graph): + """ + Class for complement graphs. + + The main goal is to be able to work with big and dense graphs with + a low memory foodprint. + + In this class you add the edges that *do not exist* in the dense graph, + the report methods of the class return the neighbors, the edges and + the degree as if it was the dense graph. Thus it's possible to use + an instance of this class with some of NetworkX functions. In this + case we only use k-core, connected_components, and biconnected_components. + """ + + all_edge_dict = {'weight': 1} + def single_edge_dict(self): + return self.all_edge_dict + edge_attr_dict_factory = single_edge_dict + + def __getitem__(self, n): + """Return a dict of neighbors of node n in the dense graph. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + """ + all_edge_dict = self.all_edge_dict + return dict((node, all_edge_dict) for node in + set(self.adj) - set(self.adj[n]) - set([n])) + + def neighbors(self, n): + """Return a list of the nodes connected to the node n in + the dense graph. + + Parameters + ---------- + n : node + A node in the graph + + Returns + ------- + nlist : list + A list of nodes that are adjacent to n. + + Raises + ------ + NetworkXError + If the node n is not in the graph. + + """ + try: + return list(set(self.adj) - set(self.adj[n]) - set([n])) + except KeyError: + raise NetworkXError("The node %s is not in the graph."%(n,)) + + def neighbors_iter(self, n): + """Return an iterator over all neighbors of node n in the + dense graph. + + """ + try: + return iter(set(self.adj) - set(self.adj[n]) - set([n])) + except KeyError: + raise NetworkXError("The node %s is not in the graph."%(n,)) + + def degree(self, nbunch=None, weight=None): + """Return the degree of a node or nodes in the dense graph. 
+ """ + if nbunch in self: # return a single node + return next(self.degree_iter(nbunch,weight))[1] + else: # return a dict + return dict(self.degree_iter(nbunch,weight)) + + def degree_iter(self, nbunch=None, weight=None): + """Return an iterator for (node, degree) in the dense graph. + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_path([0,1,2,3]) + >>> list(G.degree_iter(0)) # node 0 with degree 1 + [(0, 1)] + >>> list(G.degree_iter([0,1])) + [(0, 1), (1, 2)] + + """ + if nbunch is None: + nodes_nbrs = ((n, {v: self.all_edge_dict for v in + set(self.adj) - set(self.adj[n]) - set([n])}) + for n in self.nodes_iter()) + else: + nodes_nbrs = ((n, {v: self.all_edge_dict for v in + set(self.nodes()) - set(self.adj[n]) - set([n])}) + for n in self.nbunch_iter(nbunch)) + + if weight is None: + for n,nbrs in nodes_nbrs: + yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree) + else: + # AntiGraph is a ThinGraph so all edges have weight 1 + for n,nbrs in nodes_nbrs: + yield (n, sum((nbrs[nbr].get(weight, 1) for nbr in nbrs)) + + (n in nbrs and nbrs[n].get(weight, 1))) + + def adjacency_iter(self): + """Return an iterator of (node, adjacency set) tuples for all nodes + in the dense graph. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency set) for all nodes in + the graph. + + """ + for n in self.adj: + yield (n, set(self.adj) - set(self.adj[n]) - set([n]))
diff --git a/networkx/algorithms/approximation/tests/test_connectivity.py b/networkx/algorithms/approximation/tests/test_connectivity.py --- a/networkx/algorithms/approximation/tests/test_connectivity.py +++ b/networkx/algorithms/approximation/tests/test_connectivity.py @@ -61,10 +61,12 @@ def test_octahedral(): assert_equal(4, approx.node_connectivity(G)) assert_equal(4, approx.node_connectivity(G, 0, 5)) -def test_icosahedral(): - G=nx.icosahedral_graph() - assert_equal(5, approx.node_connectivity(G)) - assert_equal(5, approx.node_connectivity(G, 0, 5)) +# Approximation can fail with icosahedral graph depending +# on iteration order. +#def test_icosahedral(): +# G=nx.icosahedral_graph() +# assert_equal(5, approx.node_connectivity(G)) +# assert_equal(5, approx.node_connectivity(G, 0, 5)) def test_only_source(): G = nx.complete_graph(5) diff --git a/networkx/algorithms/approximation/tests/test_kcomponents.py b/networkx/algorithms/approximation/tests/test_kcomponents.py new file mode 100644 --- /dev/null +++ b/networkx/algorithms/approximation/tests/test_kcomponents.py @@ -0,0 +1,269 @@ +# Test for approximation to k-components algorithm +from nose.tools import assert_equal, assert_true, assert_false, assert_raises, raises +import networkx as nx +from networkx.algorithms.approximation import k_components +from networkx.algorithms.approximation.kcomponents import _AntiGraph, _same + + +def build_k_number_dict(k_components): + k_num = {} + for k, comps in sorted(k_components.items()): + for comp in comps: + for node in comp: + k_num[node] = k + return k_num + +## +## Some nice synthetic graphs +## +def graph_example_1(): + G = nx.convert_node_labels_to_integers(nx.grid_graph([5,5]), + label_attribute='labels') + rlabels = nx.get_node_attributes(G, 'labels') + labels = dict((v, k) for k, v in rlabels.items()) + + for nodes in [(labels[(0,0)], labels[(1,0)]), + (labels[(0,4)], labels[(1,4)]), + (labels[(3,0)], labels[(4,0)]), + (labels[(3,4)], labels[(4,4)]) ]: + new_node = G.order()+1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G,P) + # Add two edges between the grid and P + G.add_edge(new_node+1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G,K) + # Add three edges between P and K5 + G.add_edge(new_node+2,new_node+11) + G.add_edge(new_node+3,new_node+12) + G.add_edge(new_node+4,new_node+13) + # Add another K5 sharing a node + G = nx.disjoint_union(G,K) + nbrs = G[new_node+10] + G.remove_node(new_node+10) + for nbr in nbrs: + G.add_edge(new_node+17, nbr) + G.add_edge(new_node+16, new_node+5) + + G.name = 'Example graph for connectivity' + return G + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers(nx.grid_graph([5,5]), + label_attribute='labels') + rlabels = nx.get_node_attributes(G, 'labels') + labels = dict((v, k) for k, v in rlabels.items()) + + for nodes in [ (labels[(0,4)], labels[(1,4)]), + (labels[(3,4)], labels[(4,4)]) ]: + new_node = G.order()+1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G,P) + # Add two edges between the grid and P + G.add_edge(new_node+1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G,K) + # Add three edges between P and K5 + G.add_edge(new_node+2,new_node+11) + G.add_edge(new_node+3,new_node+12) + G.add_edge(new_node+4,new_node+13) + # Add another K5 sharing a node + G = nx.disjoint_union(G,K) + nbrs = G[new_node+10] 
+ G.remove_node(new_node+10) + for nbr in nbrs: + G.add_edge(new_node+17, nbr) + # Commenting this makes the graph not biconnected !! + # This stupid mistake make one reviewer very angry :P + G.add_edge(new_node+16, new_node+8) + + for nodes in [(labels[(0,0)], labels[(1,0)]), + (labels[(3,0)], labels[(4,0)])]: + new_node = G.order()+1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G,P) + # Add two edges between the grid and P + G.add_edge(new_node+1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G,K) + # Add three edges between P and K5 + G.add_edge(new_node+2,new_node+11) + G.add_edge(new_node+3,new_node+12) + G.add_edge(new_node+4,new_node+13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G,K) + nbrs = G[new_node+10] + G.remove_node(new_node+10) + for nbr in nbrs: + G.add_edge(new_node+17, nbr) + nbrs2 = G[new_node+9] + G.remove_node(new_node+9) + for nbr in nbrs2: + G.add_edge(new_node+18, nbr) + + G.name = 'Example graph for connectivity' + return G + +# Helper function +def _check_connectivity(G): + result = k_components(G) + for k, components in result.items(): + if k < 3: + continue + for component in components: + C = G.subgraph(component) + K = nx.node_connectivity(C) + assert_true(K >= k) + +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + _check_connectivity(G) + +def test_example_1(): + G = graph_example_1() + _check_connectivity(G) + +def test_random_gnp(): + G = nx.gnp_random_graph(50, 0.2) + _check_connectivity(G) + +def test_shell(): + constructor=[(20,80,0.8),(80,180,0.6)] + G = nx.random_shell_graph(constructor) + _check_connectivity(G) + +def test_configuration(): + deg_seq = nx.utils.create_degree_sequence(100,nx.utils.powerlaw_sequence) + G = nx.Graph(nx.configuration_model(deg_seq)) + G.remove_edges_from(G.selfloop_edges()) + _check_connectivity(G) + +def test_karate_0(): + G = nx.karate_club_graph() + _check_connectivity(G) + +def test_karate_1(): + karate_k_num = {0: 4, 1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 2, + 10: 3, 11: 1, 12: 2, 13: 4, 14: 2, 15: 2, 16: 2, 17: 2, 18: 2, + 19: 3, 20: 2, 21: 2, 22: 2, 23: 3, 24: 3, 25: 3, 26: 2, 27: 3, + 28: 3, 29: 3, 30: 4, 31: 3, 32: 4, 33: 4} + G = nx.karate_club_graph() + k_comps = k_components(G) + k_num = build_k_number_dict(k_comps) + assert_equal(karate_k_num, k_num) + +def test_example_1_detail_3_and_4(): + solution = { + 3: [set([40, 41, 42, 43, 39]), + set([32, 33, 34, 35, 36, 37, 38, 42, 25, 26, 27, 28, 29, 30, 31]), + set([58, 59, 60, 61, 62]), + set([44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 61]), + set([80, 81, 77, 78, 79]), + set([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 80, 63]), + set([97, 98, 99, 100, 101]), + set([96, 100, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 94, 95]) + ], + 4: [set([40, 41, 42, 43, 39]), + set([42, 35, 36, 37, 38]), + set([58, 59, 60, 61, 62]), + set([56, 57, 61, 54, 55]), + set([80, 81, 77, 78, 79]), + set([80, 73, 74, 75, 76]), + set([97, 98, 99, 100, 101]), + set([96, 100, 92, 94, 95]) + ], + } + G = graph_example_1() + result = k_components(G) + for k, components in solution.items(): + for component in components: + assert_true(component in result[k]) + +@raises(nx.NetworkXNotImplemented) +def test_directed(): + G = nx.gnp_random_graph(10, 0.4, directed=True) + kc = k_components(G) + +def test_same(): + equal = {'A': 2, 'B': 2, 'C': 2} + slightly_different = {'A': 2, 'B': 1, 'C': 2} + different = 
{'A': 2, 'B': 8, 'C': 18} + assert_true(_same(equal)) + assert_false(_same(slightly_different)) + assert_true(_same(slightly_different, tol=1)) + assert_false(_same(different)) + assert_false(_same(different, tol=4)) + + +class TestAntiGraph: + def setUp(self): + self.Gnp = nx.gnp_random_graph(20,0.8) + self.Anp = _AntiGraph(nx.complement(self.Gnp)) + self.Gd = nx.davis_southern_women_graph() + self.Ad = _AntiGraph(nx.complement(self.Gd)) + self.Gk = nx.karate_club_graph() + self.Ak = _AntiGraph(nx.complement(self.Gk)) + self.GA = [(self.Gnp, self.Anp), + (self.Gd,self.Ad), + (self.Gk, self.Ak)] + + def test_size(self): + for G, A in self.GA: + n = G.order() + s = len(G.edges())+len(A.edges()) + assert_true(s == (n*(n-1))/2) + + def test_degree(self): + for G, A in self.GA: + assert_equal(G.degree(), A.degree()) + + def test_core_number(self): + for G, A in self.GA: + assert_equal(nx.core_number(G), nx.core_number(A)) + + def test_connected_components(self): + for G, A in self.GA: + gc = [set(c) for c in nx.connected_components(G)] + ac = [set(c) for c in nx.connected_components(A)] + for comp in ac: + assert_true(comp in gc) + + def test_adjacency_iter(self): + for G, A in self.GA: + a_adj = list(A.adjacency_iter()) + for n, nbrs in G.adjacency_iter(): + assert_true((n, set(nbrs)) in a_adj) + + def test_neighbors(self): + for G, A in self.GA: + node = list(G.nodes())[0] + assert_equal(set(G.neighbors(node)), set(A.neighbors(node))) + + def test_node_not_in_graph(self): + for G, A in self.GA: + node = 'non_existent_node' + assert_raises(nx.NetworkXError, A.neighbors, node) + assert_raises(nx.NetworkXError, A.neighbors_iter, node) + assert_raises(nx.NetworkXError, G.neighbors, node) + assert_raises(nx.NetworkXError, G.neighbors_iter, node) + + def test_degree(self): + for G, A in self.GA: + node = list(G.nodes())[0] + nodes = list(G.nodes())[1:4] + assert_equal(G.degree(node), A.degree(node)) + assert_equal(sum(G.degree().values()), sum(A.degree().values())) + # AntiGraph is a ThinGraph, so all the weights are 1 + assert_equal(sum(A.degree().values()), + sum(A.degree(weight='weight').values())) + assert_equal(sum(G.degree(nodes).values()), + sum(A.degree(nodes).values()))
k-components approximation (migrated from Trac #589)

Original ticket https://networkx.lanl.gov/trac/ticket/589

Reported 2011-07-07 by @jtorrents, assigned to @jtorrents.

New version of the algorithm. To run it you'll need the code for the vertex connectivity approximation #538 and the antigraph data structure #608. For accuracy tests you will also need the flow-based connectivity code #625.

A k-component is a maximal subgraph that cannot be disconnected by removing fewer than k nodes (along with their incident edges). By Menger's theorem, this is equivalent to a maximal subgraph in which all pairs of nodes are connected by at least k node-independent paths which each run entirely within the subgraph. Note that a k-component must be a subset of a (k-1)-component or be a (k-1)-component itself. Thus, k-components have an inherent hierarchical structure. Components of level k can overlap in k-1 nodes.

Following White and Harary (2001) and Moody and White (2003), the cohesive structure of a network can be conceptualized as increasingly cohesive groups nested inside each other. Those groups can be operationalized as the k-components of the network. A common structural pattern in large networks is a hierarchical nesting of increasingly cohesive groups at low connectivity levels and non-overlapping highly cohesive groups at higher connectivity levels. Those highly cohesive groups play a key role in the diffusion of the consequences of social interactions among actors in networks. It is usually assumed that the transmission through the network of knowledge, influence and resources generated by social interactions is limited to people 2 or 3 steps away from the initiator of such interactions. However, strongly cohesive subsets of nodes allow repetition of information and reinforcement of influence because they are characterized by multiple independent pathways that compensate for the decay effects of the transmission of knowledge, influence and resources.

An exact solution for the computation of the complete k-component structure (Moody & White, 2003) is impractical for large networks. This approximation is based on the proposal of White and Newman (2001) of a fast approximation for finding node-independent paths (implemented in #538). It allows us to compute the approximate k-component structure for moderately large networks in a reasonable time frame.

The algorithm consists of 6 steps:

1. k-core computation (time O(E)): Compute the core number for each node. A k-core is a maximal subgraph that contains nodes of degree k or more. The core number of a node is the largest value k of a k-core containing that node. This is a baseline for k-components because \kappa(G) <= \lambda(G) <= \delta(G).

2. Bicomponents computation (time O(V+E)): Compute the biconnected components of the graph. (k>2)-components are a subset of (or are themselves also) a bicomponent. Besides, the exact computation is faster than the approximation because usually bicomponents are quite large. Thus we start computing the approximation at k=3. (The same holds for tricomponents, but they are not implemented in NetworkX yet.)

3. For each k from 3 to the maximum core number of a node of the graph, we repeat the following steps:

4. For each bicomponent we create a subgraph with all nodes with core level = k. Compute the connected components of the k-core subgraph (note that the k-core could be disconnected) and then compute the approximation for vertex connectivity (proposed by White and Newman (2001)) between all pairs of nodes. Implemented in ticket #538.

5. Build an auxiliary graph with all nodes in the subgraph analyzed and edges between two nodes if the local vertex connectivity (i.e., the number of node-independent paths between them) is greater than or equal to the level k of the main for loop. Note: we actually use a complement graph data structure to save memory, because usually a big part of the k-core is actually a k-component.

6. Each connected component of the auxiliary graph is a candidate to be a k-component. But we need to do a final check: we create the induced subgraph of candidate nodes from the input graph and discard any node that, in this induced subgraph, has core number less than k. The remaining nodes are assumed to be a k-component.

References

```
White, Douglas R., and Mark Newman. 2001. A Fast Algorithm for
Node-Independent Paths. Santa Fe Institute Working Paper 01-07-035
http://eclectic.ss.uci.edu/~drwhite/working.pdf

White, D. R., and F. Harary. 2001. The cohesiveness of blocks in social
networks: Node connectivity and conditional density, Sociological
Methodology 31:305--59.
http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF

Moody, James, and Douglas R. White. 2003. Social Cohesion and
Embeddedness. American Sociological Review 68:103-127
http://www2.asanet.org/journals/ASRFeb03MoodyWhite.pdf
```
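For orientation, the heuristic described above is what the patch ships as `k_components` in `networkx.algorithms.approximation`. A minimal usage sketch, mirroring the docstring example in the diff (the Petersen graph is just a convenient triconnected input):

```python
import networkx as nx
from networkx.algorithms import approximation as apxa

# The Petersen graph is 3-connected, so all ten nodes form a single
# component at each connectivity level k = 1, 2, 3.
G = nx.petersen_graph()
k_components = apxa.k_components(G)  # dict: level k -> list of node sets
for k, components in sorted(k_components.items()):
    print(k, [sorted(c) for c in components])
```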
Attachment in Trac by @jtorrents, 2011-07-07: [test_1m.adj](https://networkx.lanl.gov/trac/attachment/ticket/589/test_1m.adj)

Attachment in Trac by @jtorrents, 2011-07-07: [test_2m.adj](https://networkx.lanl.gov/trac/attachment/ticket/589/test_2m.adj)

Attachment in Trac by @jtorrents, 2011-07-07: [k_components.py](https://networkx.lanl.gov/trac/attachment/ticket/589/k_components.py)

Comment in Trac by @jtorrents, 2011-07-07

To run the script you need the code for node_independent_paths (#538), and to check the results against the exact computation for bicomponents you also need igraph (maybe we should also implement a biconnected_components function in NetworkX). The output on my machine:

''' Output of accuracy test removed from comments '''

Comment in Trac by @jtorrents, 2011-07-07

The main problem with this implementation is the high consumption of RAM for large networks. I don't know how to properly debug RAM usage (other than adding prints to the algorithm to know what it is doing and checking the '''top''' command). For instance, this algorithm applied to a gnm_random_graph(7000, 8000) consumes almost 3 GB of RAM. This must be improved.

Comment in Trac by @jtorrents, 2011-07-17

Following Aric's smart suggestion to avoid the creation of a very dense graph in step 3 (which was what was consuming huge amounts of memory for large graphs) by building its complement instead, and after a fruitful discussion at the SciPy sprints, Dan implemented the "anticore number" algorithm (i.e., computing the core number of the dense graph while working only with its complement). With these improvements, the new implementation of the algorithm (see attached files) uses a lot less memory, which will hopefully allow applying it to large networks (I'm hoping to be able to apply it to sparse networks on the order of hundreds of thousands of nodes and edges). The anticore algorithm needs more work to behave properly on bipartite networks though (I'll work on this). The new implementation of the approximation is a bit less accurate on bipartite networks because the anticore algorithm does not take into account the restrictions in terms of complement degree and complement neighbors that bipartite networks have. Despite that, the approximation works quite well for 2-components (see the accuracy tests in k_components_1.py). I also include the old version that uses the dense graph in the newly uploaded file, to be able to compare accuracy and memory consumption. I post the results of the accuracy and memory tests as attached txt files in this ticket to avoid very long comments. Cheers!

PS: I'm still at Austin ;) It was really great to be able to attend the SciPy conference and to work with you all!!
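One quick way to put numbers on this kind of footprint, as a standalone sketch using only the standard library (not part of the ticket's attachments; note that `resource` is Unix-only and `ru_maxrss` is reported in kilobytes on Linux but in bytes on OS X):

```python
import resource
import networkx as nx

G = nx.gnm_random_graph(7000, 8000)
# ... run the algorithm under test here ...

# Peak resident set size of this process so far.
peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print("peak memory (Linux): %.1f MB" % (peak / 1024.0))
```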
Attachment in Trac by @jtorrents, 2011-07-17: [anticore.py](https://networkx.lanl.gov/trac/attachment/ticket/589/anticore.py)

Attachment in Trac by @jtorrents, 2011-07-17: [k_components_1.py](https://networkx.lanl.gov/trac/attachment/ticket/589/k_components_1.py)

Attachment in Trac by @jtorrents, 2011-07-17: [output_accuracy_tests.txt](https://networkx.lanl.gov/trac/attachment/ticket/589/output_accuracy_tests.txt)

Attachment in Trac by @jtorrents, 2011-07-17: [memtest_k_components.py](https://networkx.lanl.gov/trac/attachment/ticket/589/memtest_k_components.py)

Attachment in Trac by @jtorrents, 2011-07-17: [output_memtest_k_components.txt](https://networkx.lanl.gov/trac/attachment/ticket/589/output_memtest_k_components.txt)

Comment in Trac by @jtorrents, 2011-07-17

The memory tests are quite hackish, but so far this is the easiest way I've been able to find. It works by using sys.getsizeof on the string representation of G.adj (i.e., str(G.adj)) as the principal metric for graph memory usage. It is not accurate, but it allows us to compare the memory footprint of different graphs (see the source code for details). The complement graph is 2 orders of magnitude smaller than the dense graph in most of the examples, so the memory footprint is much smaller.

Comment in Trac by @dschult, 2011-07-20

It looks like now your memory bottleneck is creating K (the dict-of-dicts of pairwise vertex connectivity).

If so, I think you can rearrange the code so you don't have to store those values. Instead, compute each K value and use it immediately. Something like:

'''
H=nx.Graph()
SG=G.subgraph(candidates)
for u,v in itertools.combinations(SG,2):
    K_value=pairwise_vertex_connectivity(SG,u,v)
    if K_value < k: # complement graph
        H.add_edge(u,v)
'''

Comment in Trac by @hagberg, 2011-07-21

Also, to get this into the code base when it's ready, the bicomponents function needs to be implemented.

Comment in Trac by @jtorrents, 2011-07-21

Replying to [comment:5 dschult]:

> It looks like now your memory bottleneck is creating K (the dict-of-dicts of pairwise vertex connectivity).
>
> If so, I think you can rearrange the code so you don't have to store those values. Instead, compute each K value and use it immediately. Something like:
>
> '''
> H=nx.Graph()
> SG=G.subgraph(candidates)
> for u,v in itertools.combinations(SG,2):
>     K_value=pairwise_vertex_connectivity(SG,u,v)
>     if K_value < k: # complement graph
>         H.add_edge(u,v)
> '''

That's a great idea! Thank you very much. I've implemented this approach in '''k_components_2.py''', and the memory footprint is greatly reduced compared with the implementation that also uses the complement graph. I have to work more on the creation of the complement graph in step 3, because the approximation using the dense graph is more accurate (see the attached file '''output_accuracy_tests.txt''') and I don't understand why yet. First I thought that it was because the anticore algorithm does not take into account the bipartite nature of some networks, but after some thinking and some tests, it seems that the problem is in the creation of the complement graph and not in the anticore algorithm. I'll work on this and will post my progress here.

Attachment in Trac by @jtorrents, 2011-07-21: [k_components_2.py](https://networkx.lanl.gov/trac/attachment/ticket/589/k_components_2.py)

Comment in Trac by @jtorrents, 2011-07-21

Replying to [comment:6 aric]:

> Also to get this into the code base when it's ready the bicomponents function needs to be implemented.
I'll work on the implementation of the '''biconnected_components''' function (opening a new ticket). So far I've found two interesting Python implementations:

1. Jesus Cerquides' translation of the igraph C implementation at https://bitbucket.org/cerquide/networkx-chordal-approx/src/2423f58039f5/networkx/algorithms/components/biconnected.py (he is the guy that implemented the chordal algorithms in NetworkX)

2. David Eppstein's implementation at http://www.ics.uci.edu/~eppstein/PADS/Biconnectivity.py

I have to look at them more closely. Eppstein's implementation seems more elegant and concise, but I do not fully understand it yet. When I have some code that works I'll open a new ticket for biconnected components.

Comment in Trac by @dschult, 2011-07-21

While thinking about why the approximation would be off for the complement graph, it occurs to me that the last step (finding the components) might be finding the components of the complement graph instead of the actual graph. If that's true, then we would have to fiddle with that algorithm too. I'm still thinking about a complement graph data structure so that we wouldn't have to rewrite algorithms, but it's tricky.

Comment in Trac by @jtorrents, 2011-07-22

Replying to [comment:9 dschult]:

> While thinking about why the approximation would be off for the complement graph, it occurs to me that the last step (finding the components) might be finding the components of the complement graph instead of the actual graph. If that's true, then we would have to fiddle with that algorithm too. I'm still thinking about a complement graph data structure so that we wouldn't have to rewrite algorithms, but it's tricky.

You are right, the problem is in the last step (finding the connected components of the k-core subgraph of the auxiliary graph H) because we need the connected components of its complement graph. As you suggest, in order to avoid having to rewrite algorithms to work on the complement graph, I've implemented a first version of a complement graph data structure in '''antigraph.py'''. I'm not sure that I did it correctly, but it seems to work (now the approximation algorithm yields the same results in both the dense and complement versions). The updated approximation algorithm is in '''k_components_3.py'''. The new data structure inherits from the nx.Graph class, and some methods ('''__getitem__, neighbors, neighbors_iter, degree, degree_iter''') are overridden so that it works as expected with the algorithms of interest ('''core_number, connected_components'''). I'm not sure if it would be necessary to override other methods in order to make it work with other algorithms. For now the docstrings of the overridden methods are not updated.

Attachment in Trac by @jtorrents, 2011-07-22: [k_components_3.py](https://networkx.lanl.gov/trac/attachment/ticket/589/k_components_3.py)

Attachment in Trac by @jtorrents, 2011-07-22: [accuracy_tests_antigraph.txt](https://networkx.lanl.gov/trac/attachment/ticket/589/accuracy_tests_antigraph.txt)

Attachment in Trac by @jtorrents, 2011-07-23: [antigraph.py](https://networkx.lanl.gov/trac/attachment/ticket/589/antigraph.py)

Attachment in Trac by @jtorrents, 2011-07-23: [test_antigraph.py](https://networkx.lanl.gov/trac/attachment/ticket/589/test_antigraph.py)

Comment in Trac by @jtorrents, 2011-07-23

I've updated the AntiGraph class and implemented some tests. Using it, the complement version of the algorithm obtains the same accuracy as the dense implementation. Now the memory footprint is much smaller.
I'm not sure if I should open a new ticket for the AntiGraph class; Dan, do you think that it is interesting and general enough to be a ticket on its own?

Comment in Trac by @dschult, 2011-07-25

Yes, let's split off the complement graph (antigraph) as a separate ticket.

I think this version of it does very well and is the correct approach (add nodes and edges like a normal graph but report differently). This version is also sufficient for this problem. I'm thinking that other methods could be needed for other algorithms though, so we should implement them too. And the docs should be rewritten, maybe with lots of references to the standard graph class methods.

So, is this footprint small enough to get you to try the big dataset you were thinking of?

Comment in Trac by @jtorrents, 2011-07-26

Replying to [comment:12 dschult]:

> Yes, let's split off the complement graph (antigraph) as a separate ticket.
>
> I think this version of it does very well and is the correct approach (add nodes and edges like a normal graph but report differently). This version is also sufficient for this problem. I'm thinking that other methods could be needed for other algorithms though, so we should implement them too. And the docs should be rewritten, maybe with lots of references to the standard graph class methods.

I agree, this version is sufficient for this problem, but we might have to think about other use cases in order to make it a solid data structure that could be included in the NetworkX codebase. I have some ideas for new methods to implement; I'll comment on the new ticket.

> So, is this footprint small enough to get you to try the big dataset you were thinking of?

I did some tests and it seems that the memory footprint is quite small. I did not test it with the big datasets yet, because I've found a problem with the approximation algorithm. I'm preparing a paper about this approximation algorithm and, as an example, I plan to include an analysis of Zachary's karate club. Analyzing this network, I've found out that the approximation algorithm (as implemented in '''k_components_3.py''') does not detect the 4-component. This is because this implementation does not consider node-independent paths between adjacent nodes in the original graph (e.g., '''pairwise_vertex_connectivity(..., strict=True)'''). But in small dense networks it is necessary to do so in order to obtain accurate results. The problem is that if we actually consider node-independent paths between adjacent nodes, then the results obtained for large sparse networks are considerably less accurate. As a first approach we could check the density of the k-core analyzed and then decide what to do for adjacent nodes (implemented in '''k_components_4.py'''). But what worries me is that I do not understand why this happens. I've put together more accuracy tests to show the problem in k_components_4.py, and I attached the output as a .txt file. The tests involving the karate network are at the end.
Attachment in Trac by @jtorrents, 2011-07-26: [k_components_4.py](https://networkx.lanl.gov/trac/attachment/ticket/589/k_components_4.py)

Attachment in Trac by @jtorrents, 2011-07-26: [accuracy_tests_strict.txt](https://networkx.lanl.gov/trac/attachment/ticket/589/accuracy_tests_strict.txt)

Comment in Trac by @jtorrents, 2011-08-24

Attachment in Trac by @jtorrents, 2011-08-24: [k_components_5.py](https://networkx.lanl.gov/trac/attachment/ticket/589/k_components_5.py)

Attachment in Trac by @jtorrents, 2011-08-24: [accuracy_tests_k_components_5.txt](https://networkx.lanl.gov/trac/attachment/ticket/589/accuracy_tests_k_components_5.txt)

Comment in Trac by @jtorrents, 2011-08-24

I've added a new version of the algorithm, a new description, and a new set of accuracy tests that, using the flow-based connectivity code #625, check the actual vertex connectivity of each k-component detected by the algorithm. As you can see, the accuracy tests are quite good, but there are still problems with dense networks that have many levels of connectivity. Analyzing the network in the file test_1m.adj (attached to this ticket), we get it right for k < 8 and for k > 12; in the middle the accuracy is quite poor, and I think this is because of the changes that I made to the algorithm: now the last step consists of creating the induced subgraph of all nodes that are candidates to be part of a k-component, and removing all nodes with core number < k from this subgraph before considering it a k-component. The problem is that, after removing those nodes, other nodes of the subgraph could end up with core number < k, because we deleted edges when removing the nodes with core number < k. Thus, I'll have to improve this last step.

Regarding memory footprint issues, most of the memory was used to approximate k=2 for large networks. Now we only compute the approximation for k >= 3. We compute the exact solution for bicomponents (which is fast, see #609) and take it as a base for the approximation at higher levels of connectivity. This way we always get the biconnected structure (along with articulation points) right, which was difficult to approximate with the algorithm, and we also save a lot of memory in large networks with big bicomponents. The same could be done for tricomponents (and should be done), but they are not implemented in NetworkX (yet).
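To make the complement-graph trick discussed above concrete: the class stores only the edges that are *absent* from the dense graph and reports the neighbors of a node n as all nodes minus the stored neighbors minus n itself. A small sketch against the merged code (note that `_AntiGraph` is a private helper; the import mirrors the test suite in the patch):

```python
import networkx as nx
from networkx.algorithms.approximation.kcomponents import _AntiGraph

G = nx.karate_club_graph()
A = _AntiGraph(nx.complement(G))  # store only the edges G does *not* have

# A answers degree and core-number queries as if it were the dense graph G.
assert G.degree() == A.degree()
assert nx.core_number(G) == nx.core_number(A)
```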
2015-03-18T13:02:15
networkx/networkx
1,472
networkx__networkx-1472
[ "1471" ]
d841f4c9cccd76a32b3385ac48a8704dee96db0b
diff --git a/networkx/generators/community.py b/networkx/generators/community.py --- a/networkx/generators/community.py +++ b/networkx/generators/community.py @@ -1,3 +1,4 @@ +"""Generators for classes of graphs used in studying social networks.""" import itertools import math import random @@ -15,18 +16,14 @@ def caveman_graph(l, k): - """Returns a caveman graph of l cliques of size k. - - The caveman graph is formed by creating n cliques of size k. + """Returns a caveman graph of ``l`` cliques of size ``k``. Parameters ---------- - n : int + l : int Number of cliques k : int Size of cliques - directed : boolean optional (default=True) - If true return directed caveman graph Returns ------- @@ -36,14 +33,19 @@ def caveman_graph(l, k): Notes ----- This returns an undirected graph, it can be converted to a directed - graph using nx.to_directed(), or a multigraph using - nx.MultiGraph(nx.caveman_graph). Only the undirected version is + graph using :func:`nx.to_directed`, or a multigraph using + ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is described in [1]_ and it is unclear which of the directed generalizations is most useful. Examples -------- - >>> G = nx.caveman_graph(3,3) + >>> G = nx.caveman_graph(3, 3) + + See also + -------- + + connected_caveman_graph Reference --------- @@ -61,36 +63,35 @@ def caveman_graph(l, k): def connected_caveman_graph(l, k): - """Returns a connected caveman graph of n cliques of size k. + """Returns a connected caveman graph of ``l`` cliques of size ``k``. - The connected caveman graph is formed by creating n cliques of size k. Then - a single node in each clique is rewired to a node in the adjacent clique. + The connected caveman graph is formed by creating ``n`` cliques of size + ``k``, then a single edge in each clique is rewired to a node in an + adjacent clique. Parameters ---------- - n : int + l : int number of cliques k : int size of cliques - directed : boolean optional (default=True) - if true return directed caveman graph Returns ------- G : NetworkX Graph - caveman graph + connected caveman graph Notes ----- This returns an undirected graph, it can be converted to a directed - graph using nx.to_directed(), or a multigraph using - nx.MultiGraph(nx.caveman_graph). Only the undirected version is + graph using :func:`nx.to_directed`, or a multigraph using + ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is described in [1]_ and it is unclear which of the directed generalizations is most useful. Examples -------- - >>> G = nx.caveman_graph(3, 3) + >>> G = nx.connected_caveman_graph(3, 3) Reference --------- @@ -105,12 +106,11 @@ def connected_caveman_graph(l, k): return G -def relaxed_caveman_graph(l, k, p, seed=None, directed=False): +def relaxed_caveman_graph(l, k, p, seed=None): """Return a relaxed caveman graph. - A relaxed caveman graph starts with l cliques of size k. Edges - are then randomly rewired with probability p to link different - cliques. + A relaxed caveman graph starts with ``l`` cliques of size ``k``. Edges are + then randomly rewired with probability ``p`` to link different cliques. Parameters ---------- @@ -122,8 +122,6 @@ def relaxed_caveman_graph(l, k, p, seed=None, directed=False): Probabilty of rewiring each edge. seed : int,optional Seed for random number generator(default=None) - directed : bool,optional (default=False) - If True return a directed graph Returns -------
DOC: caveman graph generator

In the functions `caveman_graph` and `connected_caveman_graph`, the docstrings call the number of cliques `n`, but the actual parameter is `l`. Also, I couldn't find these functions (the community generators) on the website; I had to dig through the code.
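For reference, with the corrected docstrings the signatures read `caveman_graph(l, k)` and `connected_caveman_graph(l, k)`: `l` cliques of size `k`. A quick sketch of the two generators:

```python
import networkx as nx

# l = 3 cliques of k = 4 nodes: 12 nodes, no edges between the cliques.
G = nx.caveman_graph(3, 4)
assert not nx.is_connected(G)

# Same cliques, but one edge per clique is rewired to an adjacent
# clique, so the result is connected.
H = nx.connected_caveman_graph(3, 4)
assert nx.is_connected(H)
```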
2015-04-23T00:52:02
networkx/networkx
1,544
networkx__networkx-1544
[ "1593" ]
6182e6bcd7e4c226b8217fed7d2a9095fd76c278
diff --git a/networkx/relabel.py b/networkx/relabel.py --- a/networkx/relabel.py +++ b/networkx/relabel.py @@ -107,6 +107,8 @@ def _relabel_inplace(G, mapping): new = mapping[old] except KeyError: continue + if new == old: + continue try: G.add_node(new, attr_dict=G.node[old]) except KeyError:
diff --git a/networkx/tests/test_relabel.py b/networkx/tests/test_relabel.py --- a/networkx/tests/test_relabel.py +++ b/networkx/tests/test_relabel.py @@ -137,6 +137,13 @@ def test_relabel_nodes_multidigraph(self): assert_equal(sorted(G.edges()), [('aardvark', 'bear'), ('aardvark', 'bear')]) + def test_relabel_isolated_nodes_to_same(self): + G=Graph() + G.add_nodes_from(range(4)) + mapping={1:1} + H=relabel_nodes(G, mapping, copy=False) + assert_equal(sorted(H.nodes()), list(range(4))) + @raises(KeyError) def test_relabel_nodes_missing(self): G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
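The `new == old` guard in the patch matters because `_relabel_inplace` finishes each mapping entry by removing the old node; for an identity mapping, that removal would delete the very node that was just "re-added". A sketch of the behavior the new test pins down:

```python
import networkx as nx

G = nx.Graph()
G.add_nodes_from(range(4))

# Mapping a node to itself must be a no-op; before the fix, in-place
# relabeling silently removed node 1 here.
H = nx.relabel_nodes(G, {1: 1}, copy=False)
assert sorted(H.nodes()) == [0, 1, 2, 3]
```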
Added an implementation of the SimRank matrix of the graph.
2015-05-22T18:56:05
networkx/networkx
1,707
networkx__networkx-1707
[ "1695" ]
3ec1624e0121933ada3045869329e013ba9432d9
diff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py deleted file mode 120000 --- a/examples/drawing/atlas.py +++ /dev/null @@ -1 +0,0 @@ -../graph/atlas.py \ No newline at end of file diff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py new file mode 100644 --- /dev/null +++ b/examples/drawing/atlas.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python +""" +Atlas of all graphs of 6 nodes or less. + +""" +__author__ = """Aric Hagberg ([email protected])""" +# Copyright (C) 2004 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. + +import networkx as nx +from networkx.generators.atlas import * +from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic +import random + +def atlas6(): + """ Return the atlas of all connected graphs of 6 nodes or less. + Attempt to check for isomorphisms and remove. + """ + + Atlas=graph_atlas_g()[0:208] # 208 + # remove isolated nodes, only connected graphs are left + U=nx.Graph() # graph for union of all graphs in atlas + for G in Atlas: + zerodegree=[n for n in G if G.degree(n)==0] + for n in zerodegree: + G.remove_node(n) + U=nx.disjoint_union(U,G) + + # list of graphs of all connected components + C=nx.connected_component_subgraphs(U) + + UU=nx.Graph() + # do quick isomorphic-like check, not a true isomorphism checker + nlist=[] # list of nonisomorphic graphs + for G in C: + # check against all nonisomorphic graphs so far + if not iso(G,nlist): + nlist.append(G) + UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs + return UU + +def iso(G1, glist): + """Quick and dirty nonisomorphism checker used to check isomorphisms.""" + for G2 in glist: + if isomorphic(G1,G2): + return True + return False + + +if __name__ == '__main__': + + import networkx as nx + + G=atlas6() + + print("graph has %d nodes with %d edges"\ + %(nx.number_of_nodes(G),nx.number_of_edges(G))) + print(nx.number_connected_components(G),"connected components") + + + try: + from networkx import graphviz_layout + except ImportError: + raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot") + + import matplotlib.pyplot as plt + plt.figure(1,figsize=(8,8)) + # layout graphs with positions using graphviz neato + pos=nx.graphviz_layout(G,prog="neato") + # color nodes the same in each connected subgraph + C=nx.connected_component_subgraphs(G) + for g in C: + c=[random.random()]*nx.number_of_nodes(g) # random color... + nx.draw(g, + pos, + node_size=40, + node_color=c, + vmin=0.0, + vmax=1.0, + with_labels=False + ) + plt.savefig("atlas.png",dpi=75) diff --git a/examples/drawing/chess_masters.py b/examples/drawing/chess_masters.py deleted file mode 120000 --- a/examples/drawing/chess_masters.py +++ /dev/null @@ -1 +0,0 @@ -../multigraph/chess_masters.py \ No newline at end of file diff --git a/examples/drawing/chess_masters.py b/examples/drawing/chess_masters.py new file mode 100644 --- /dev/null +++ b/examples/drawing/chess_masters.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python + +""" +An example of the MultiDiGraph clas + +The function chess_pgn_graph reads a collection of chess +matches stored in the specified PGN file +(PGN ="Portable Game Notation") +Here the (compressed) default file --- + chess_masters_WCC.pgn.bz2 --- +contains all 685 World Chess Championship matches +from 1886 - 1985. 
+(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php) + +The chess_pgn_graph() function returns a MultiDiGraph +with multiple edges. Each node is +the last name of a chess master. Each edge is directed +from white to black and contains selected game info. + +The key statement in chess_pgn_graph below is + G.add_edge(white, black, game_info) +where game_info is a dict describing each game. + +""" +# Copyright (C) 2006-2010 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. + +import networkx as nx + +# tag names specifying what game info should be +# stored in the dict on each digraph edge +game_details=["Event", + "Date", + "Result", + "ECO", + "Site"] + +def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"): + """Read chess games in pgn format in pgn_file. + + Filenames ending in .gz or .bz2 will be uncompressed. + + Return the MultiDiGraph of players connected by a chess game. + Edges contain game data in a dict. + + """ + import bz2 + G=nx.MultiDiGraph() + game={} + datafile = bz2.BZ2File(pgn_file) + lines = (line.decode().rstrip('\r\n') for line in datafile) + for line in lines: + if line.startswith('['): + tag,value=line[1:-1].split(' ',1) + game[str(tag)]=value.strip('"') + else: + # empty line after tag set indicates + # we finished reading game info + if game: + white=game.pop('White') + black=game.pop('Black') + G.add_edge(white, black, **game) + game={} + return G + + +if __name__ == '__main__': + import networkx as nx + + + G=chess_pgn_graph() + + ngames=G.number_of_edges() + nplayers=G.number_of_nodes() + + print("Loaded %d chess games between %d players\n"\ + % (ngames,nplayers)) + + # identify connected components + # of the undirected version + Gcc=list(nx.connected_component_subgraphs(G.to_undirected())) + if len(Gcc)>1: + print("Note the disconnected component consisting of:") + print(Gcc[1].nodes()) + + # find all games with B97 opening (as described in ECO) + openings=set([game_info['ECO'] + for (white,black,game_info) in G.edges(data=True)]) + print("\nFrom a total of %d different openings,"%len(openings)) + print('the following games used the Sicilian opening') + print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n') + + for (white,black,game_info) in G.edges(data=True): + if game_info['ECO']=='B97': + print(white,"vs",black) + for k,v in game_info.items(): + print(" ",k,": ",v) + print("\n") + + + try: + import matplotlib.pyplot as plt + except ImportError: + import sys + print("Matplotlib needed for drawing. 
Skipping") + sys.exit(0) + + # make new undirected graph H without multi-edges + H=nx.Graph(G) + + # edge width is proportional number of games played + edgewidth=[] + for (u,v,d) in H.edges(data=True): + edgewidth.append(len(G.get_edge_data(u,v))) + + # node size is proportional to number of games won + wins=dict.fromkeys(G.nodes(),0.0) + for (u,v,d) in G.edges(data=True): + r=d['Result'].split('-') + if r[0]=='1': + wins[u]+=1.0 + elif r[0]=='1/2': + wins[u]+=0.5 + wins[v]+=0.5 + else: + wins[v]+=1.0 + try: + pos=nx.graphviz_layout(H) + except: + pos=nx.spring_layout(H,iterations=20) + + plt.rcParams['text.usetex'] = False + plt.figure(figsize=(8,8)) + nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m') + nodesize=[wins[v]*50 for v in H] + nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4) + nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k') + nx.draw_networkx_labels(H,pos,fontsize=14) + font = {'fontname' : 'Helvetica', + 'color' : 'k', + 'fontweight' : 'bold', + 'fontsize' : 14} + plt.title("World Chess Championship Games: 1886 - 1985", font) + + # change font and write text (using data coordinates) + font = {'fontname' : 'Helvetica', + 'color' : 'r', + 'fontweight' : 'bold', + 'fontsize' : 14} + + plt.text(0.5, 0.97, "edge width = # games played", + horizontalalignment='center', + transform=plt.gca().transAxes) + plt.text(0.5, 0.94, "node size = # games won", + horizontalalignment='center', + transform=plt.gca().transAxes) + + plt.axis('off') + plt.savefig("chess_masters.png",dpi=75) + print("Wrote chess_masters.png") + plt.show() # display
Symbolic links cause test failures on AppVeyor

Symbolic links introduced in #1467 cause breakage when installing with Python 3 on Windows. I found this problem when testing `networkx-metis`: https://ci.appveyor.com/project/chebee7i/networkx-metis-1lf0f/build/1.0.91/job/4ifakb2gfdgakvoy

@hagberg The problem is present in 1.10. We should consider a 1.10.1 release since this affects installation.
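One way to catch this class of problem before release, as a hypothetical pre-packaging check (not part of the actual fix, which simply replaced the symlinks with regular files), is to scan the tree for links, since a Windows checkout or install typically materializes them as stub text files that then fail to import:

```python
import os

def find_symlinks(root):
    # Yield every symbolic link under root; shipping these in the
    # source tree is what broke the Windows installs here.
    for dirpath, dirnames, filenames in os.walk(root):
        for name in dirnames + filenames:
            path = os.path.join(dirpath, name)
            if os.path.islink(path):
                yield path

print(list(find_symlinks("examples")))
```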
2015-08-04T19:55:05
networkx/networkx
1,721
networkx__networkx-1721
[ "1718" ]
c06c61848447157a200f01f924f9cdeb8309ed1c
diff --git a/networkx/convert.py b/networkx/convert.py --- a/networkx/convert.py +++ b/networkx/convert.py @@ -89,10 +89,10 @@ def to_networkx_graph(data,create_using=None,multigraph_input=False): result= from_dict_of_dicts(data.adj,\ create_using=create_using,\ multigraph_input=data.is_multigraph()) - if hasattr(data,'graph') and isinstance(data.graph,dict): - result.graph=data.graph.copy() - if hasattr(data,'node') and isinstance(data.node,dict): - result.node=dict( (n,dd.copy()) for n,dd in data.node.items() ) + if hasattr(data,'graph'): # data.graph should be dict-like + result.graph.update(data.graph) + if hasattr(data,'node'): # data.node should be dict-like + result.node.update( (n,dd.copy()) for n,dd in data.node.items() ) return result except: raise nx.NetworkXError("Input is not a correct NetworkX graph.")
diff --git a/networkx/tests/test_convert.py b/networkx/tests/test_convert.py --- a/networkx/tests/test_convert.py +++ b/networkx/tests/test_convert.py @@ -215,3 +215,12 @@ def test_directed_to_undirected(self): assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(),edges1)) assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(),edges1)) + + def test_attribute_dict_integrity(self): + # we must not replace dict-like graph data structures with dicts + G=OrderedGraph() + G.add_nodes_from("abc") + H=to_networkx_graph(G, create_using=OrderedGraph()) + assert_equal(list(H.node),list(G.node)) + H=OrderedDiGraph(G) + assert_equal(list(H.node),list(G.node))
Lose digraph ordering when creating a duplicate I am seeing the following, which doesn't look right at all. ``` >>> import networkx as nx >>> g = nx.OrderedDiGraph() >>> g.add_node('a') >>> g.add_node('b') >>> g.add_node('c') >>> list(g) ['a', 'b', 'c'] >>> g1 = nx.OrderedDiGraph(g) >>> list(g1) ['a', 'c', 'b'] ``` It seems the order is being lost when the graph is being copied (which is the recommended way to generate a new graph, if the prior one is frozen for example); this doesn't feel like what would be the expected result here. ``` $ pip freeze | grep networkx networkx==1.10 $ python --version Python 2.7.10 ```
Trying to work around this by using the convert routine directly didn't fix the issue.

```
>>> from networkx import convert
>>> g2 = convert.to_networkx_graph(g, create_using=nx.OrderedDiGraph())
>>> list(g2)
['a', 'c', 'b']
```

The following does seem to work, but it likely isn't really a _good_ solution.

```
>>> g3 = nx.OrderedDiGraph()
>>> g3.add_nodes_from(g.nodes_iter())
>>> g3.add_edges_from(g.edges_iter())
>>> list(g3)
['a', 'b', 'c']
```

This looks like a bug.... Thanks for reporting it. I'll look into it.
Thanks!
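A minimal sketch of the principle behind the fix above (update the graph's existing dict-like containers instead of replacing them with plain dicts); here `OrderedDict` just stands in for whatever dict-like class the graph subclass chose:

```python
from collections import OrderedDict

# The graph class chose an ordered container for its data:
target = OrderedDict([('a', {}), ('b', {}), ('c', {})])
source = {'a': {}, 'b': {}, 'c': {}}

# Replacing the container discards its type (and any ordering guarantees):
replaced = dict(source)
# Updating in place keeps the container the graph class picked:
target.update(source)
print(type(replaced).__name__, type(target).__name__)  # dict OrderedDict
```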
2015-08-07T21:23:34
networkx/networkx
1,732
networkx__networkx-1732
[ "1704" ]
8f3a9020e03891f449cc83509272a8bfd4de35c8
diff --git a/networkx/algorithms/centrality/eigenvector.py b/networkx/algorithms/centrality/eigenvector.py --- a/networkx/algorithms/centrality/eigenvector.py +++ b/networkx/algorithms/centrality/eigenvector.py @@ -69,12 +69,15 @@ def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, Notes ------ - The measure was introduced by [1]_. + The measure was introduced by [1]_ and is discussed in [2]_. - The eigenvector calculation is done by the power iteration method and has - no guarantee of convergence. The iteration will stop after ``max_iter`` - iterations or an error tolerance of ``number_of_nodes(G)*tol`` has been - reached. + Eigenvector convergence: The power iteration method is used to compute + the eigenvector and convergence is not guaranteed. Our method stops after + ``max_iter`` iterations or when the vector change is below an error + tolerance of ``number_of_nodes(G)*tol``. We actually use (A+I) rather + than the adjacency matrix A because it shifts the spectrum to enable + discerning the correct eigenvector even for networks with multiple + dominant eigenvalues. For directed graphs this is "left" eigenvector centrality which corresponds to the in-edges in the graph. For out-edges eigenvector centrality @@ -111,8 +114,8 @@ def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, # make up to max_iter iterations for i in range(max_iter): xlast = x - x = dict.fromkeys(xlast, 0) - # do the multiplication y^T = x^T A + x = xlast.copy() # Start with xlast times I to iterate with (A+I) + # do the multiplication y^T = x^T A (left eigenvector) for n in x: for nbr in G[n]: x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
diff --git a/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py --- a/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py +++ b/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py @@ -41,6 +41,9 @@ def test_P3(self): b=networkx.eigenvector_centrality_numpy(G) for n in sorted(G): assert_almost_equal(b[n],b_answer[n],places=4) + b=networkx.eigenvector_centrality(G) + for n in sorted(G): + assert_almost_equal(b[n],b_answer[n],places=4) def test_P3_unweighted(self):
eigenvector_centrality does not converge for 'periodic' graphs
The way eigenvector_centrality is implemented, it cannot work for some 'periodic' graphs because of the way power iteration works. (By periodicity, I mean the same definition as the periodicity of a Markov chain, cf. https://en.wikipedia.org/wiki/Markov_chain paragraph 4.2.) For example, it will not work for nx.path_graph(n) where n is an odd number. For instance, the eigenvector in the main loop for the graph nx.path_graph(3) oscillates between these two values:

['0 0.333', '1 0.333', '2 0.333']
['0 0.408', '1 0.816', '2 0.408']
['0 0.577', '1 0.577', '2 0.577']
['0 0.408', '1 0.816', '2 0.408']
['0 0.577', '1 0.577', '2 0.577']
['0 0.408', '1 0.816', '2 0.408']
['0 0.577', '1 0.577', '2 0.577']

etc...

This convergence issue is linked to the power iteration method and to the fact that the graph is periodic.

One easy way to solve this issue is to make the graph aperiodic by adding a non-null diagonal to the adjacency matrix when doing the power iteration. For example, we could insert in line https://github.com/networkx/networkx/blob/master/networkx/algorithms/centrality/eigenvector.py#L119 the following (outside of the second loop):

x[n] += xlast[n]

This cheap trick works in my case.
You are right. The power method the way we have implemented it won't converge for adjacency matrices with a pair of extremal eigenvalues with opposite signs. The odd-node path graphs have that property. Maybe the solution is to use power iteration on A^2? @dschult?
As I said, doing the power iteration on A + delta_I where I is the identity matrix works well. It does not change the eigenvector to add delta_I to A. I'm using it in my own code and so far it seems to behave nicely.
Yes, that is the "shifted power method". I think it won't work in general for this case unless you know what shift to use.
As you know, the power iteration method will not work for general networks/matrices. In cases where degenerate eigenvalues are dominant, convergence is not guaranteed. In the example here (odd order path graphs) a negative eigenvalue has the same magnitude as the largest positive eigenvalue.

Using A^2 creates a 2-D space of eigenvectors with largest eigenvalues: any linear combination of the dominant eigenvectors of A. Not all of them are eigenvectors of A however. Only the original eigenvectors of A work. All the non-trivial linear combinations of them are eigenvectors of A^2 but not of A. For example, if A v1 = v1 and A v2 = -v2, then A^2 (v1+v2) = (v1+v2). So (v1+v2) is an eigenvector of A^2. But A (v1+v2) = v1 - v2. I cannot find this written up anywhere, but it is true: while eigenvectors of A must be eigenvectors of A^2, eigenvectors of A^2 do not need to be eigenvectors of A.

Luckily, Perron-Frobenius comes to the rescue (as it often seems to). So long as G is strongly connected (connected for an undirected network) the adjacency matrix is non-negative and irreducible. Thus the dominant eigenvalues include a positive real value r with a one-dimensional eigenspace represented by a non-negative eigenvector. Also, all other dominant eigenvalues are evenly spread about the circle in the complex plane centered at zero with radius r. So the entire spectrum of A lies on or in that circle, and the eigenvalue we care about is on the positive real axis. If we shift the spectrum to the right by considering the matrix (A+I), we obtain a single dominant eigenvalue r+1 with the same eigenvector that corresponds to the eigenvalue r of A.

So, to find the eigenvector centrality using the power method we should multiply by A+I. (Adding other multiples of the identity to A will also work, but A+I is cheap to implement and sufficient for strongly connected networks.) I can't find this written anywhere, so maybe it should be. If anyone has seen this please let me know.

This change expands the usefulness of our simple power iteration method from strongly connected networks with non-degenerate adjacency matrices to all strongly connected networks. I'll put together a PR to implement it. Thanks @tantto for suggesting it and @hagberg for nudging me into what turned out to be a productive rabbit hole. :}
Great. It seems that as long as we shift the eigenvalues to the right (A + alpha I, where alpha>0) this should be safe and will compute the eigenvalue/eigenvector pair we want.
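A small numeric sketch of both behaviours discussed above, assuming NumPy and a networkx version that provides `to_numpy_array` (older releases expose `to_numpy_matrix` instead):

```python
import networkx as nx
import numpy as np

def power_iteration(M, steps):
    # Repeatedly multiply and renormalize, as the centrality code does.
    x = np.ones(M.shape[0])
    for _ in range(steps):
        x = M @ x
        x /= np.linalg.norm(x)
    return x

A = nx.to_numpy_array(nx.path_graph(3))
print(power_iteration(A, 50))              # [0.577 0.577 0.577] ...
print(power_iteration(A, 51))              # [0.408 0.816 0.408] -- oscillates
print(power_iteration(A + np.eye(3), 50))  # [0.5 0.707 0.5] -- converges
```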
2015-08-10T19:07:10
networkx/networkx
1,744
networkx__networkx-1744
[ "1487" ]
0f5ed91c2f1b6ddf9aa017cccace119ab2663a23
diff --git a/networkx/readwrite/graphml.py b/networkx/readwrite/graphml.py --- a/networkx/readwrite/graphml.py +++ b/networkx/readwrite/graphml.py @@ -41,6 +41,7 @@ __all__ = ['write_graphml', 'read_graphml', 'generate_graphml', 'parse_graphml', 'GraphMLWriter', 'GraphMLReader'] +from collections import defaultdict import networkx as nx from networkx.utils import open_file, make_str import warnings @@ -53,13 +54,17 @@ pass @open_file(1,mode='wb') -def write_graphml(G, path, encoding='utf-8',prettyprint=True): +def write_graphml(G, path, encoding='utf-8', prettyprint=True, infer_numeric_types=False): """Write G in GraphML XML format to path Parameters ---------- G : graph A networkx graph + infer_numeric_types : boolean + Determine if numeric types should be generalized despite different python values. + For example, if edges have both int and float 'weight' attributes, it will be + inferred in GraphML that they are both floats (which translates to double in GraphML). path : file or string File or filename to write. Filenames ending in .gz or .bz2 will be compressed. @@ -78,7 +83,7 @@ def write_graphml(G, path, encoding='utf-8',prettyprint=True): This implementation does not support mixed graphs (directed and unidirected edges together) hyperedges, nested graphs, or ports. """ - writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint) + writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint,infer_numeric_types=infer_numeric_types) writer.add_graph_element(G) writer.dump(path) @@ -230,12 +235,13 @@ class GraphML(object): } class GraphMLWriter(GraphML): - def __init__(self, graph=None, encoding="utf-8",prettyprint=True): + def __init__(self, graph=None, encoding="utf-8", prettyprint=True, infer_numeric_types=False): try: import xml.etree.ElementTree except ImportError: raise ImportError('GraphML writer requires ' 'xml.elementtree.ElementTree') + self.infer_numeric_types = infer_numeric_types self.prettyprint=prettyprint self.encoding = encoding self.xml = Element("graphml", @@ -244,6 +250,8 @@ def __init__(self, graph=None, encoding="utf-8",prettyprint=True): 'xsi:schemaLocation':self.SCHEMALOCATION} ) self.keys={} + self.attributes = defaultdict(list) + self.attribute_types = defaultdict(set) if graph is not None: self.add_graph_element(graph) @@ -255,6 +263,37 @@ def __str__(self): s=tostring(self.xml).decode(self.encoding) return s + def attr_type(self, name, scope, value): + """Infer the attribute type of data named name. Currently this only + supports inference of numeric types. + + If self.infer_numeric_types is false, type is used. Otherwise, pick the + most general of types found across all values with name and scope. This + means edges with data named 'weight' are treated separately from nodes + with data named 'weight'. + """ + if self.infer_numeric_types: + types = self.attribute_types[(name, scope)] + + try: + chr(12345) # Fails on Py!=3. + long = int # Py3K's int is our long type + except ValueError: + # Python 2.x + pass + + if len(types) > 1: + if float in types: + return float + elif long in types: + return long + else: + return int + else: + return list(types)[0] + else: + return type(value) + def get_key(self, name, attr_type, scope, default): keys_key = (name, attr_type, scope) try: @@ -292,13 +331,12 @@ def add_data(self, name, element_type, value, return data_element def add_attributes(self, scope, xml_obj, data, default): - """Appends attributes to edges or nodes. 
+ """Appends attribute data to edges or nodes, and stores type information + to be added later. See add_graph_element. """ for k,v in data.items(): - default_value=default.get(k) - obj=self.add_data(make_str(k), type(v), make_str(v), - scope=scope, default=default_value) - xml_obj.append(obj) + self.attribute_types[(make_str(k), scope)].add(type(v)) + self.attributes[xml_obj].append([k, v, scope, default.get(k)]) def add_nodes(self, G, graph_element): for node,data in G.nodes(data=True): @@ -349,8 +387,19 @@ def add_graph_element(self, G): self.add_attributes("graph", graph_element, data, default) self.add_nodes(G,graph_element) self.add_edges(G,graph_element) + + # self.attributes contains a mapping from XML Objects to a list of + # data that needs to be added to them. + # We postpone processing of this in order to do type inference/generalization. + # See self.attr_type + for (xml_obj, data) in self.attributes.items(): + for (k, v, scope, default) in data: + xml_obj.append(self.add_data(make_str(k), self.attr_type(k, scope, v), make_str(v), + scope, default)) + self.xml.append(graph_element) + def add_graphs(self, graph_list): """ Add many graphs to this GraphML document. @@ -526,10 +575,10 @@ def decode_data_elements(self, graphml_keys, obj_xml): # check all the diffrent types of edges avaivable in yEd. for e in ['PolyLineEdge', 'SplineEdge', 'QuadCurveEdge', 'BezierEdge', 'ArcEdge']: - edge_label = data_element.find("{%s}%s/{%s}EdgeLabel"% + edge_label = data_element.find("{%s}%s/{%s}EdgeLabel"% (self.NS_Y, e, (self.NS_Y))) - if edge_label is not None: - break + if edge_label is not None: + break if edge_label is not None: data['label'] = edge_label.text
diff --git a/networkx/readwrite/tests/test_graphml.py b/networkx/readwrite/tests/test_graphml.py --- a/networkx/readwrite/tests/test_graphml.py +++ b/networkx/readwrite/tests/test_graphml.py @@ -121,6 +121,34 @@ def setUp(self): self.attribute_graph.add_edge('n5','n4',id='e6',weight=1.1) self.attribute_fh = io.BytesIO(self.attribute_data.encode('UTF-8')) + self.attribute_numeric_type_data = """<?xml version='1.0' encoding='utf-8'?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + <key attr.name="weight" attr.type="double" for="node" id="d1" /> + <key attr.name="weight" attr.type="double" for="edge" id="d0" /> + <graph edgedefault="directed"> + <node id="n0"> + <data key="d1">1</data> + </node> + <node id="n1"> + <data key="d1">2.0</data> + </node> + <edge source="n0" target="n1"> + <data key="d0">1</data> + </edge> + <edge source="n1" target="n1"> + <data key="d0">1.0</data> + </edge> + </graph> +</graphml> +""" + + self.attribute_numeric_type_graph = nx.DiGraph() + self.attribute_numeric_type_graph.add_node('n0', weight=1) + self.attribute_numeric_type_graph.add_node('n1', weight=2.0) + self.attribute_numeric_type_graph.add_edge('n0', 'n1', weight=1) + self.attribute_numeric_type_graph.add_edge('n1', 'n1', weight=1.0) + self.attribute_numeric_type_fh = io.BytesIO(self.attribute_numeric_type_data.encode('UTF-8')) + self.simple_undirected_data="""<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> @@ -187,6 +215,33 @@ def test_read_simple_undirected_graphml(self): sorted(sorted(e) for e in G.edges()), sorted(sorted(e) for e in I.edges())) + def test_write_read_attribute_numeric_type_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_numeric_type_graph + fh = io.BytesIO() + nx.write_graphml(G, fh, infer_numeric_types=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert_equal(sorted(G.nodes()), sorted(H.nodes())) + assert_equal(sorted(G.edges()), sorted(H.edges())) + assert_equal(sorted(G.edges(data=True)), + sorted(H.edges(data=True))) + self.attribute_numeric_type_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = xml.getroot().getchildren() + assert_equal(len(children), 3) + + keys = [child.items() for child in children[:2]] + + assert_equal(len(keys), 2) + assert_in(('attr.type', 'double'), keys[0]) + assert_in(('attr.type', 'double'), keys[1]) + def test_read_attribute_graphml(self): G=self.attribute_graph H=nx.read_graphml(self.attribute_fh) @@ -442,4 +497,3 @@ def test_bool(self): H=nx.parse_graphml(s) assert_equal(H.node['n0']['test'],True) assert_equal(H.node['n2']['test'],False) -
write_graphml does not recast properties with same name but different value types ``` g = nx.DiGraph() g.add_node('a') g.add_node('b') g.node['a']['count'] = 1 g.node['b']['count'] = float(.5) nx.write_graphml(g, "g.graphml") ``` The above code will result in two separate attribute keys within the XML file, one for the int and one for the float. Expected operation would be for the 'count' property to be abstracted to the broadest type which could contain all values (in this case, float), and saved as a single attribute.
Good catch. I think we do a single pass right now. This will require two passes but probably should be done. If you wait to write the attribute type until the end, you can keep track of the types encountered for a property and write the appropriate type to encompass them all at the end.
This could get tricky if the types are not so easily abstracted. For example, if someone has an attribute with '1', 1, True, and "True" as values, etc. Maybe we shouldn't attempt to do something fancy here?
I definitely don't think we should be converting non-numeric types to numeric...and worrying about the numerous ways of representing bools is a pain. But numeric attributes seem like an easy use case to handle specially. I'd imagine the cases where one wants mixed numeric types are nearly nonexistent, and so if we see both int and float, there seems to be an obvious answer in terms of what the user would like during conversion...very similar to what most (friendly) CSV readers do when a column contains numeric-looking data but with only some values having decimals in them. I guess we'd at least need to allow this inference to be disabled.
boolean -> int -> long -> float -> double -> string. Take the one most to the right?
@gdbassett, if there were an interface that allowed you to specify the type for each attribute, would that be sufficient? Or would you want it automatically inferred?
I think it should be inferred. The simple nature of the write_\* functions is one of the benefits of the module.
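A sketch of the "most general type wins" rule floated above, in Python 3 terms (where `int` subsumes `long`); the ordering tuple is illustrative only, and the merged patch restricts generalization to the numeric types:

```python
ORDER = (bool, int, float, str)  # least to most general, per the proposal

def generalize(seen_types):
    """Pick the most general type among those seen for one attribute.

    Raises ValueError for types outside ORDER; a real implementation
    would fall back to string in that case.
    """
    return max(seen_types, key=ORDER.index)

print(generalize({int, float}))  # <class 'float'>
print(generalize({bool, int}))   # <class 'int'>
print(generalize({int, str}))    # <class 'str'>
```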
2015-08-23T18:25:30
networkx/networkx
1,781
networkx__networkx-1781
[ "1780" ]
328d899fb17bac857eedd9ec3e03f4ede9b11c63
diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py --- a/networkx/readwrite/gml.py +++ b/networkx/readwrite/gml.py @@ -286,7 +286,7 @@ def parse_gml_lines(lines, label, destringizer): """ def tokenize(): patterns = [ - r'[A-Za-z][0-9A-Za-z]*\s+', # keys + r'[A-Za-z][0-9A-Za-z_]*\s+', # keys r'[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(?:[Ee][+-]?[0-9]+)?', # reals r'[+-]?[0-9]+', # ints r'".*?"', # strings
diff --git a/networkx/readwrite/tests/test_gml.py b/networkx/readwrite/tests/test_gml.py --- a/networkx/readwrite/tests/test_gml.py +++ b/networkx/readwrite/tests/test_gml.py @@ -65,6 +65,90 @@ def setUp(self): ] ] """ + def test_parse_gml_cytoscape_bug(self): + # example from issue #321, originally #324 in trac + cytoscape_example = """ +Creator "Cytoscape" +Version 1.0 +graph [ + node [ + root_index -3 + id -3 + graphics [ + x -96.0 + y -67.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node2" + ] + node [ + root_index -2 + id -2 + graphics [ + x 63.0 + y 37.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node1" + ] + node [ + root_index -1 + id -1 + graphics [ + x -31.0 + y -17.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node0" + ] + edge [ + root_index -2 + target -2 + source -1 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] + edge [ + root_index -1 + target -1 + source -3 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] +] +""" + nx.parse_gml(cytoscape_example) def test_parse_gml(self): G = nx.parse_gml(self.simple_data, label='label')
Cannot parse GML files from Cytoscape, possible regression? This does not appear to be a duplicate of #321 -- I cannot parse .GML files produced from Cytoscape, including the "simple.gml" [file](https://networkx.lanl.gov/trac/attachment/ticket/324/simple.gml) as reported in #321. Below is the output I receive if parsing the file provided in that issue. From reading the comments, it looks like the suggestion is to either modify the GML parser directly in networkx (allowing slightly out of format GML files), or to modify the output from Cytoscape. If I issue a PR for the GML parser modification, would there be support for it? ``` python 10:20:28 (mcdonadt@8086):~> nx.__version__ Out[5]: '1.10' 10:20:32 (mcdonadt@8086):~> g = nx.read_gml('Downloads/simple.gml') --------------------------------------------------------------------------- NetworkXError Traceback (most recent call last) <ipython-input-6-e05f5232526f> in <module>() ----> 1 g = nx.read_gml('Downloads/simple.gml') /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in read_gml(path, label, destringizer) /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/utils/decorators.py in _open_file(func, *args, **kwargs) 218 # Finally, we call the original function, making sure to close the fobj. 219 try: --> 220 result = func(*new_args, **kwargs) 221 finally: 222 if close_fobj: /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in read_gml(path, label, destringizer) 208 yield line 209 --> 210 G = parse_gml_lines(filter_lines(path), label, destringizer) 211 return G 212 /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_gml_lines(lines, label, destringizer) 381 382 tokens = tokenize() --> 383 graph = parse_graph() 384 385 directed = graph.pop('directed', False) /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_graph() 370 371 def parse_graph(): --> 372 curr_token, dct = parse_kv(next(tokens)) 373 if curr_token[0] is not None: # EOF 374 unexpected(curr_token, 'EOF') /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_kv(curr_token) 355 curr_token = next(tokens) 356 elif type == 4: # dict start --> 357 curr_token, value = parse_dict(curr_token) 358 else: 359 unexpected(curr_token, "an int, float, string or '['") /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_dict(curr_token) 365 def parse_dict(curr_token): 366 curr_token = consume(curr_token, 4, "'['") # dict start --> 367 curr_token, dct = parse_kv(curr_token) 368 curr_token = consume(curr_token, 5, "']'") # dict end 369 return curr_token, dct /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_kv(curr_token) 355 curr_token = next(tokens) 356 elif type == 4: # dict start --> 357 curr_token, value = parse_dict(curr_token) 358 else: 359 unexpected(curr_token, "an int, float, string or '['") /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_dict(curr_token) 364 365 def parse_dict(curr_token): --> 366 curr_token = consume(curr_token, 4, "'['") # dict start 367 curr_token, dct = parse_kv(curr_token) 368 curr_token = consume(curr_token, 5, "']'") # dict end 
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in consume(curr_token, type, expected) 334 def consume(curr_token, type, expected): 335 if curr_token[0] == type: --> 336 return next(tokens) 337 unexpected(curr_token, expected) 338 /Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in tokenize() 321 else: 322 raise NetworkXError('cannot tokenize %r at (%d, %d)' % --> 323 (line[pos:], lineno + 1, pos + 1)) 324 lineno += 1 325 yield (None, None, lineno + 1, 1) # EOF NetworkXError: cannot tokenize 'root_index\t-3' at (5, 3) ```
Maybe the problem is that the parser doesn't recognize tabs as whitespace separators?
Yes, there would be support for such a PR. Check out the suggestion from @hagberg...
I thought the underlying issue was that the spec doesn't include support for "_" characters? I did a terrible `sed` to replace "root_index" with "rootindex" and parsing was able to proceed, but it hit another wall at another token that had an underscore.
Oh, right. That isn't part of the spec. We'd need to add something like
r'[A-Za-z][0-9A-Za-z_]*\s+', # keys
to the parser to accept that.
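A quick check of the proposed key pattern against the token that failed; the two regular expressions below are the parser's current one and the suggested replacement:

```python
import re

old_key = re.compile(r"[A-Za-z][0-9A-Za-z]*\s+")   # current pattern
new_key = re.compile(r"[A-Za-z][0-9A-Za-z_]*\s+")  # underscore allowed

line = "root_index\t-3"
print(old_key.match(line))  # None -> "cannot tokenize 'root_index\t-3'"
print(new_key.match(line))  # matches 'root_index' plus the tab separator
```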
2015-09-29T20:34:48
networkx/networkx
1,792
networkx__networkx-1792
[ "1784" ]
76d040056bd935a95874058ed97b7d73eabca64a
diff --git a/networkx/algorithms/shortest_paths/weighted.py b/networkx/algorithms/shortest_paths/weighted.py --- a/networkx/algorithms/shortest_paths/weighted.py +++ b/networkx/algorithms/shortest_paths/weighted.py @@ -346,8 +346,10 @@ def _dijkstra(G, source, get_weight, pred=None, paths=None, cutoff=None, pop = heappop dist = {} # dictionary of final distances seen = {source: 0} + # fringe is heapq with 3-tuples (distance,c,node) + # use the count c to avoid comparing nodes (may not be able to) c = count() - fringe = [] # use heapq with (distance,label) tuples + fringe = [] push(fringe, (0, next(c), source)) while fringe: (d, _, v) = pop(fringe)
Minor inefficiency in Dijkstra algorithms
The Dijkstra algorithms all seem to have `c=count()` in them. The variable `c` is used a few times, but never for anything that actually affects the calculation. I suspect it was put in during some testing. In my (limited) testing, it can be removed without altering any results.
It looks like c is used in the tuple that gets pushed onto the heap. So presumably the position in the heap will be different if we remove the `c` variable. Currently we push 3-tuples (uv-dist, next(c), u). If we push (uv-dist, u), and uv-dist is the same for two nodes, then the heap comparison will depend on comparison of nodes `u`. We want to allow nodes that can't be compared. I think this is the reason we include the value c here. Maybe a comment in the code to that effect would be helpful.
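A minimal demonstration of that tie-breaking role, using a hypothetical node class that defines no ordering:

```python
import heapq
from itertools import count

class Node:
    """A hypothetical node type with no ordering defined."""

a, b = Node(), Node()

# Without a tie-breaker, equal distances fall through to comparing nodes:
h = [(1.0, a)]
try:
    heapq.heappush(h, (1.0, b))  # tuple comparison reaches b < a
except TypeError as err:
    print("fails:", err)

# With the counter, ties are settled before nodes are ever compared:
c, h = count(), []
heapq.heappush(h, (1.0, next(c), a))
heapq.heappush(h, (1.0, next(c), b))
print(heapq.heappop(h)[2] is a)  # True: insertion order broke the tie
```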
2015-10-03T04:31:18
networkx/networkx
1,809
networkx__networkx-1809
[ "1808" ]
87a620eae4f0afaa9ab9fd86e87e64b40dac3685
diff --git a/networkx/algorithms/cycles.py b/networkx/algorithms/cycles.py --- a/networkx/algorithms/cycles.py +++ b/networkx/algorithms/cycles.py @@ -105,10 +105,9 @@ def cycle_basis(G,root=None): def simple_cycles(G): """Find simple cycles (elementary circuits) of a directed graph. - An simple cycle, or elementary circuit, is a closed path where no - node appears twice, except that the first and last node are the same. - Two elementary circuits are distinct if they are not cyclic permutations - of each other. + A ``simple cycle``, or ``elementary circuit``, is a closed path where + no node appears twice. Two elementary circuits are distinct if they + are not cyclic permutations of each other. This is a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_. @@ -121,8 +120,8 @@ def simple_cycles(G): Returns ------- cycle_generator: generator - A generator that produces elementary cycles of the graph. Each cycle is - a list of nodes with the first and last nodes being the same. + A generator that produces elementary cycles of the graph. + Each cycle is represented by a list of nodes along the cycle. Examples -------- @@ -226,13 +225,12 @@ def _unblock(thisnode,blocked,B): def recursive_simple_cycles(G): """Find simple cycles (elementary circuits) of a directed graph. - A simple cycle, or elementary circuit, is a closed path where no - node appears twice, except that the first and last node are the same. - Two elementary circuits are distinct if they are not cyclic permutations - of each other. + A ``simple cycle``, or ``elementary circuit``, is a closed path where + no node appears twice. Two elementary circuits are distinct if they + are not cyclic permutations of each other. This version uses a recursive algorithm to build a list of cycles. - You should probably use the iterator version caled simple_cycles(). + You should probably use the iterator version called simple_cycles(). Warning: This recursive version uses lots of RAM! Parameters @@ -242,8 +240,8 @@ def recursive_simple_cycles(G): Returns ------- - A list of circuits, where each circuit is a list of nodes, with the first - and last node being the same. + A list of cycles, where each cycle is represented by a list of nodes + along the cycle. Example:
Wrong doc or behavior for simple_cycles
On 1.9.1, we can read in simple_cycles' documentation:

Returns: cycle_generator: generator
A generator that produces elementary cycles of the graph. **Each cycle is a list of nodes with the first and last nodes being the same.** [emphasis added]

I wrote a simple test case and the result doesn't match the sentence in bold.

``` python
>>> import networkx as nx
>>> graph = nx.DiGraph()
>>> graph.add_nodes_from(['A', 'B', 'C'])
>>> graph.add_edges_from({('A', 'B'), ('B', 'C'), ('C', 'A')})
>>> list(nx.simple_cycles(graph))
[['A', 'B', 'C']]
```

I thought the result would be `[['A', 'B', 'C', 'A']]`
I also tested on the latest release 1.10 and obtained the same result.
Thank you!! ... I'll get that fixed. Looks like it was changed in 1.8 without changing documentation. oops. `Each cycle is represented by a list of nodes along the cycle.`
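For code that relied on the old documented form, re-closing each cycle is a one-liner; a small sketch:

```python
import networkx as nx

G = nx.DiGraph([('A', 'B'), ('B', 'C'), ('C', 'A')])
for cycle in nx.simple_cycles(G):
    print(cycle)              # e.g. ['A', 'B', 'C']
    print(cycle + cycle[:1])  # ['A', 'B', 'C', 'A'] if a closed walk is wanted
```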
2015-10-26T20:23:38
networkx/networkx
1,834
networkx__networkx-1834
[ "1832" ]
a866256a17529eb64081f2b5cfdbd6fe14fb675f
diff --git a/networkx/algorithms/cycles.py b/networkx/algorithms/cycles.py --- a/networkx/algorithms/cycles.py +++ b/networkx/algorithms/cycles.py @@ -351,7 +351,7 @@ def find_cycle(G, source=None, orientation='original'): ------- edges : directed edges A list of directed edges indicating the path taken for the loop. If - no cycle is found, then ``edges`` will be an empty list. For graphs, an + no cycle is found, then an exception is raised. For graphs, an edge is of the form (u, v) where ``u`` and ``v`` are the tail and head of the edge as determined by the traversal. For multigraphs, an edge is of the form (u, v, key), where ``key`` is the key of the edge. When the @@ -362,6 +362,11 @@ def find_cycle(G, source=None, orientation='original'): direction. When the direction is forward, the value of ``direction`` is 'forward'. When the direction is reverse, the value of ``direction`` is 'reverse'. + + Raises + ------ + NetworkXNoCycle + If no cycle was found. Examples --------
find_cycle() raises NetworkXNoCycle, contrary to doc The [doc for find_cycle() says](https://networkx.github.io/documentation/latest/reference/generated/networkx.algorithms.cycles.find_cycle.html#networkx.algorithms.cycles.find_cycle) > edges – A list of directed edges indicating the path taken for the loop. If no cycle is found, then edges will be an empty list. But the following snippet raises NetworkXNoCycle: ``` python import networkx from networkx.algorithms.cycles import find_cycle G = networkx.DiGraph([(0,1), (1,2)]) find_cycle(G) ``` I'm using networkx 1.10.
Yup, looks like no changes were made to the docs in d053488b1331e6f0205fafe4db198f6a815816fe where `NetworkXNoCycle` was implemented.
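The documented behaviour after this fix, in a short sketch (assuming `NetworkXNoCycle` is reachable from the package top level, as in recent releases):

```python
import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2)])
try:
    print(nx.find_cycle(G))
except nx.NetworkXNoCycle:
    print("no cycle found")  # raised instead of returning an empty list
```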
2015-11-04T08:29:03
networkx/networkx
1,862
networkx__networkx-1862
[ "1850" ]
8ae8f11fe595bf049fc5343dfe25927912c44440
diff --git a/networkx/algorithms/centrality/current_flow_betweenness.py b/networkx/algorithms/centrality/current_flow_betweenness.py --- a/networkx/algorithms/centrality/current_flow_betweenness.py +++ b/networkx/algorithms/centrality/current_flow_betweenness.py @@ -283,6 +283,13 @@ def edge_current_flow_betweenness_centrality(G, normalized=True, nodes : dictionary Dictionary of edge tuples with betweenness centrality as the value. + Raises + ------ + NetworkXError + The algorithm does not support DiGraphs. + If the input graph is an instance of DiGraph class, NetworkXError + is raised. + See Also -------- betweenness_centrality
Current-flow betweenness for directed graphs Exception?
Hi guys
Does networkx provide algorithms for calculating the node (or edge) current-flow betweenness for _**directed graphs**_? I have tried to call the function `edge_current_flow_betweenness_centrality`, but it raises an exception saying "not working for directed graphs". However, the documentation

> edge_current_flow_betweenness_centrality(G, normalized=True, weight='weight', dtype=<type 'float'>, solver='full')
> Compute current-flow betweenness centrality for edges.
> Current-flow betweenness centrality uses an electrical current model for information spreading in contrast to betweenness centrality which uses shortest paths.
> Current-flow betweenness centrality is also known as random-walk betweenness centrality 36.
> **Parameters**
> - **G** (graph) – A NetworkX graph
> - **normalized** (bool, optional (default=True)) – If True the betweenness values are normalized by 2/[(n-1)(n-2)] where n is the number of nodes in G.
> - **weight** (string or None, optional (default='weight')) – Key for edge data used as the edge weight. If None, then use 1 as each edge weight.
> - **dtype** (data type (float)) – Default data type for internal matrices. Set to np.float32 for lower memory consumption.
> - **solver** (string (default='lu')) – Type of linear solver to use for computing the flow matrix. Options are "full" (uses most memory), "lu" (recommended), and "cg" (uses least memory).
> - **Returns nodes** – Dictionary of edge tuples with betweenness centrality as the value.
> - **Return type** dictionary

does not say anything about this exception. Perhaps the exception should be noted in the documentation? Something like _**Raises Exception: if graph is not undirected**_
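A short sketch of the behaviour the amended docstring describes; catching `NetworkXError` follows the patched documentation (newer releases may raise `NetworkXNotImplemented` instead), and the undirected workaround on the last line is only one possible approach, not part of the patch:

```python
import networkx as nx  # NumPy is needed for the current-flow solvers

G = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
try:
    nx.edge_current_flow_betweenness_centrality(G)
except nx.NetworkXError as err:
    print(err)  # the exception the docstring now documents

# One way around it: compute on the undirected view instead.
print(nx.edge_current_flow_betweenness_centrality(G.to_undirected()))
```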
2015-11-24T17:57:10
networkx/networkx
1,908
networkx__networkx-1908
[ "1736" ]
e0479d2e090ec301de9612330585e9bc8d1f967c
diff --git a/networkx/algorithms/matching.py b/networkx/algorithms/matching.py --- a/networkx/algorithms/matching.py +++ b/networkx/algorithms/matching.py @@ -46,7 +46,7 @@ def maximal_matching(G): for u,v in G.edges(): # If the edge isn't covered, add it to the matching # then remove neighborhood of u and v from consideration. - if u not in nodes and v not in nodes: + if u not in nodes and v not in nodes and u!=v: matching.add((u,v)) nodes.add(u) nodes.add(v)
diff --git a/networkx/algorithms/tests/test_matching.py b/networkx/algorithms/tests/test_matching.py --- a/networkx/algorithms/tests/test_matching.py +++ b/networkx/algorithms/tests/test_matching.py @@ -247,6 +247,20 @@ def test_maximal_matching(): vset = set(u for u, v in matching) vset = vset | set(v for u, v in matching) + for edge in graph.edges(): + u, v = edge + ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \ + "not a proper matching!") + graph = nx.Graph() + graph.add_edge(1, 1) + graph.add_edge(1, 2) + graph.add_edge(2, 2) + graph.add_edge(2, 3) + matching = nx.maximal_matching(graph) + assert(len(matching)==1) + vset = set(u for u, v in matching) + vset = vset | set(v for u, v in matching) + for edge in graph.edges(): u, v = edge ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \
maximal_matching and self loops `maximal_matching` does allow self-loops ``` py >>> G = nx.Graph([[1,1]]) >>> nx.matching.maximal_matching(G) {(1, 1)} ``` whereas `max_weight_matching` does not ``` py >>> nx.matching.max_weight_matching(G) {} ``` Is this expected behaviour? If not, a simple `u != v` check should fix it.
Interesting. I don't know what the correct definition is. But we should be consistent. Maybe don't allow self loops? If we document that and give an example of how to remove self loops then both cases should be covered. Anyone have comments on how to resolve this? I suggest not allowing self-loops in maximal matchings. Consider the path graph on two nodes with self-loops on both nodes. If self-loops are allowed, the maximum matching would be the set of two self-loops, which is a little surprising. This may be desirable in certain situations, but I think we should leave that as an example in the documentation if necessary.
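A sketch of the "remove self loops first" recipe suggested above (newer networkx spells it `nx.selfloop_edges(G)`; 1.x exposes it as the `G.selfloop_edges()` method):

```python
import networkx as nx

G = nx.Graph([(1, 1), (1, 2), (2, 2), (2, 3)])
G.remove_edges_from(list(nx.selfloop_edges(G)))  # drop self-loops up front
print(nx.maximal_matching(G))                    # e.g. {(1, 2)}
```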
2016-01-05T18:21:56
networkx/networkx
1,914
networkx__networkx-1914
[ "1812" ]
399fec8b80bf49e2ea90126b8598863897c01570
diff --git a/networkx/algorithms/connectivity/cuts.py b/networkx/algorithms/connectivity/cuts.py --- a/networkx/algorithms/connectivity/cuts.py +++ b/networkx/algorithms/connectivity/cuts.py @@ -285,7 +285,8 @@ def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): mapping = H.graph.get('mapping', None) if mapping is None: raise nx.NetworkXError('Invalid auxiliary digraph.') - + if G.has_edge(s, t) or G.has_edge(t, s): + return [] kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H) # The edge cut in the auxiliary digraph corresponds to the node cut in the
diff --git a/networkx/algorithms/connectivity/tests/test_cuts.py b/networkx/algorithms/connectivity/tests/test_cuts.py --- a/networkx/algorithms/connectivity/tests/test_cuts.py +++ b/networkx/algorithms/connectivity/tests/test_cuts.py @@ -238,6 +238,13 @@ def tests_min_cut_complete_directed(): for flow_func in flow_funcs: assert_equal(4, len(interface_func(G, flow_func=flow_func))) +def tests_minimum_st_node_cut(): + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12]) + G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)]) + nodelist = minimum_st_node_cut(G, 7, 11) + assert(nodelist == []) + def test_invalid_auxiliary(): G = nx.complete_graph(5) assert_raises(nx.NetworkXError, minimum_st_node_cut, G, 0, 3,
minimum_node_cut gives a wrong result if there is an edge between the source and target nodes in a directed Graph
For the simple graph generated by the following code:

```
G = nx.Graph()
G.add_nodes_from([0,1,2,3,4])
G.add_edges_from([(0,1),(0,2),(1,3),(3,4),(2,3)], weight = 1)
print nx.algorithms.connectivity.minimum_node_cut(G,1,0)
```

The output is `set([2])`. But clearly, even after removing node 2, there would still be a path between 0 and 1: the direct edge between them. I believe the proper output should be an empty set, indicating there is no possible node cut.
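A quick check of the patched behaviour, on the same small graph the new test adds:

```python
import networkx as nx
from networkx.algorithms.connectivity import minimum_st_node_cut

G = nx.Graph()
G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12])
G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)])

# 7 and 11 are adjacent, so no set of other nodes can disconnect them;
# with the patch an empty cut is returned instead of a wrong one.
print(minimum_st_node_cut(G, 7, 11))  # []
```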
2016-01-07T17:28:23
networkx/networkx
1,963
networkx__networkx-1963
[ "1959" ]
ec6dfae2aaebbbbf0a4620002ab795efa6430c25
diff --git a/networkx/algorithms/core.py b/networkx/algorithms/core.py --- a/networkx/algorithms/core.py +++ b/networkx/algorithms/core.py @@ -4,30 +4,41 @@ # Pieter Swart <[email protected]> # All rights reserved. # BSD license. +# +# Authors: Dan Schult ([email protected]) +# Jason Grout ([email protected]) +# Aric Hagberg ([email protected]) """ Find the k-cores of a graph. The k-core is found by recursively pruning nodes with degrees less than k. -See the following reference for details: +See the following references for details: An O(m) Algorithm for Cores Decomposition of Networks Vladimir Batagelj and Matjaz Zaversnik, 2003. http://arxiv.org/abs/cs.DS/0310049 -""" - -__author__ = "\n".join(['Dan Schult ([email protected])', - 'Jason Grout ([email protected])', - 'Aric Hagberg ([email protected])']) +Generalized Cores +Vladimir Batagelj and Matjaz Zaversnik, 2002. +http://arxiv.org/pdf/cs/0202039 -__all__ = ['core_number','k_core','k_shell','k_crust','k_corona','find_cores'] +For directed graphs a more general notion is that of D-cores which +looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core +is the k-core. +D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy +Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011. +http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf +""" import networkx as nx -from networkx import all_neighbors from networkx.exception import NetworkXError from networkx.utils import not_implemented_for +__all__ = ['core_number', 'find_cores', 'k_core', + 'k_shell', 'k_crust', 'k_corona'] + + @not_implemented_for('multigraph') def core_number(G): """Return the core number for each vertex. @@ -50,7 +61,8 @@ def core_number(G): Raises ------ NetworkXError - The k-core is not defined for graphs with self loops or parallel edges. + The k-core is not implemented for graphs with self loops + or parallel edges. Notes ----- @@ -66,9 +78,9 @@ def core_number(G): http://arxiv.org/abs/cs.DS/0310049 """ if G.number_of_selfloops() > 0: - raise NetworkXError( - 'Input graph has self loops; the core number is not defined.' - ' Consider using G.remove_edges_from(G.selfloop_edges()).') + msg = ('Input graph has self loops which is not permitted; ' + 'Consider using G.remove_edges_from(G.selfloop_edges()).') + raise NetworkXError(msg) degrees = dict(G.degree()) # Sort nodes by degree. nodes = sorted(degrees, key=degrees.get) @@ -81,7 +93,7 @@ def core_number(G): node_pos = {v: pos for pos, v in enumerate(nodes)} # The initial guess for the core number of a node is its degree. core = degrees - nbrs = {v: set(all_neighbors(G, v)) for v in G} + nbrs = {v: list(nx.all_neighbors(G, v)) for v in G} for v in nodes: for u in nbrs[v]: if core[u] > core[v]: @@ -99,34 +111,34 @@ def core_number(G): find_cores = core_number -def _core_helper(G, func, k=None, core=None): - """Returns the subgraph induced by all nodes for which ``func`` - returns ``True``. - - ``G`` is a NetworkX graph. - - ``func`` is a function that takes three inputs: a node of ``G``, the - maximum core value, and the core number of the graph. The function - must return a Boolean value. +def _core_subgraph(G, k_filter, k=None, core=None): + """Returns the subgraph induced by nodes passing filter ``k_filter``. - ``k`` is the order of the core. If not specified, the maximum over - all core values will be returned. - - ``core`` is a dictionary mapping node to core numbers for that - node. If you have already computed it, you should provide it - here. 
If not specified, the core numbers will be computed from the - graph. + Parameters + ---------- + G : NetworkX graph + The graph or directed graph to process + k_filter : filter function + This function filters the nodes chosen. It takes three inputs: + A node of G, the filter's cutoff, and the core dict of the graph. + The function should return a Boolean value. + k : int, optional + The order of the core. If not specified use the max core number. + This value is used as the cutoff for the filter. + core : dict, optional + Precomputed core numbers keyed by node for the graph ``G``. + If not specified, the core numbers will be computed from ``G``. """ if core is None: core = core_number(G) if k is None: k = max(core.values()) - nodes = [v for v in core if func(v, k, core)] + nodes = (v for v in core if k_filter(v, k, core)) return G.subgraph(nodes).copy() -def k_core(G,k=None,core_number=None): +def k_core(G, k=None, core_number=None): """Return the k-core of G. A k-core is a maximal subgraph that contains nodes of degree k or more. @@ -171,21 +183,23 @@ def k_core(G,k=None,core_number=None): Vladimir Batagelj and Matjaz Zaversnik, 2003. http://arxiv.org/abs/cs.DS/0310049 """ - func = lambda v, k, core_number: core_number[v] >= k - return _core_helper(G, func, k, core_number) + def k_filter(v, k, c): + return c[v] >= k + return _core_subgraph(G, k_filter, k, core_number) -def k_shell(G,k=None,core_number=None): +def k_shell(G, k=None, core_number=None): """Return the k-shell of G. - The k-shell is the subgraph of nodes in the k-core but not in the (k+1)-core. + The k-shell is the subgraph induced by nodes with core number k. + That is, nodes in the k-core that are not in the (k+1)-core. Parameters ---------- G : NetworkX graph A graph or directed graph. k : int, optional - The order of the shell. If not specified return the main shell. + The order of the shell. If not specified return the outer shell. core_number : dictionary, optional Precomputed core numbers for the graph G. @@ -198,7 +212,8 @@ def k_shell(G,k=None,core_number=None): Raises ------ NetworkXError - The k-shell is not defined for graphs with self loops or parallel edges. + The k-shell is not implemented for graphs with self loops + or parallel edges. Notes ----- @@ -225,11 +240,12 @@ def k_shell(G,k=None,core_number=None): and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 http://www.pnas.org/content/104/27/11150.full """ - func = lambda v, k, core_number: core_number[v] == k - return _core_helper(G, func, k, core_number) + def k_filter(v, k, c): + return c[v] == k + return _core_subgraph(G, k_filter, k, core_number) -def k_crust(G,k=None,core_number=None): +def k_crust(G, k=None, core_number=None): """Return the k-crust of G. The k-crust is the graph G with the k-core removed. @@ -251,7 +267,8 @@ def k_crust(G,k=None,core_number=None): Raises ------ NetworkXError - The k-crust is not defined for graphs with self loops or parallel edges. + The k-crust is not implemented for graphs with self loops + or parallel edges. Notes ----- @@ -276,16 +293,14 @@ def k_crust(G,k=None,core_number=None): and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 http://www.pnas.org/content/104/27/11150.full """ - func = lambda v, k, core_number: core_number[v] <= k - # HACK These two checks are done in _core_helper, but this function - # requires k to be one less than the maximum core value instead of - # just the maximum. Therefore we duplicate the checks here. A better - # solution should exist... 
+ # Default for k is one less than in _core_subgraph, so just inline. + # Filter is c[v] <= k if core_number is None: - core_number = nx.core_number(G) + core_number = find_cores(G) if k is None: k = max(core_number.values()) - 1 - return _core_helper(G, func, k, core_number) + nodes = (v for v in core_number if core_number[v] <= k) + return G.subgraph(nodes).copy() def k_corona(G, k, core_number=None): @@ -335,5 +350,6 @@ def k_corona(G, k, core_number=None): Phys. Rev. E 73, 056101 (2006) http://link.aps.org/doi/10.1103/PhysRevE.73.056101 """ - func = lambda v, k, c: c[v] == k and sum(1 for w in G[v] if c[w] >= k) == k - return _core_helper(G, func, k, core_number) + def func(v, k, c): + return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k) + return _core_subgraph(G, func, k, core_number)
diff --git a/networkx/algorithms/tests/test_core.py b/networkx/algorithms/tests/test_core.py --- a/networkx/algorithms/tests/test_core.py +++ b/networkx/algorithms/tests/test_core.py @@ -2,8 +2,8 @@ from nose.tools import * import networkx as nx -class TestCore: +class TestCore: def setUp(self): # G is the example graph in Figure 1 from Batagelj and # Zaversnik's paper titled An O(m) Algorithm for Cores @@ -12,103 +12,114 @@ def setUp(self): # shown, the 3-core is given by nodes 1-8, the 2-core by nodes # 9-16, the 1-core by nodes 17-20 and node 21 is in the # 0-core. - t1=nx.convert_node_labels_to_integers(nx.tetrahedral_graph(),1) - t2=nx.convert_node_labels_to_integers(t1,5) - G=nx.union(t1,t2) - G.add_edges_from( [(3,7), (2,11), (11,5), (11,12), (5,12), (12,19), - (12,18), (3,9), (7,9), (7,10), (9,10), (9,20), - (17,13), (13,14), (14,15), (15,16), (16,13)]) + t1 = nx.convert_node_labels_to_integers(nx.tetrahedral_graph(), 1) + t2 = nx.convert_node_labels_to_integers(t1, 5) + G = nx.union(t1, t2) + G.add_edges_from([(3, 7), (2, 11), (11, 5), (11, 12), (5, 12), + (12, 19), (12, 18), (3, 9), (7, 9), (7, 10), + (9, 10), (9, 20), (17, 13), (13, 14), (14, 15), + (15, 16), (16, 13)]) G.add_node(21) - self.G=G + self.G = G # Create the graph H resulting from the degree sequence - # [0,1,2,2,2,2,3] when using the Havel-Hakimi algorithm. + # [0, 1, 2, 2, 2, 2, 3] when using the Havel-Hakimi algorithm. - degseq=[0,1,2,2,2,2,3] + degseq = [0, 1, 2, 2, 2, 2, 3] H = nx.havel_hakimi_graph(degseq) - mapping = {6:0, 0:1, 4:3, 5:6, 3:4, 1:2, 2:5 } + mapping = {6: 0, 0: 1, 4: 3, 5: 6, 3: 4, 1: 2, 2: 5} self.H = nx.relabel_nodes(H, mapping) def test_trivial(self): """Empty graph""" G = nx.Graph() - assert_equal(nx.find_cores(G),{}) + assert_equal(nx.find_cores(G), {}) def test_find_cores(self): - cores=nx.find_cores(self.G) - nodes_by_core=[] - for val in [0,1,2,3]: - nodes_by_core.append( sorted([k for k in cores if cores[k]==val])) - assert_equal(nodes_by_core[0],[21]) - assert_equal(nodes_by_core[1],[17, 18, 19, 20]) - assert_equal(nodes_by_core[2],[9, 10, 11, 12, 13, 14, 15, 16]) + core = nx.find_cores(self.G) + nodes_by_core = [sorted([n for n in core if core[n] == val]) + for val in range(4)] + assert_equal(nodes_by_core[0], [21]) + assert_equal(nodes_by_core[1], [17, 18, 19, 20]) + assert_equal(nodes_by_core[2], [9, 10, 11, 12, 13, 14, 15, 16]) assert_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8]) def test_core_number(self): # smoke test real name - cores=nx.core_number(self.G) + cores = nx.core_number(self.G) def test_find_cores2(self): - cores=nx.find_cores(self.H) - nodes_by_core=[] - for val in [0,1,2]: - nodes_by_core.append( sorted([k for k in cores if cores[k]==val])) - assert_equal(nodes_by_core[0],[0]) - assert_equal(nodes_by_core[1],[1, 3]) - assert_equal(nodes_by_core[2],[2, 4, 5, 6]) + core = nx.find_cores(self.H) + nodes_by_core = [sorted([n for n in core if core[n] == val]) + for val in range(3)] + assert_equal(nodes_by_core[0], [0]) + assert_equal(nodes_by_core[1], [1, 3]) + assert_equal(nodes_by_core[2], [2, 4, 5, 6]) + + def test_directed_find_cores(Self): + '''core number had a bug for directed graphs found in issue #1959''' + # small example where too timid edge removal can make cn[2] = 3 + G = nx.DiGraph() + edges = [(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)] + G.add_edges_from(edges) + assert_equal(nx.core_number(G), {1: 2, 2: 2, 3: 2, 4: 2}) + # small example where too aggressive edge removal can make cn[2] = 2 + more_edges = [(1, 5), (3, 5), (4, 5), (3, 6), (4, 6), 
(5, 6)] + G.add_edges_from(more_edges) + assert_equal(nx.core_number(G), {1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3}) def test_main_core(self): - main_core_subgraph=nx.k_core(self.H) - assert_equal(sorted(main_core_subgraph.nodes()),[2,4,5,6]) + main_core_subgraph = nx.k_core(self.H) + assert_equal(sorted(main_core_subgraph.nodes()), [2, 4, 5, 6]) def test_k_core(self): # k=0 - k_core_subgraph=nx.k_core(self.H,k=0) - assert_equal(sorted(k_core_subgraph.nodes()),sorted(self.H.nodes())) + k_core_subgraph = nx.k_core(self.H, k=0) + assert_equal(sorted(k_core_subgraph.nodes()), sorted(self.H.nodes())) # k=1 - k_core_subgraph=nx.k_core(self.H,k=1) - assert_equal(sorted(k_core_subgraph.nodes()),[1,2,3,4,5,6]) - # k=2 - k_core_subgraph=nx.k_core(self.H,k=2) - assert_equal(sorted(k_core_subgraph.nodes()),[2,4,5,6]) + k_core_subgraph = nx.k_core(self.H, k=1) + assert_equal(sorted(k_core_subgraph.nodes()), [1, 2, 3, 4, 5, 6]) + # k = 2 + k_core_subgraph = nx.k_core(self.H, k=2) + assert_equal(sorted(k_core_subgraph.nodes()), [2, 4, 5, 6]) def test_main_crust(self): - main_crust_subgraph=nx.k_crust(self.H) - assert_equal(sorted(main_crust_subgraph.nodes()),[0,1,3]) + main_crust_subgraph = nx.k_crust(self.H) + assert_equal(sorted(main_crust_subgraph.nodes()), [0, 1, 3]) def test_k_crust(self): - # k=0 - k_crust_subgraph=nx.k_crust(self.H,k=2) - assert_equal(sorted(k_crust_subgraph.nodes()),sorted(self.H.nodes())) + # k = 0 + k_crust_subgraph = nx.k_crust(self.H, k=2) + assert_equal(sorted(k_crust_subgraph.nodes()), sorted(self.H.nodes())) # k=1 - k_crust_subgraph=nx.k_crust(self.H,k=1) - assert_equal(sorted(k_crust_subgraph.nodes()),[0,1,3]) + k_crust_subgraph = nx.k_crust(self.H, k=1) + assert_equal(sorted(k_crust_subgraph.nodes()), [0, 1, 3]) # k=2 - k_crust_subgraph=nx.k_crust(self.H,k=0) - assert_equal(sorted(k_crust_subgraph.nodes()),[0]) + k_crust_subgraph = nx.k_crust(self.H, k=0) + assert_equal(sorted(k_crust_subgraph.nodes()), [0]) def test_main_shell(self): - main_shell_subgraph=nx.k_shell(self.H) - assert_equal(sorted(main_shell_subgraph.nodes()),[2,4,5,6]) + main_shell_subgraph = nx.k_shell(self.H) + assert_equal(sorted(main_shell_subgraph.nodes()), [2, 4, 5, 6]) def test_k_shell(self): # k=0 - k_shell_subgraph=nx.k_shell(self.H,k=2) - assert_equal(sorted(k_shell_subgraph.nodes()),[2,4,5,6]) + k_shell_subgraph = nx.k_shell(self.H, k=2) + assert_equal(sorted(k_shell_subgraph.nodes()), [2, 4, 5, 6]) # k=1 - k_shell_subgraph=nx.k_shell(self.H,k=1) - assert_equal(sorted(k_shell_subgraph.nodes()),[1,3]) + k_shell_subgraph = nx.k_shell(self.H, k=1) + assert_equal(sorted(k_shell_subgraph.nodes()), [1, 3]) # k=2 - k_shell_subgraph=nx.k_shell(self.H,k=0) - assert_equal(sorted(k_shell_subgraph.nodes()),[0]) + k_shell_subgraph = nx.k_shell(self.H, k=0) + assert_equal(sorted(k_shell_subgraph.nodes()), [0]) def test_k_corona(self): # k=0 - k_corona_subgraph=nx.k_corona(self.H,k=2) - assert_equal(sorted(k_corona_subgraph.nodes()),[2,4,5,6]) + k_corona_subgraph = nx.k_corona(self.H, k=2) + assert_equal(sorted(k_corona_subgraph.nodes()), [2, 4, 5, 6]) # k=1 - k_corona_subgraph=nx.k_corona(self.H,k=1) - assert_equal(sorted(k_corona_subgraph.nodes()),[1]) + k_corona_subgraph = nx.k_corona(self.H, k=1) + assert_equal(sorted(k_corona_subgraph.nodes()), [1]) # k=2 - k_corona_subgraph=nx.k_corona(self.H,k=0) - assert_equal(sorted(k_corona_subgraph.nodes()),[0]) + k_corona_subgraph = nx.k_corona(self.H, k=0) + assert_equal(sorted(k_corona_subgraph.nodes()), [0])
k-core algorithm produces incorrect output for DiGraph
As per title, calling `nx.k_core(G, k = x)` does not return the x-core of a graph if `G` is a `DiGraph`. See attached file.
[6954_2011.txt](https://github.com/networkx/networkx/files/105086/6954_2011.txt)
To reproduce, run:

``` python
import networkx as nx

G = nx.DiGraph()

with open("6954_2011.txt", 'r') as f:
    for line in f:
        fields = line.strip().split('\t')
        G.add_edge(fields[0], fields[1])

core = nx.k_core(G, k = 12)
core.number_of_nodes() # Outputs "24"; expected output: "12"
```

There are only 12 nodes with (in+out) degree of at least 12 once you recursively remove all those that don't qualify. These are:
IND AUT CHE BEL USA ESP CHN FRA NLD GBR ITA DEU
While

``` python
core.nodes()
```

says `['BEL', 'SWE', 'DEU', 'GBR', 'KOR', 'USA', 'SGP', 'MYS', 'POL', 'NLD', 'HKG', 'FRA', 'CHE', 'ESP', 'CHN', 'AUT', 'THA', 'JPN', 'TUR', 'ITA', 'IND', 'RUS', 'NOR', 'CZE']`
The method seems to work OK for `nx.Graph`, or at least I've yet to find a counter-example.
How can a subgraph of 12 nodes have the node degree in that subgraph be >=12? The largest degree they can have in the subgraph is 11. Please look at the referred paper for the definition of k-core. I'm not saying that there is not a bug -- there might be. But you haven't demonstrated a bug.
It is a directed graph, as per the issue title. From the NetworkX documentation of the method:

```
For directed graphs the node degree is defined to be the in-degree + out-degree.
```

Therefore, in a 12-node subgraph the largest "degree" can be 22 (11 outgoing edges + 11 ingoing edges). So there can be 12 nodes in a 12-core; in fact there can be even as few as 7.
Yes, you are correct about the degree... I was mistaken. Still, the returned 12-core is bigger than your proposed subgraph (24 nodes vs 12 nodes). Is the returned subgraph not a 12-core?
The definition of core number seems to be, for each node, the largest k for which that node is in a k-core. Then to form the returned subgraph, we take all nodes with e.g. core number >= 12. The paper claims that the k-cores nest inside each other as k increases. So this way of choosing nodes seems to make sense from that perspective.
What I don't understand is that in your example the returned 12-core subgraph has nodes with degree 11 (within that subgraph). For example, 'NOR' has core number 12, but has degree 11 in the returned subgraph.
I'm not an expert in k-cores and it has been years since I even looked at this part of the code. Do you have any perspective on how core numbers relate to the degree of the nodes in the k-core subgraph?
As far as I know, the textbook definition of k-core is:

```
A k-core is a maximal subgraph that contains nodes of degree k or more.
```

This means that NOR cannot be part of the 12-core, because in that subgraph it has to have degree of at least 12. If NOR gets assigned a core number of 12, either the function assigning it is wrong, or the core number is not the correct way to determine if the node is in the k-core. My suspicion is that the former is true.
In particular, the error seems to be in the last line of the nested for loop:

``` python
core[u]-=1
```

Since it's a directed graph, if we are removing a node with both an in-link and an out-link, this line should remove 2, not 1. This is exactly what happens when we remove DNK from the set: it has a reciprocal link with NOR, but removes only one from NOR's core number.
Here's a printout of the `core_number` function when `u` is NOR; for every pair of nodes I print one line at the beginning of the block and one at the end, each showing the current core number of the node:

```
NOR ISL 19 1
NOR ISL 18 1
NOR CMR 18 3
NOR CMR 17 3
NOR AGO 17 4
NOR AGO 16 4
NOR DNK 16 10
NOR DNK 15 10 # Should be 14!
NOR IDN 15 11
NOR IDN 14 11
NOR FIN 14 11
NOR FIN 13 11
NOR CAN 13 11
NOR CAN 12 11
```

So the proposed fix would be an `if` before that instruction, checking if the nodes have a reciprocal connection, or always subtracting G.number_of_edges(u, v), or something like that.
Your investigation seems correct to me. It makes sense. Also, the paper only provides the algorithm for undirected graphs with a paragraph describing how to change it to work for directed graphs. The paragraph does not mention changing the decrement size depending on the number of edges between (u,v), and the algorithm details only state:
`dec(deg[u])`
So I see nothing in the paper to contradict what you are saying -- and it is the treatment that makes sense.
Clearly directed graphs are a poor sibling in this implementation of the algorithm (and in the original paper). My initial response was also indicative of neglect of directed situations. :{ Thank you for being persistent and for your detailed investigation! If you would like to submit a pull request that would be great -- if you prefer that I do the coding I will take it on. I will try to create a test on a small graph as well that shows this issue. I took a quick look at that paper again. I can't see for sure without some closer reading - but double check the `inc(bin[du])` there too. I prefer not doing the pull request myself. I'm not 101% confident that my treatment won't introduce other bugs, and I don't know the networkx code well enough to predict unexpected behaviors, so I prefer leaving it to you guys. OK... Thank you @mikk-c I'll take a look at it, try to come up with a test too and send a shout out to you when it's ready. @hagberg I agree that both the `inc(bin[du])` and `dec(deg[u])` are suspect (perhaps all the `inc` and `dec`). The paper explains that those two terms act to shift the node u to the next lower bin. But if the degree is reduced by 2 maybe it should shift by 2 bins. What if it leapfrogs the current vertex? I wonder if the author worked through the directed `deg = in+out` case carefully. It could be that the right thing to do is to consider the undirected version of the graph, unless you want to do something like http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf I'll look some more... we could make k_core `NotImplemented` for directed, with docs both for making it undirected and pointing to d_core. OK... here's a summary of what I found. The [referenced 2003 paper](http://arxiv.org/pdf/cs/0310049v1.pdf) has a [precursor 2002 paper](http://arxiv.org/pdf/cs/0202039.pdf), both by Vladimir Batagelj and Matjaž Zaveršnik. There they give a more general theoretical discussion which shows that this algorithm should work with any monotonic measure of centrality--not just degree. This includes in+out degree as we are pursuing for directed graphs (and is explicitly discussed there). So we should not rule out directed graphs for k_core--though we may want to implement D-cores too. Furthermore, digging through the code and algorithm some more I see that we form a neighbor `set` rather than a neighbor `list`. That removes duplicates -- and duplicate neighbors are precisely what occur if you use a DiGraph and have edges in both directions. By handling such a neighbor twice, we decrement `degree` and increment `bin` correctly. I have tested this change on the @mikk-c example graph and it corrects that example. If anyone wants to try it, change `set` to `list` on line 84 of core.py. I will update the code, docs and tests. Thank you to @mikk-c !! Longstanding Bug Finder. :)
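As a cross-check of the in+out-degree treatment discussed above, here is a minimal peeling sketch, independent of the binned Batagelj-Zaveršnik algorithm. Because it recomputes degrees after every removal, reciprocal edges are counted correctly by construction. The helper name `digraph_core_number` is ours, not part of the NetworkX API, and the O(n^2) loop is for verification on small graphs only:

``` python
import networkx as nx

def digraph_core_number(G):
    # Peel off a minimum-(in + out)-degree node at each step; a node's core
    # number is the largest degree threshold in force when it is removed.
    H = G.copy()
    core = {}
    level = 0
    while len(H):
        u = min(H, key=lambda n: H.in_degree(n) + H.out_degree(n))
        level = max(level, H.in_degree(u) + H.out_degree(u))
        core[u] = level
        H.remove_node(u)
    return core

# e.g. the 12-core members from the report:
# [n for n, c in digraph_core_number(G).items() if c >= 12]
```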
2016-01-29T18:00:19
networkx/networkx
1,976
networkx__networkx-1976
[ "1875" ]
293632863d5e895691b56dff4b12c937b2ea77dc
diff --git a/networkx/algorithms/connectivity/kcutsets.py b/networkx/algorithms/connectivity/kcutsets.py
--- a/networkx/algorithms/connectivity/kcutsets.py
+++ b/networkx/algorithms/connectivity/kcutsets.py
@@ -3,6 +3,7 @@
 Kanevsky all minimum node k cutsets algorithm.
 """
 from operator import itemgetter
+from itertools import combinations
 
 import networkx as nx
 from .utils import build_auxiliary_node_connectivity
@@ -86,6 +87,22 @@ def all_node_cuts(G, k=None, flow_func=None):
     if not nx.is_connected(G):
         raise nx.NetworkXError('Input graph is disconnected.')
 
+    # Address some corner cases first.
+    # For cycle graphs
+    if G.order() == G.size():
+        if all(2 == d for n, d in G.degree()):
+            seen = set()
+            for u in G:
+                for v in nx.non_neighbors(G, u):
+                    if (u, v) not in seen and (v, u) not in seen:
+                        yield {v, u}
+                        seen.add((v, u))
+            return
+    # For complete graphs
+    if nx.density(G) == 1:
+        for cut_set in combinations(G, len(G)-1):
+            yield set(cut_set)
+        return
     # Initialize data structures.
     # Keep track of the cuts already computed so we do not repeat them.
     seen = []
diff --git a/networkx/algorithms/connectivity/tests/test_kcutsets.py b/networkx/algorithms/connectivity/tests/test_kcutsets.py --- a/networkx/algorithms/connectivity/tests/test_kcutsets.py +++ b/networkx/algorithms/connectivity/tests/test_kcutsets.py @@ -241,3 +241,27 @@ def test_non_repeated_cuts(): assert_true(len(solution) == len(cuts)) for cut in cuts: assert_true(cut in solution) + + +def test_cycle_graph(): + G = nx.cycle_graph(5) + solution = [{0, 2}, {0, 3}, {1, 3}, {1, 4}, {2, 4}] + cuts = list(nx.all_node_cuts(G)) + assert_true(len(solution) == len(cuts)) + for cut in cuts: + assert_true(cut in solution) + + +def test_complete_graph(): + G = nx.complete_graph(5) + solution = [ + {0, 1, 2, 3}, + {0, 1, 2, 4}, + {0, 1, 3, 4}, + {0, 2, 3, 4}, + {1, 2, 3, 4}, + ] + cuts = list(nx.all_node_cuts(G)) + assert_true(len(solution) == len(cuts)) + for cut in cuts: + assert_true(cut in solution)
all_node_cuts returns too few and incorrect cuts. This could be a documentation issue, a bug, or a user understanding issue. Are these cases pathological for the algorithm? Given a square graph:

```
a -- b
|    |
c -- d
```

Based on a cursory reading of the documentation, I would have expected all_node_cuts() to return:

```
[{'a','d'}, {'c','b'}]
```

I get `[{'a','c'}]`, but if this is a valid node cut then surely {a,b}, {b,d}, {c,d} are equally valid, and a function called "all node cuts" should return them.

```
G = nx.Graph([('a','b'), ('a','c'), ('c','d'), ('b','d')])
print( G.nodes() )
print( G.edges() )
print( list(nx.all_node_cuts(G)) )

>>> ['a', 'c', 'b', 'd']
>>> [('a', 'c'), ('a', 'b'), ('c', 'd'), ('b', 'd')]
>>> [{'a', 'c'}]
```

Expanding to a hexagon, we see a similar pattern of node cuts. There are many symmetry-equivalent node cuts omitted from the results list. Two of the proposed cuts fail to create "two or more connected components" as the documentation suggests.

```
G = nx.Graph([('a','b'), ('b','c'), ('c','d'),('d','e'), ('e','f'),('f','a')])
list(nx.all_node_cuts(G))
>>> [{'a', 'c'}, {'a', 'b'}, {'b', 'c'}]
```
It seems to me like a bug. As you said, many symmetry-equivalent node cuts are being omitted, and in these polygon cases some of the node cuts in the result fail to create "two or more connected components". I tried some more graphs and I think it may have issues handling graphs where all vertices have the same degree. For example,
<pre>
K_5=nx.complete_graph(5)
list(nx.all_node_cuts(K_5))
>>> [{0, 1, 2, 3}]
</pre>
but it should actually also return all the other symmetry-equivalent node cuts. Did you notice any other pattern of graphs for which <code>all_node_cuts</code> gives incorrect results? I did not test additional network configurations and filed this report on behalf of another user who was trying to understand the output. After testing their example and the hexagon network, I suspected that there was either a documentation issue or an algorithm issue. It may be that the network does not conform to the scope of the algorithm and the right fix is to simply raise an error and add a note to the documentation.
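For graphs this small, a brute-force enumeration makes a useful oracle for what "all node cuts" should return. Below is a sketch under the strict definition (a cut must leave two or more components); note that under that strict reading a complete graph has no node cut at all, since removing any n-1 vertices leaves a single, trivially connected node -- which is why K_n needs a convention of its own:

``` python
from itertools import combinations
import networkx as nx

def brute_force_node_cuts(G):
    # Exponential check: yield every minimum-size vertex set whose
    # removal disconnects G.  For small test graphs only.
    k = nx.node_connectivity(G)
    for candidate in combinations(G, k):
        H = G.copy()
        H.remove_nodes_from(candidate)
        if len(H) > 0 and not nx.is_connected(H):
            yield set(candidate)

print(sorted(sorted(c) for c in brute_force_node_cuts(nx.cycle_graph(5))))
# [[0, 2], [0, 3], [1, 3], [1, 4], [2, 4]]
```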
2016-02-05T18:54:35
networkx/networkx
1,977
networkx__networkx-1977
[ "1810" ]
293632863d5e895691b56dff4b12c937b2ea77dc
diff --git a/networkx/algorithms/connectivity/cuts.py b/networkx/algorithms/connectivity/cuts.py --- a/networkx/algorithms/connectivity/cuts.py +++ b/networkx/algorithms/connectivity/cuts.py @@ -28,14 +28,12 @@ def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, This function returns the set of edges of minimum cardinality that, if removed, would destroy all paths among source and target in G. - Edge weights are not considered + Edge weights are not considered. See :meth:`minimum_cut` for + computing minimum cuts considering edge weights. Parameters ---------- G : NetworkX graph - Edges of the graph are expected to have an attribute called - 'capacity'. If this attribute is not present, the edge is - considered to have infinite capacity. s : node Source node for the flow.
Can't verify if minimum_st_node_cut actually works correctly or not Hi. I've been trying to determine the effect capacity has on cuts in a DiGraph, playing with different graph shapes and capacity values. Maybe I'm completely off base, but I've scoured the Internet for small min-cut examples (from university courses) and have yet to reproduce the same cut nodes/edges. My work is on another system, so I'll include a hand-produced example in-line. The graph is based on the following; look under "Practice": http://www.mathcs.emory.edu/~cheung/Courses/323/Syllabus/NetFlow/max-flow-min-cut.html

```
import networkx as nx # NetworkX v1.10
...

G = nx.DiGraph()
G.add_edge("s", "a", capacity=8)
G.add_edge("s", "b", capacity=7)
G.add_edge("s", "c", capacity=4)
G.add_edge("a", "b", capacity=2)
G.add_edge("b", "c", capacity=5)
G.add_edge("a", "d", capacity=3)
G.add_edge("b", "e", capacity=6)
G.add_edge("c", "f", capacity=2)
G.add_edge("a", "e", capacity=9)
G.add_edge("c", "e", capacity=7)
G.add_edge("d", "t", capacity=9)
G.add_edge("e", "t", capacity=5)
G.add_edge("f", "t", capacity=8)

cut_nodes = nx.algorithms.connectivity.minimum_node_cut(G, "s", "t")
print "Cut nodes: " + str(cut_nodes)
# Always produces cuts for d, f, e

cut_edges = nx.algorithms.connectivity.minimum_edge_cut(G, "s", "t")
print "Cut edges: " + str(cut_edges)
# Always produces cuts for d->t, f->t, e->t
```

The Emory course explanation indicates that a->d, e->t, c->f should be the cut edges by capacity. I've played with simpler graphs and the capacities never seem to affect the results.
I believe the minimum_cut function is the one you are looking for. Actually, using it produced the results you are expecting. Here is how I used it for your example.

```
cut_weight, partitions = nx.minimum_cut(G, "s", "t")
print "cut edges capacity " + str(cut_weight)
# The total weight of the edge cut
# Produces 10

print "Set of nodes in the 's' partition: " + str(partitions[0])
# Set of nodes in the partition containing s
# Produces ['a', 'c', 'b', 'e', 's']

print "Set of nodes in the 't' partition: " + str(partitions[1])
# Set of nodes in the partition containing t
# Produces (['d', 't', 'f'])

edge_cut_list = []
# edge_cut is simply the edges between both partitions computed by the following nested loop.
for p1_node in partitions[0]:
    for p2_node in partitions[1]:
        if G.has_edge(p1_node,p2_node):
            edge_cut_list.append((p1_node,p2_node))

print "Edges of the cut: " + str(edge_cut_list)
# Produces the expected result: [('a', 'd'), ('c', 'f'), ('e', 't')]
```

Thanks. I've been playing with this so much that I had a hard time producing an example that I could manually migrate into this post. In my original issue I used minimum_node_cut and minimum_edge_cut, but I get the same non-capacity results with minimum_node_st_cut and minimum_edge_st_cut. Why would I think of them as considering capacity? From the minimum_edge_st_cut documentation(https://networkx.github.io/documentation/latest/_modules/networkx/algorithms/connectivity/cuts.html#minimum_st_node_cut): "G : NetworkX graph Edges of the graph are expected to have an attribute called 'capacity'. If this attribute is not present, the edge is considered to have infinite capacity." I see now that minimum_edge_cut, minimum_node_cut and minimum_node_st_cut are all clear about cardinality, but the above documentation note confused me. I was originally looking at minimum_cut only, but the convenience of the node and edge cuts from the other methods lured me in their direction. BTW, I've played with minimum_cut this morning and the results look reasonable. I came up with a bit of logic to determine the cut nodes, however, and thought there would be a better way to do it (instead of looking at each node of each cut edge to determine whether node 1 or 2 is the source or target and adding the appropriate node to a cut set). The use of an aux and residual network with minimum_cut doesn't respect capacity. E.g. it looks like if you simply call minimum_cut with a build_auxiliary_edge_connectivity, with or without a residual network, the returned partitions do not reflect the capacity weighting. Is this correct?

cut_weight, partitions = nx.algorithms.connectivity.minimum_cut(G, "s", "t")
H = nx.algorithms.connectivity.build_auxiliary_edge_connectivity(G)
R = nx.algorithms.flow.build_residual_network(H, "capacity")
cut_weight, partitions = nx.algorithms.connectivity.minimum_cut(G, "s", "t", residual=R)

Simply having the aux and residual networks present seems to break support for capacity. Maybe I'm using them wrong or don't understand how they are supposed to be used. I am not really familiar with auxiliary and residual networks, so my response might be late. But when I figure something out I shall let you know as soon as I can. I've noticed that sometimes the function **minimum_edge_cut( digraphA)** returns an empty **set([])** in my examples. I would be glad to know whether this is correct. For a weakly connected DiGraph with more than 2 nodes, an edge cut should always exist. So maybe minimum_edge_cut should never return an empty set().
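To make the contrast concrete, here is a small sketch on the Emory graph from the report: `nx.minimum_cut` respects the `capacity` attribute, while the connectivity-package cut functions count edges only, and the boundary edges of the returned partition can be read off with `nx.edge_boundary` instead of a hand-rolled nested loop (the printed values are the ones quoted in this thread):

``` python
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([("s", "a", {"capacity": 8}), ("s", "b", {"capacity": 7}),
                  ("s", "c", {"capacity": 4}), ("a", "b", {"capacity": 2}),
                  ("b", "c", {"capacity": 5}), ("a", "d", {"capacity": 3}),
                  ("b", "e", {"capacity": 6}), ("c", "f", {"capacity": 2}),
                  ("a", "e", {"capacity": 9}), ("c", "e", {"capacity": 7}),
                  ("d", "t", {"capacity": 9}), ("e", "t", {"capacity": 5}),
                  ("f", "t", {"capacity": 8})])

cut_value, (S, T) = nx.minimum_cut(G, "s", "t")   # capacity-weighted
print(cut_value)                                  # 10
print(sorted(nx.edge_boundary(G, S, T)))          # [('a', 'd'), ('c', 'f'), ('e', 't')]
print(nx.minimum_edge_cut(G, "s", "t"))           # 3 edges; capacities ignored
```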
2016-02-05T19:40:52
networkx/networkx
2,026
networkx__networkx-2026
[ "2025" ]
e87325dcdccfe2b748a5a95d8f455b154b3639e9
diff --git a/networkx/algorithms/community/quality.py b/networkx/algorithms/community/quality.py --- a/networkx/algorithms/community/quality.py +++ b/networkx/algorithms/community/quality.py @@ -234,13 +234,6 @@ def coverage(G, partition): NetworkXError If ``partition`` is not a valid partition of the nodes of ``G``. - References - ---------- - .. [1] Santo Fortunato. - "Community Detection in Graphs". - *Physical Reports*, Volume 486, Issue 3--5 pp. 75--174 - <http://arxiv.org/abs/0906.0612> - Notes ----- If ``G`` is a multigraph, the multiplicity of edges is counted. diff --git a/networkx/algorithms/cuts.py b/networkx/algorithms/cuts.py --- a/networkx/algorithms/cuts.py +++ b/networkx/algorithms/cuts.py @@ -122,7 +122,7 @@ def volume(G, S, weight=None): ---------- .. [1] David Gleich. *Hierarchical Directed Spectral Graph Partitioning*. - <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf> + <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf> """ degree = G.out_degree if G.is_directed() else G.degree @@ -170,7 +170,7 @@ def normalized_cut_size(G, S, T=None, weight=None): ---------- .. [1] David Gleich. *Hierarchical Directed Spectral Graph Partitioning*. - <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf> + <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf> """ if T is None: @@ -217,7 +217,7 @@ def conductance(G, S, T=None, weight=None): ---------- .. [1] David Gleich. *Hierarchical Directed Spectral Graph Partitioning*. - <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf> + <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf> """ if T is None: @@ -310,7 +310,7 @@ def mixing_expansion(G, S, T=None, weight=None): .. [1] Vadhan, Salil P. "Pseudorandomness." *Foundations and Trends - in Theoretical Computer Science* 7.1–3 (2011): 1–336. + in Theoretical Computer Science* 7.1–3 (2011): 1–336. <http://dx.doi.org/10.1561/0400000010> """ @@ -350,7 +350,7 @@ def node_expansion(G, S): .. [1] Vadhan, Salil P. "Pseudorandomness." *Foundations and Trends - in Theoretical Computer Science* 7.1–3 (2011): 1–336. + in Theoretical Computer Science* 7.1–3 (2011): 1–336. <http://dx.doi.org/10.1561/0400000010> """ @@ -388,8 +388,8 @@ def boundary_expansion(G, S): ---------- .. [1] Vadhan, Salil P. "Pseudorandomness." - *Foundations and Trends - in Theoretical Computer Science* 7.1–3 (2011): 1–336. + *Foundations and Trends in Theoretical Computer Science* + 7.1–3 (2011): 1–336. <http://dx.doi.org/10.1561/0400000010> """ diff --git a/networkx/algorithms/reciprocity.py b/networkx/algorithms/reciprocity.py --- a/networkx/algorithms/reciprocity.py +++ b/networkx/algorithms/reciprocity.py @@ -19,7 +19,7 @@ def reciprocity(G, nodes=None): The reciprocity of a directed graph is defined as the ratio of the number of edges pointing in both directions to the total number of edges in the graph. - Formally, r = |{(u,v) \in G|(v,u) \in G}| / |{(u,v) \in G}|. + Formally, :math:`r = |{(u,v) \in G|(v,u) \in G}| / |{(u,v) \in G}|`. The reciprocity of a single node u is defined similarly, it is the ratio of the number of edges in both directions to
add_cycle, add_path, etc. must be removed from Graph documentation The documentation for the `Graph` class, for example, includes an entry in autosummary for `add_cycle`, etc., but these methods were removed in #1970 in favor of functions. The autosummary entries must be removed.
Looks like they should be added to the "functions" autosummary entries as well. There are quite a few other errors in the [sphinx doc builds](http://readthedocs.org/projects/networkx/builds/3764367/) too... mostly with communicability and cuts. I'll try to clean that up.
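For reference, a minimal sketch of the replacement pattern after #1970 (assuming a networkx 2.x install): the graph-building helpers are now module-level functions that take the graph as their first argument.

``` python
import networkx as nx

G = nx.Graph()
nx.add_cycle(G, [0, 1, 2, 3])   # was G.add_cycle([0, 1, 2, 3])
nx.add_path(G, [3, 4, 5])       # was G.add_path([3, 4, 5])
nx.add_star(G, [5, 6, 7, 8])    # was G.add_star([5, 6, 7, 8])
```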
2016-03-03T06:36:03
networkx/networkx
2,077
networkx__networkx-2077
[ "1997" ]
cf0819c360288e4091786ec6236c33d95145ac50
diff --git a/networkx/drawing/nx_pylab.py b/networkx/drawing/nx_pylab.py --- a/networkx/drawing/nx_pylab.py +++ b/networkx/drawing/nx_pylab.py @@ -1,3 +1,11 @@ +# Copyright (C) 2004-2016 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. +# +# Author: Aric Hagberg ([email protected]) """ ********** Matplotlib @@ -13,14 +21,6 @@ pygraphviz: http://pygraphviz.github.io/ """ -# Author: Aric Hagberg ([email protected]) - -# Copyright (C) 2004-2016 by -# Aric Hagberg <[email protected]> -# Dan Schult <[email protected]> -# Pieter Swart <[email protected]> -# All rights reserved. -# BSD license. import networkx as nx from networkx.drawing.layout import shell_layout,\ circular_layout,spectral_layout,spring_layout,random_layout @@ -35,8 +35,7 @@ 'draw_random', 'draw_spectral', 'draw_spring', - 'draw_shell', - 'draw_graphviz'] + 'draw_shell'] def draw(G, pos=None, ax=None, hold=None, **kwds): @@ -966,24 +965,6 @@ def draw_shell(G, **kwargs): draw(G, shell_layout(G, nlist=nlist), **kwargs) -def draw_graphviz(G, prog="neato", **kwargs): - """Draw networkx graph with graphviz layout. - - Parameters - ---------- - G : graph - A networkx graph - - prog : string, optional - Name of Graphviz layout program - - kwargs : optional keywords - See networkx.draw_networkx() for a description of optional keywords. - """ - pos = nx.drawing.graphviz_layout(G, prog) - draw(G, pos, **kwargs) - - def draw_nx(G, pos, **kwds): """For backward compatibility; use draw or draw_networkx.""" draw(G, pos, **kwds)
Fix location of graphviz_layout in draw_graphviz Lost in a rename/rearrange? Added a basic smoke-test that just calls the function with a graph to verify names are valid.
Ugh -- you are correct... it was an oversight on the rename/relocation process. Unfortunately it is more tricky than your fix covers because graphviz_layout is supplied by both the nx_pydot and nx_agraph modules. So the "nx_pydot" text you added will only work if they have pydot installed. If they only have pygraphviz then you will need to put nx_agraph there. Maybe it's best to try one and when it fails go to the other... Maybe @Michael-E-Rose or @jfinkels (or others) could put together some tests of the graphviz_layout function in nx_pylab.py (and non-test code in the function itself). Anybody looking for some coding fun? We need to make sure the function works in each installation case: 1) pydot and no pygraphviz, 2) pygraphviz and no pydot, 3) both, 4) neither. This function just does ``` python pos = nx.drawing.graphviz_layout(G, prog) draw(G, pos, **kwargs) ``` Can we just avoid the complexity of all of the possible graphviz installations and simply remove it? Removing it sounds good to me. :)
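With draw_graphviz removed, callers can compute positions themselves and fall back between the two backends. A sketch of that pattern (the wrapper name `graphviz_positions` is ours, not a NetworkX function):

``` python
import networkx as nx

def graphviz_positions(G, prog="neato"):
    # Try the pygraphviz backend first, then the pydot backend.
    try:
        return nx.nx_agraph.graphviz_layout(G, prog=prog)
    except ImportError:
        return nx.nx_pydot.graphviz_layout(G, prog=prog)

# usage: nx.draw(G, pos=graphviz_positions(G, prog="dot"))
```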
2016-04-14T21:20:29
networkx/networkx
2,081
networkx__networkx-2081
[ "2079" ]
023cfd51a1ff2b9692881ae1ecc91c246593853a
diff --git a/networkx/drawing/layout.py b/networkx/drawing/layout.py --- a/networkx/drawing/layout.py +++ b/networkx/drawing/layout.py @@ -290,7 +290,7 @@ def fruchterman_reingold_layout(G,dim=2,k=None, if len(G) == 0: return {} if len(G) == 1: - return {G.nodes()[0]: center} + return {next(G.nodes()): center} try: # Sparse matrix
spring layout breaks with single node graph This corner case (drawing 1 node) breaks spring layout and shouldn't ``` python import networkx as nx G=nx.Graph() G.add_node("spam") nx.draw_networkx(G, node_size = 2000, node_color = "white", pos=nx.spring_layout(G)) ``` http://stackoverflow.com/questions/36612991/how-to-draw-graph-having-single-node-in-networkx
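Until the fix lands, a user-side workaround sketch (assuming matplotlib is installed) is to place a lone node by hand rather than asking the force simulation to position it:

``` python
import networkx as nx

G = nx.Graph()
G.add_node("spam")
if len(G) == 1:
    pos = {next(iter(G)): (0.5, 0.5)}   # any fixed coordinate will do
else:
    pos = nx.spring_layout(G)
nx.draw_networkx(G, node_size=2000, node_color="white", pos=pos)
```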
2016-04-16T15:48:52
networkx/networkx
2,085
networkx__networkx-2085
[ "1981" ]
d0360c9056f7ae01a63e2f40c2c65681714acfc1
diff --git a/networkx/algorithms/assortativity/correlation.py b/networkx/algorithms/assortativity/correlation.py --- a/networkx/algorithms/assortativity/correlation.py +++ b/networkx/algorithms/assortativity/correlation.py @@ -190,13 +190,15 @@ def numeric_assortativity_coefficient(G, attribute, nodes=None): Assortativity measures the similarity of connections in the graph with respect to the given numeric attribute. - + The numeric attribute must be an integer. + Parameters ---------- G : NetworkX graph attribute : string - Node attribute key + Node attribute key. The corresponding attribute value must be an + integer. nodes: list or iterable (optional) Compute numeric assortativity only for attributes of nodes in diff --git a/networkx/algorithms/assortativity/mixing.py b/networkx/algorithms/assortativity/mixing.py --- a/networkx/algorithms/assortativity/mixing.py +++ b/networkx/algorithms/assortativity/mixing.py @@ -166,13 +166,15 @@ def degree_mixing_matrix(G, x='out', y='in', weight=None, def numeric_mixing_matrix(G,attribute,nodes=None,normalized=True): """Return numeric mixing matrix for attribute. + The attribute must be an integer. + Parameters ---------- G : graph NetworkX graph object. attribute : string - Node attribute key. + Node attribute key. The corresponding attribute must be an integer. nodes: list or iterable (optional) Build the matrix only with nodes in container. The default is all nodes.
calculating numeric assortativity requires int? I am trying to use networkx to calculate numeric assortativity based on a numeric attribute that I set on nodes. My node attributes are floats. When I call the assortativity function: `assort = nx.numeric_assortativity_coefficient(G,'float_attr')` I get the following error.

```
File "/some dir.../networkx/algorithms/assortativity/correlation.py", line 229, in numeric_assortativity_coefficient
a = numeric_mixing_matrix(G,attribute,nodes)
File "/some dir.../networkx/algorithms/assortativity/mixing.py", line 193, in numeric_mixing_matrix
mapping=dict(zip(range(m+1),range(m+1)))
TypeError: range() integer end argument expected, got float.
```

I checked the documentation page of the networkx assortativity algorithms and it did not say the numeric attributes have to be int. Does anyone know if that's required? BTW, I used the same network and a gender attribute (set to 0 and 1) to calculate both the attribute and the numeric assortativity. I had no problem with that. So it seems that the problem is with the int/float type of the node attribute. However, a lot of numeric node attributes are in fact continuous variables, which are best represented using a float instead of an int data type. I would suggest either 1) making it clear in the documentation that the node attribute used to calculate numeric assortativity must be an int, or 2) changing the code so that it can deal with float node attributes.
You are right - that function only works for discrete integer values. It wouldn't be that hard, I think, to generalize it to the continuous case if someone would like to do that. In the meantime the docs should be improved as you suggest. On it.
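For the continuous case, numeric assortativity is just the Pearson correlation of the attribute values at the two ends of each edge, so a workaround sketch is easy to write by hand (SciPy assumed available; the function name is ours, and `G.node` is the 1.x attribute-dict API):

``` python
from scipy.stats import pearsonr

def float_attribute_assortativity(G, attribute):
    # Pair up the attribute values across every edge; for undirected
    # graphs each edge contributes both orientations, as in the degree
    # assortativity formula.
    pairs = [(G.node[u][attribute], G.node[v][attribute])
             for u, v in G.edges()]
    if not G.is_directed():
        pairs += [(y, x) for x, y in pairs]
    x, y = zip(*pairs)
    return pearsonr(x, y)[0]
```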
2016-04-16T22:53:43
networkx/networkx
2,090
networkx__networkx-2090
[ "2087" ]
dbf738d8c93e3284ec4d43cb7ffab7d4e2aa98d6
diff --git a/networkx/algorithms/community/__init__.py b/networkx/algorithms/community/__init__.py --- a/networkx/algorithms/community/__init__.py +++ b/networkx/algorithms/community/__init__.py @@ -1,6 +1,6 @@ from networkx.algorithms.community.asyn_lpa import * from networkx.algorithms.community.centrality import * -from networkx.algorithms.community.generators import * +from networkx.algorithms.community.community_generators import * from networkx.algorithms.community.kclique import * from networkx.algorithms.community.kernighan_lin import * from networkx.algorithms.community.quality import * diff --git a/networkx/algorithms/community/generators.py b/networkx/algorithms/community/community_generators.py similarity index 100% rename from networkx/algorithms/community/generators.py rename to networkx/algorithms/community/community_generators.py
Namespace collision on nx.generators It looks like nx.generators is no longer the module in the folder networkx/generators. Instead it is the contents of networkx/algorithms/community/generators.py. Can we rename that file to avoid namespace collisions on the word generators? Maybe it should be moved to the generators directory and called `community_graphs.py`?
Either way is fine with me.
2016-04-18T00:21:56
networkx/networkx
2,092
networkx__networkx-2092
[ "2070" ]
d26d5e4de3a7a7fa609f56295dd8537bccffccd9
diff --git a/networkx/algorithms/dominance.py b/networkx/algorithms/dominance.py --- a/networkx/algorithms/dominance.py +++ b/networkx/algorithms/dominance.py @@ -126,17 +126,12 @@ def dominance_frontiers(G, start): """ idom = nx.immediate_dominators(G, start) - df = {u: [] for u in idom} - + df = {u: set() for u in idom} for u in idom: - if len(G.pred[u]) - int(u in G.pred[u]) >= 2: - p = set() + if len(G.pred[u]) >= 2: for v in G.pred[u]: - while v != idom[u] and v not in p: - p.add(v) - v = idom[v] - p.discard(u) - for v in p: - df[v].append(u) - + if v in idom: + while v != idom[u]: + df[v].add(u) + v = idom[v] return df
diff --git a/networkx/algorithms/tests/test_dominance.py b/networkx/algorithms/tests/test_dominance.py --- a/networkx/algorithms/tests/test_dominance.py +++ b/networkx/algorithms/tests/test_dominance.py @@ -99,28 +99,28 @@ def test_exceptions(self): def test_singleton(self): G = nx.DiGraph() G.add_node(0) - assert_equal(nx.dominance_frontiers(G, 0), {0: []}) + assert_equal(nx.dominance_frontiers(G, 0), {0: set()}) G.add_edge(0, 0) - assert_equal(nx.dominance_frontiers(G, 0), {0: []}) + assert_equal(nx.dominance_frontiers(G, 0), {0: set()}) def test_path(self): n = 5 G = nx.path_graph(n, create_using=nx.DiGraph()) assert_equal(nx.dominance_frontiers(G, 0), - {i: [] for i in range(n)}) + {i: set() for i in range(n)}) def test_cycle(self): n = 5 G = nx.cycle_graph(n, create_using=nx.DiGraph()) assert_equal(nx.dominance_frontiers(G, 0), - {i: [] for i in range(n)}) + {i: set() for i in range(n)}) def test_unreachable(self): n = 5 assert_greater(n, 1) G = nx.path_graph(n, create_using=nx.DiGraph()) assert_equal(nx.dominance_frontiers(G, n // 2), - {i: [] for i in range(n // 2, n)}) + {i: set() for i in range(n // 2, n)}) def test_irreducible1(self): # Graph taken from Figure 2 of @@ -129,9 +129,11 @@ def test_irreducible1(self): # Software Practice & Experience, 4:110, 2001. edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)] G = nx.DiGraph(edges) - assert_equal({u: sorted(df) + assert_equal({u: df for u, df in nx.dominance_frontiers(G, 5).items()}, - {1: [2], 2: [1], 3: [2], 4: [1], 5: []}) + {1: set([2]), 2: set([1]), 3: set([2]), + 4: set([1]), 5: set()}) + def test_irreducible2(self): # Graph taken from Figure 4 of @@ -142,18 +144,21 @@ def test_irreducible2(self): (6, 4), (6, 5)] G = nx.DiGraph(edges) assert_equal(nx.dominance_frontiers(G, 6), - {1: [2], 2: [1, 3], 3: [2], 4: [2, 3], 5: [1], 6: []}) + {1: set([2]), 2: set([1, 3]), 3: set([2]), 4: set([2, 3]) + , 5: set([1]), 6: set([])}) def test_domrel_png(self): # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)] G = nx.DiGraph(edges) assert_equal(nx.dominance_frontiers(G, 1), - {1: [], 2: [], 3: [5], 4: [5], 5: [2], 6: []}) + {1: set([]), 2: set([2]), 3: set([5]), 4: set([5]), + 5: set([2]), 6: set()}) # Test postdominance. with nx.utils.reversed(G): assert_equal(nx.dominance_frontiers(G, 6), - {1: [], 2: [], 3: [2], 4: [2], 5: [2], 6: []}) + {1: set(), 2: set([2]), 3: set([2]), 4: set([2]), + 5: set([2]), 6: set()}) def test_boost_example(self): # Graph taken from Figure 1 of @@ -162,10 +167,97 @@ def test_boost_example(self): (5, 7), (6, 4)] G = nx.DiGraph(edges) assert_equal(nx.dominance_frontiers(G, 0), - {0: [], 1: [], 2: [7], 3: [7], 4: [7], 5: [7], 6: [4], - 7: []}) + {0: set(), 1: set(), 2: set([7]), 3: set([7]), + 4: set([4,7]), 5: set([7]), 6: set([4]), 7: set()}) # Test postdominance. 
with nx.utils.reversed(G): assert_equal(nx.dominance_frontiers(G, 7), - {0: [], 1: [], 2: [1], 3: [1], 4: [1], 5: [1], 6: [4], - 7: []}) + {0: set(), 1: set(), 2: set([1]), 3: set([1]), + 4: set([1,4]), 5: set([1]), 6: set([4]), 7: set()}) + + + def test_discard_issue(self): + # https://github.com/networkx/networkx/issues/2071 + g = nx.DiGraph() + g.add_edges_from([ + ('b0','b1'), + ('b1', 'b2'), + ('b2', 'b3'), + ('b3','b1'), + ('b1','b5'), + ('b5', 'b6'), + ('b5', 'b8'), + ('b6', 'b7'), + ('b8', 'b7'), + ('b7', 'b3'), + ('b3', 'b4') + ] + ) + df = nx.dominance_frontiers(g, 'b0') + assert_equal(df, {'b4': set(), 'b5': set(['b3']), 'b6': set(['b7']), + 'b7': set(['b3']), + 'b0': set(), 'b1': set(['b1']), 'b2': set(['b3']), + 'b3': set(['b1']), 'b8': set(['b7'])}) + + def test_loop(self): + g = nx.DiGraph() + g.add_edges_from([('a','b'),('b','c'),('b','a')]) + df = nx.dominance_frontiers(g, 'a') + assert_equal(df, {'a': set(), 'b': set(), 'c': set()}) + + def test_missing_immediate_doms(self): + # see https://github.com/networkx/networkx/issues/2070 + g = nx.DiGraph() + edges = [ + ('entry_1', 'b1'), + ('b1', 'b2'), + ('b2', 'b3'), + ('b3', 'exit'), + ('entry_2', 'b3') + ] + + # entry_1 + # | + # b1 + # | + # b2 entry_2 + # | / + # b3 + # | + # exit + + g.add_edges_from(edges) + # formerly raised KeyError on entry_2 when parsing b3 + # because entry_2 does not have immediate doms (no path) + nx.dominance_frontiers(g,'entry_1') + + def test_loops_larger(self): + # from + # http://ecee.colorado.edu/~waite/Darmstadt/motion.html + g = nx.DiGraph() + edges = [ + ('entry', 'exit'), + ('entry', '1'), + ('1', '2'), + ('2', '3'), + ('3', '4'), + ('4', '5'), + ('5', '6'), + ('6', 'exit'), + ('6', '2'), + ('5', '3'), + ('4', '4') + ] + + g.add_edges_from(edges) + df = nx.dominance_frontiers(g,'entry') + answer = {'entry': set(), + '1': set(['exit']), + '2': set(['exit', '2']), + '3': set(['exit', '3', '2']), + '4': set(['exit', '4','3', '2']), + '5': set(['exit', '3', '2']), + '6': set(['exit', '2']), + 'exit': set()} + for n in df: + assert_equal(set(df[n]),set(answer[n]))
Small bug in networkx.algorithms.dominance There is a small bug in the `networkx.algorithms.dominance` package. The function `immediate_dominators` should be given a digraph and a starting point. However, if a given graph has several entry points, say `entry_1` and `entry_2` (e.g. a dead code block in a control flow graph), and the starting point is specified as `entry_1`, `entry_2` will not be present as a key in the `idom` dict (the return value). This is correct behavior concerning the definition of dominators, but it can lead to a `KeyError` exception in a call to the `dominance_frontiers` function, because it does not check that predecessors are reachable from the start. Below is a snippet representing the issue.

```
import networkx as nx
from networkx.algorithms.dominance import *

g = nx.DiGraph()
edges = [
('entry_1', 'b1'),
('b1', 'b2'),
('b2', 'b3'),
('b3', 'exit'),
('entry_2', 'b3')
]

# entry_1
# |
# b1
# |
# b2 entry_2
# | /
# b3
# |
# exit

g.add_edges_from(edges)

# will raise KeyError on entry_2 when parsing b3
# because entry_2 does not have immediate doms (no path)
dominance_frontiers(g,'entry_1')
```

The suggestion is to process only those predecessors that have a path from the given starting point. This can be achieved by adding a `v in idom` check, which in this context is equivalent to having a path from the starting point.

P.S. tested on networkx version 1.11
Can you be more specific about the fix? I'm not an expert in this algorithm. Just add a `v in idom` check before doing anything in the for loop of the `dominance_frontiers` function, as I suggested before. After computing immediate dominators, the keys of `idom` will be only those nodes that have an immediate dominator, which is equivalent to having a path from the starting point.
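Spelled out, the guarded loop looks as follows; this mirrors the patch above, where the `v in idom` membership test skips predecessors that are unreachable from `start`:

``` python
import networkx as nx

def dominance_frontiers_guarded(G, start):
    idom = nx.immediate_dominators(G, start)
    df = {u: set() for u in idom}
    for u in idom:
        if len(G.pred[u]) >= 2:          # join points only
            for v in G.pred[u]:
                if v in idom:            # skip unreachable predecessors
                    while v != idom[u]:
                        df[v].add(u)
                        v = idom[v]
    return df
```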
2016-04-18T16:08:11
networkx/networkx
2,096
networkx__networkx-2096
[ "1749" ]
d26d5e4de3a7a7fa609f56295dd8537bccffccd9
diff --git a/networkx/drawing/layout.py b/networkx/drawing/layout.py --- a/networkx/drawing/layout.py +++ b/networkx/drawing/layout.py @@ -1,3 +1,12 @@ +# Copyright (C) 2004-2016 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. +# +# Authors: Aric Hagberg <[email protected]>, +# Dan Schult <[email protected]> """ ****** Layout @@ -5,31 +14,30 @@ Node positioning algorithms for graph drawing. -The default scales and centering for these layouts are -typically squares with side [0, 1] or [0, scale]. -The two circular layout routines (circular_layout and -shell_layout) have size [-1, 1] or [-scale, scale]. -""" -# Authors: Aric Hagberg <[email protected]>, -# Dan Schult <[email protected]> +For `random_layout()` the possible resulting shape +is a square of side [0, scale] (default: [0, 1]) +Changing `center` shifts the layout by that amount. -# Copyright (C) 2004-2016 by -# Aric Hagberg <[email protected]> -# Dan Schult <[email protected]> -# Pieter Swart <[email protected]> -# All rights reserved. -# BSD license. +For the other layout routines, the extent is +[center - scale, center + scale] (default: [-1, 1]). + +Warning: Most layout routines have only been tested in 2-dimensions. + +""" +from __future__ import division import collections import networkx as nx __all__ = ['circular_layout', 'random_layout', + 'rescale_layout', 'shell_layout', 'spring_layout', 'spectral_layout', 'fruchterman_reingold_layout'] -def process_params(G, center, dim): + +def _process_params(G, center, dim): # Some boilerplate code. import numpy as np @@ -50,7 +58,7 @@ def process_params(G, center, dim): return G, center -def random_layout(G, dim=2, center=None): +def random_layout(G, center=None, dim=2): """Position nodes uniformly at random in the unit square. For every node, a position is generated by choosing each of dim @@ -61,18 +69,18 @@ def random_layout(G, dim=2, center=None): Parameters ---------- G : NetworkX graph or list of nodes - A position will be assigned to every node in G. - - dim : int - Dimension of layout. + A position will be assigned to every node in G. center : array-like or None - Coordinate pair around which to center the layout. + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. Returns ------- pos : dict - A dictionary of positions keyed by node + A dictionary of positions keyed by node Examples -------- @@ -82,7 +90,7 @@ def random_layout(G, dim=2, center=None): """ import numpy as np - G, center = process_params(G, center, dim) + G, center = _process_params(G, center, dim) shape = (len(G), dim) pos = np.random.random(shape) + center pos = pos.astype(np.float32) @@ -91,7 +99,7 @@ def random_layout(G, dim=2, center=None): return pos -def circular_layout(G, dim=2, scale=1, center=None): +def circular_layout(G, scale=1, center=None, dim=2): # dim=2 only """Position nodes on a circle. @@ -99,24 +107,24 @@ def circular_layout(G, dim=2, scale=1, center=None): ---------- G : NetworkX graph or list of nodes - dim : int - Dimension of layout, currently only dim=2 is supported - scale : float Scale factor for positions center : array-like or None - Coordinate pair around which to center the layout. + Coordinate pair around which to center the layout. 
+ + dim : int + Dimension of layout, currently only dim=2 is supported Returns ------- - dict : - A dictionary of positions keyed by node + pos : dict + A dictionary of positions keyed by node Examples -------- - >>> G=nx.path_graph(4) - >>> pos=nx.circular_layout(G) + >>> G = nx.path_graph(4) + >>> pos = nx.circular_layout(G) Notes ----- @@ -126,7 +134,7 @@ def circular_layout(G, dim=2, scale=1, center=None): """ import numpy as np - G, center = process_params(G, center, dim) + G, center = _process_params(G, center, dim) if len(G) == 0: pos = {} @@ -137,12 +145,13 @@ def circular_layout(G, dim=2, scale=1, center=None): theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi theta = theta.astype(np.float32) pos = np.column_stack([np.cos(theta), np.sin(theta)]) - pos = _rescale_layout(pos, scale=scale) + center + pos = rescale_layout(pos, scale=scale) + center pos = dict(zip(G, pos)) return pos -def shell_layout(G, nlist=None, dim=2, scale=1, center=None): + +def shell_layout(G, nlist=None, scale=1, center=None, dim=2): """Position nodes in concentric circles. Parameters @@ -152,24 +161,24 @@ def shell_layout(G, nlist=None, dim=2, scale=1, center=None): nlist : list of lists List of node lists for each shell. - dim : int - Dimension of layout, currently only dim=2 is supported - scale : float Scale factor for positions center : array-like or None - Coordinate pair around which to center the layout. + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout, currently only dim=2 is supported Returns ------- - dict : - A dictionary of positions keyed by node + pos : dict + A dictionary of positions keyed by node Examples -------- >>> G = nx.path_graph(4) - >>> shells = [[0], [1,2,3]] + >>> shells = [[0], [1, 2, 3]] >>> pos = nx.shell_layout(G, shells) Notes @@ -180,14 +189,13 @@ def shell_layout(G, nlist=None, dim=2, scale=1, center=None): """ import numpy as np - G, center = process_params(G, center, dim) + G, center = _process_params(G, center, dim) if len(G) == 0: return {} if len(G) == 1: return {nx.utils.arbitrary_element(G): center} - if nlist is None: # draw the whole graph in one shell nlist = [list(G)] @@ -199,50 +207,48 @@ def shell_layout(G, nlist=None, dim=2, scale=1, center=None): # else start at r=1 radius = 1.0 - npos={} + npos = {} for nodes in nlist: # Discard the extra angle since it matches 0 radians. theta = np.linspace(0, 1, len(nodes) + 1)[:-1] * 2 * np.pi theta = theta.astype(np.float32) pos = np.column_stack([np.cos(theta), np.sin(theta)]) - pos = _rescale_layout(pos, scale=scale * radius / len(nlist)) + center + pos = rescale_layout(pos, scale=scale * radius / len(nlist)) + center npos.update(zip(nodes, pos)) radius += 1.0 return npos -def fruchterman_reingold_layout(G,dim=2,k=None, +def fruchterman_reingold_layout(G, k=None, pos=None, fixed=None, iterations=50, weight='weight', scale=1.0, - center=None): + center=None, + dim=2): """Position nodes using Fruchterman-Reingold force-directed algorithm. Parameters ---------- G : NetworkX graph or list of nodes - dim : int - Dimension of layout - k : float (default=None) - Optimal distance between nodes. If None the distance is set to - 1/sqrt(n) where n is the number of nodes. Increase this value - to move nodes farther apart. + Optimal distance between nodes. If None the distance is set to + 1/sqrt(n) where n is the number of nodes. Increase this value + to move nodes farther apart. 
pos : dict or None optional (default=None) - Initial positions for nodes as a dictionary with node as keys - and values as a list or tuple. If None, then use random initial - positions. + Initial positions for nodes as a dictionary with node as keys + and values as a coordinate list or tuple. If None, then use + random initial positions. fixed : list or None optional (default=None) - Nodes to keep fixed at initial position. + Nodes to keep fixed at initial position. iterations : int optional (default=50) - Number of iterations of spring-force relaxation + Number of iterations of spring-force relaxation weight : string or None optional (default='weight') The edge attribute that holds the numerical value used for @@ -250,27 +256,30 @@ def fruchterman_reingold_layout(G,dim=2,k=None, scale : float (default=1.0) Scale factor for positions. The nodes are positioned - in a box of size [0,scale] x [0,scale]. + in a box of size [0, scale] x [0, scale]. center : array-like or None - Coordinate pair around which to center the layout. + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout Returns ------- - dict : - A dictionary of positions keyed by node + pos : dict + A dictionary of positions keyed by node Examples -------- - >>> G=nx.path_graph(4) - >>> pos=nx.spring_layout(G) + >>> G = nx.path_graph(4) + >>> pos = nx.spring_layout(G) - # The same using longer function name - >>> pos=nx.fruchterman_reingold_layout(G) + # The same using longer but equivalent function name + >>> pos = nx.fruchterman_reingold_layout(G) """ import numpy as np - G, center = process_params(G, center, dim) + G, center = _process_params(G, center, dim) if fixed is not None: nfixed = dict(zip(G, range(len(G)))) @@ -278,14 +287,14 @@ def fruchterman_reingold_layout(G,dim=2,k=None, if pos is not None: # Determine size of existing domain to adjust initial positions - dom_size = max(flatten(pos.values())) + dom_size = max(coord for coord in pos_tup for pos_tup in pos.values()) shape = (len(G), dim) pos_arr = np.random.random(shape) * dom_size + center - for i,n in enumerate(G): + for i, n in enumerate(G): if n in pos: pos_arr[i] = np.asarray(pos[n]) else: - pos_arr=None + pos_arr = None if len(G) == 0: return {} @@ -296,53 +305,58 @@ def fruchterman_reingold_layout(G,dim=2,k=None, # Sparse matrix if len(G) < 500: # sparse solver for large graphs raise ValueError - A = nx.to_scipy_sparse_matrix(G,weight=weight,dtype='f') + A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='f') if k is None and fixed is not None: - # We must adjust k by domain size for layouts that are not near 1x1 - nnodes,_ = A.shape - k = dom_size / np.sqrt(nnodes) - pos = _sparse_fruchterman_reingold(A, dim, k, pos_arr, fixed, iterations) + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _sparse_fruchterman_reingold(A, k, pos_arr, fixed, + iterations, dim) except: - A = nx.to_numpy_matrix(G,weight=weight) + A = nx.to_numpy_matrix(G, weight=weight) if k is None and fixed is not None: - # We must adjust k by domain size for layouts that are not near 1x1 - nnodes,_ = A.shape - k = dom_size / np.sqrt(nnodes) - pos = _fruchterman_reingold(A, dim, k, pos_arr, fixed, iterations) + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _fruchterman_reingold(A, k, pos_arr, fixed, iterations, dim) if fixed is None: - pos = _rescale_layout(pos, scale=scale) + center - pos = dict(zip(G,pos)) + 
pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) return pos -spring_layout=fruchterman_reingold_layout -def _fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None, - iterations=50): +spring_layout = fruchterman_reingold_layout + + +def _fruchterman_reingold(A, k=None, pos=None, fixed=None, + iterations=50, dim=2): # Position nodes in adjacency matrix A using Fruchterman-Reingold # Entry point for NetworkX graph is fruchterman_reingold_layout() try: import numpy as np except ImportError: - raise ImportError("_fruchterman_reingold() requires numpy: http://scipy.org/ ") + msg = "_fruchterman_reingold() requires numpy: http://scipy.org/ " + raise ImportError(msg) try: - nnodes,_=A.shape + nnodes, _ = A.shape except AttributeError: - raise nx.NetworkXError( - "fruchterman_reingold() takes an adjacency matrix as input") + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) - A=np.asarray(A) # make sure we have an array instead of a matrix + # make sure we have an array instead of a matrix + A = np.asarray(A) if pos is None: # random initial positions - pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype) + pos = np.asarray(np.random.random((nnodes, dim)), dtype=A.dtype) else: # make sure positions are of same type as matrix - pos=pos.astype(A.dtype) + pos = pos.astype(A.dtype) # optimal distance between nodes if k is None: - k=np.sqrt(1.0/nnodes) + k = np.sqrt(1.0/nnodes) # the initial "temperature" is about .1 of domain area (=1x1) # this is the largest step allowed in the dynamics. # We need to calculate this in case our fixed positions force our domain @@ -350,118 +364,117 @@ def _fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None, t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1]))*0.1 # simple cooling scheme. # linearly step down by dt on each iteration so last iteration is size dt. 
- dt=t/float(iterations+1) - delta = np.zeros((pos.shape[0],pos.shape[0],pos.shape[1]),dtype=A.dtype) + dt = t/float(iterations+1) + delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype) # the inscrutable (but fast) version # this is still O(V^2) # could use multilevel methods to speed this up significantly for iteration in range(iterations): # matrix of difference between points for i in range(pos.shape[1]): - delta[:,:,i]= pos[:,i,None]-pos[:,i] + delta[:, :, i] = pos[:, i, None] - pos[:, i] # distance between points - distance=np.sqrt((delta**2).sum(axis=-1)) + distance = np.sqrt((delta**2).sum(axis=-1)) # enforce minimum distance of 0.01 - distance=np.where(distance<0.01,0.01,distance) + distance = np.where(distance < 0.01, 0.01, distance) # displacement "force" - displacement=np.transpose(np.transpose(delta)*\ - (k*k/distance**2-A*distance/k))\ - .sum(axis=1) + displacement = np.transpose(np.transpose(delta) * + (k * k / distance**2 - A * distance / k) + ).sum(axis=1) # update positions - length=np.sqrt((displacement**2).sum(axis=1)) - length=np.where(length<0.01,0.1,length) - delta_pos=np.transpose(np.transpose(displacement)*t/length) + length = np.sqrt((displacement**2).sum(axis=1)) + length = np.where(length < 0.01, 0.1, length) + delta_pos = np.transpose(np.transpose(displacement) * t / length) if fixed is not None: # don't change positions of fixed nodes - delta_pos[fixed]=0.0 - pos+=delta_pos + delta_pos[fixed] = 0.0 + pos += delta_pos # cool temperature - t-=dt + t -= dt return pos -def _sparse_fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None, - iterations=50): +def _sparse_fruchterman_reingold(A, k=None, pos=None, fixed=None, + iterations=50, dim=2): # Position nodes in adjacency matrix A using Fruchterman-Reingold # Entry point for NetworkX graph is fruchterman_reingold_layout() # Sparse version try: import numpy as np except ImportError: - raise ImportError("_sparse_fruchterman_reingold() requires numpy: http://scipy.org/ ") + m = "_sparse_fruchterman_reingold() requires numpy: http://scipy.org/" + raise ImportError(m) try: - nnodes,_=A.shape + nnodes, _ = A.shape except AttributeError: - raise nx.NetworkXError( - "fruchterman_reingold() takes an adjacency matrix as input") + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) try: - from scipy.sparse import spdiags,coo_matrix + from scipy.sparse import spdiags, coo_matrix except ImportError: - raise ImportError("_sparse_fruchterman_reingold() scipy numpy: http://scipy.org/ ") + msg = "_sparse_fruchterman_reingold() scipy numpy: http://scipy.org/ " + raise ImportError(msg) # make sure we have a LIst of Lists representation try: - A=A.tolil() + A = A.tolil() except: - A=(coo_matrix(A)).tolil() + A = (coo_matrix(A)).tolil() if pos is None: # random initial positions - pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype) + pos = np.asarray(np.random.random((nnodes, dim)), dtype=A.dtype) else: # make sure positions are of same type as matrix - pos=pos.astype(A.dtype) + pos = pos.astype(A.dtype) # no fixed nodes if fixed is None: - fixed=[] + fixed = [] # optimal distance between nodes if k is None: - k=np.sqrt(1.0/nnodes) + k = np.sqrt(1.0/nnodes) # the initial "temperature" is about .1 of domain area (=1x1) # this is the largest step allowed in the dynamics. - t=0.1 + t = 0.1 # simple cooling scheme. # linearly step down by dt on each iteration so last iteration is size dt. 
- dt=t/float(iterations+1) + dt = t / float(iterations+1) - displacement=np.zeros((dim,nnodes)) + displacement = np.zeros((dim, nnodes)) for iteration in range(iterations): - displacement*=0 + displacement *= 0 # loop over rows for i in range(A.shape[0]): if i in fixed: continue # difference between this row's node position and all others - delta=(pos[i]-pos).T + delta = (pos[i] - pos).T # distance between points - distance=np.sqrt((delta**2).sum(axis=0)) + distance = np.sqrt((delta**2).sum(axis=0)) # enforce minimum distance of 0.01 - distance=np.where(distance<0.01,0.01,distance) + distance = np.where(distance < 0.01, 0.01, distance) # the adjacency matrix row - Ai=np.asarray(A.getrowview(i).toarray()) + Ai = np.asarray(A.getrowview(i).toarray()) # displacement "force" - displacement[:,i]+=\ - (delta*(k*k/distance**2-Ai*distance/k)).sum(axis=1) + displacement[:, i] +=\ + (delta * (k * k / distance**2 - Ai * distance / k)).sum(axis=1) # update positions - length=np.sqrt((displacement**2).sum(axis=0)) - length=np.where(length<0.01,0.1,length) - pos+=(displacement*t/length).T + length = np.sqrt((displacement**2).sum(axis=0)) + length = np.where(length < 0.01, 0.1, length) + pos += (displacement * t / length).T # cool temperature - t-=dt + t -= dt return pos -def spectral_layout(G, dim=2, weight='weight', scale=1, center=None): +def spectral_layout(G, weight='weight', scale=1, center=None, dim=2): """Position nodes using the eigenvectors of the graph Laplacian. Parameters ---------- G : NetworkX graph or list of nodes - dim : int - Dimension of layout - weight : string or None optional (default='weight') The edge attribute that holds the numerical value used for the edge weight. If None, then all edge weights are 1. @@ -470,17 +483,20 @@ def spectral_layout(G, dim=2, weight='weight', scale=1, center=None): Scale factor for positions center : array-like or None - Coordinate pair around which to center the layout. + Coordinate pair around which to center the layout. 
+ + dim : int + Dimension of layout Returns ------- - dict : - A dictionary of positions keyed by node + pos : dict + A dictionary of positions keyed by node Examples -------- - >>> G=nx.path_graph(4) - >>> pos=nx.spectral_layout(G) + >>> G = nx.path_graph(4) + >>> pos = nx.spectral_layout(G) Notes ----- @@ -493,7 +509,7 @@ def spectral_layout(G, dim=2, weight='weight', scale=1, center=None): # handle some special cases that break the eigensolvers import numpy as np - G, center = process_params(G, center, dim) + G, center = _process_params(G, center, dim) if len(G) <= 2: if len(G) == 0: @@ -502,16 +518,16 @@ def spectral_layout(G, dim=2, weight='weight', scale=1, center=None): pos = np.array([center]) else: pos = np.array([np.zeros(dim), np.array(center)*2.0]) - return dict(zip(G,pos)) + return dict(zip(G, pos)) try: # Sparse matrix - if len(G)< 500: # dense solver is faster for small graphs + if len(G) < 500: # dense solver is faster for small graphs raise ValueError A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='d') # Symmetrize directed graphs if G.is_directed(): A = A + np.transpose(A) - pos = _sparse_spectral(A,dim) + pos = _sparse_spectral(A, dim) except (ImportError, ValueError): # Dense matrix A = nx.to_numpy_matrix(G, weight=weight) @@ -520,8 +536,8 @@ def spectral_layout(G, dim=2, weight='weight', scale=1, center=None): A = A + np.transpose(A) pos = _spectral(A, dim) - pos = _rescale_layout(pos, scale) + center - pos = dict(zip(G,pos)) + pos = rescale_layout(pos, scale) + center + pos = dict(zip(G, pos)) return pos @@ -531,26 +547,28 @@ def _spectral(A, dim=2): try: import numpy as np except ImportError: - raise ImportError("spectral_layout() requires numpy: http://scipy.org/ ") + msg = "spectral_layout() requires numpy: http://scipy.org/ " + raise ImportError(msg) try: - nnodes,_=A.shape + nnodes, _ = A.shape except AttributeError: - raise nx.NetworkXError(\ - "spectral() takes an adjacency matrix as input") + msg = "spectral() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) # form Laplacian matrix # make sure we have an array instead of a matrix - A=np.asarray(A) - I=np.identity(nnodes,dtype=A.dtype) - D=I*np.sum(A,axis=1) # diagonal of degrees - L=D-A + A = np.asarray(A) + I = np.identity(nnodes, dtype=A.dtype) + D = I * np.sum(A, axis=1) # diagonal of degrees + L = D - A - eigenvalues,eigenvectors=np.linalg.eig(L) + eigenvalues, eigenvectors = np.linalg.eig(L) # sort and keep smallest nonzero - index=np.argsort(eigenvalues)[1:dim+1] # 0 index is zero eigenvalue - return np.real(eigenvectors[:,index]) + index = np.argsort(eigenvalues)[1:dim + 1] # 0 index is zero eigenvalue + return np.real(eigenvectors[:, index]) + -def _sparse_spectral(A,dim=2): +def _sparse_spectral(A, dim=2): # Input adjacency matrix A # Uses sparse eigenvalue solver from scipy # Could use multilevel methods here, see Koren "On spectral graph drawing" @@ -558,44 +576,68 @@ def _sparse_spectral(A,dim=2): import numpy as np from scipy.sparse import spdiags except ImportError: - raise ImportError("_sparse_spectral() requires scipy & numpy: http://scipy.org/ ") + msg = "_sparse_spectral() requires scipy & numpy: http://scipy.org/ " + raise ImportError(msg) try: from scipy.sparse.linalg.eigen import eigsh except ImportError: # scipy <0.9.0 names eigsh differently from scipy.sparse.linalg import eigen_symmetric as eigsh try: - nnodes,_=A.shape + nnodes, _ = A.shape except AttributeError: - raise nx.NetworkXError(\ - "sparse_spectral() takes an adjacency matrix as input") + msg = 
"sparse_spectral() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) # form Laplacian matrix - data=np.asarray(A.sum(axis=1).T) - D=spdiags(data,0,nnodes,nnodes) - L=D-A + data = np.asarray(A.sum(axis=1).T) + D = spdiags(data, 0, nnodes, nnodes) + L = D - A - k=dim+1 + k = dim + 1 # number of Lanczos vectors for ARPACK solver.What is the right scaling? - ncv=max(2*k+1,int(np.sqrt(nnodes))) + ncv = max(2 * k + 1, int(np.sqrt(nnodes))) # return smallest k eigenvalues and eigenvectors - eigenvalues,eigenvectors=eigsh(L,k,which='SM',ncv=ncv) - index=np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue - return np.real(eigenvectors[:,index]) + eigenvalues, eigenvectors = eigsh(L, k, which='SM', ncv=ncv) + index = np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue + return np.real(eigenvectors[:, index]) + + +def rescale_layout(pos, scale=1): + """Return scaled position array to (-scale, scale) in all axes. + The function acts on NumPy arrays which hold position information. + Each position is one row of the array. The dimension of the space + equals the number of columns. Each coordinate in one column. + + To rescale, the mean (center) is subtracted from each axis separately. + Then all values are scaled so that the largest magnitude value + from all axes equals `scale` (thus, the aspect ratio is preserved). + The resulting NumPy Array is returned (order of rows unchanged). + + Parameters + ---------- + pos : numpy array + positions to be scaled. Each row is a position. -def _rescale_layout(pos,scale=1): - # rescale to (-scale,scale) in all axes + scale : number (default: 1) + The size of the resulting extent in all directions. + Returns + ------- + pos : numpy array + scaled positions. Each row is a position. + + """ # Find max length over all dimensions - lim=0 # max coordinate for all axes + lim = 0 # max coordinate for all axes for i in range(pos.shape[1]): - pos[:,i]-=pos[:,i].mean() - lim=max(pos[:,i].max(),lim) - # rescale to (-scale,scale) in all directions, preserves aspect + pos[:, i] -= pos[:, i].mean() + lim = max(pos[:, i].max(), lim) + # rescale to (-scale, scale) in all directions, preserves aspect if lim > 0: for i in range(pos.shape[1]): - pos[:,i]*=scale/lim + pos[:, i] *= scale / lim return pos @@ -610,17 +652,3 @@ def setup_module(module): import scipy except: raise SkipTest("SciPy not available") - -def flatten(l): - try: - bs = basestring - except NameError: - # Py3k - bs = str - for el in l: - if isinstance(el, collections.Iterable) and not isinstance(el, bs): - for sub in flatten(el): - yield sub - else: - yield el -
Inconsistent signatures for layout methods I noticed some good additions have been made to layouts for the 1.10 version. However, I also noticed that `random_layout` is the only method lacking the `scale` parameter.

```
random_layout(G, dim=2, center=None)
circular_layout(G, dim=2, scale=1, center=None)
```

The `center` parameter has been added in v1.10. Adding `scale` as well would have made it more intuitive to switch between the two layouts. A similar argument could be made for the other methods; the signatures are quite inconsistent. Notice the order of the parameters `dim`, `scale`, `center` and `weight`.

```
circular_layout(G[, dim, scale, center])
random_layout(G[, dim, center])
shell_layout(G[, nlist, dim, scale, center])
spring_layout(G, dim, k, pos, fixed, iterations, weight, scale, center)
spectral_layout(G, dim, weight, scale, center)
```

They could be re-organized for v2, putting parameters with the same name in the same place:

```
circular_layout(G[, dim, scale, center])
random_layout(G[, dim, scale, center])
shell_layout(G[, dim, scale, center, nlist])
spring_layout(G[, dim, scale, center, weight, k, pos, fixed, iterations])
spectral_layout(G[, dim, scale, center, weight])
```

EDIT: another welcome addition would be a `seed` parameter for `random_layout`, and possibly also for `spring_layout` (right now there is no way to make them deterministic).
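A minimal sketch (not part of the original issue) of the manual workaround implied here: recentering and rescaling a layout that lacks `scale`/`center`, using the same mean-subtract-then-scale rule as the `rescale_layout` helper in the diff above. The `rescale` function is a hypothetical name, not a NetworkX API.

```python
import networkx as nx
import numpy as np

def rescale(pos, scale=1, center=(0, 0)):
    # Hypothetical helper: shift each axis to zero mean, then scale so the
    # largest-magnitude coordinate equals `scale` (aspect ratio preserved),
    # and finally translate to `center`.
    coords = np.array([pos[n] for n in pos], dtype=float)
    coords -= coords.mean(axis=0)
    lim = np.abs(coords).max()
    if lim > 0:
        coords *= scale / lim
    coords += np.asarray(center, dtype=float)
    return dict(zip(pos, coords))

G = nx.path_graph(4)
pos = rescale(nx.random_layout(G), scale=2, center=(1, 1))
```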
I don't think we should push too hard on making the parameters the same for all the layout functions. In particular, `random_layout` was specifically chosen to be different. `circular_` and `shell_` naturally surround the origin, while `random_` and `spring_` are more natural on the unit square. I'm not sure `spectral_` should even have a "center" argument at all.

For `random_layout` it is especially important to be clear that the size affects the range of "possible values" and not the range of actual results. If you choose uniformly random values on `[0, 1)` and then rescale them so the max value is "size", then it's not easy to figure out the random distribution you chose from. Much better to choose uniformly random on `[0, size)`.

I think this ticket is really about aligning users' expectations with the code. Users expect the arguments to be the same, and we should be more clear that the layout range is different for different layouts. Perhaps the differences would be clearer if we used different names for the arguments in layouts which return circular results than in those with square ones. For example, "scale" and "center" for circular domains vs "size" and "shift" for square domains.

```
circular_layout(G, scale, center)
shell_layout(G, nlist, scale, center)
random_layout(G, size, shift)
spring_layout(G, weight, k, pos, fixed, iterations, size, shift)
```

I don't particularly care about the order of the parameters. But if we're going to go there, then `dim` should not be the first argument. The default is almost always used. Perhaps the last three should be `scale, center, dim` so that the layout-specific arguments can be used without keywords: e.g. `spectral_layout(G, "weight")`. I would propose (leaving spectral's return values as they are because I don't have a better suggestion):

```
circular_layout(G[, scale, center, dim])  --> [-scale, scale] + center
shell_layout(G[, nlist, scale, center, dim])
random_layout(G[, size, shift, dim])  --> [0, size] + shift
spring_layout(G[, weight, k, pos, fixed, iterations, size, shift, dim])
spectral_layout(G[, weight, scale, center, dim])  --> [-scale, scale] + center
```

@AgostinoSturaro what do you think?
It's true that (0.5, 0.5) is not the exact center of [0, 1), but it should fit expectations. On the other hand, if `center` "shifts" the `random_layout`, then it should probably be called shift ;) I see no optimal solution. You decide what to do here.

About `dim`: up to v1.9.1, it was the first parameter for all layouts, except for `shell_layout`.

```
circular_layout(G[, dim, scale])
random_layout(G[, dim])
shell_layout(G[, nlist, dim, scale])  # here
spring_layout(G[, dim, k, pos, fixed, ...])
spectral_layout(G[, dim, weight, scale])
```

In fact, that tricked me when I used `shell_layout`, because I had memorized `dim` as the second parameter, so I expected it to be there.

To prevent breakages for a minor version (v1.11), I propose these changes only:

```
circular_layout(G[, dim, scale, center])  # where center defaults to (0, 0)
random_layout(G[, dim, scale, center])    # where center defaults to (0.5, 0.5)
```

I agree with your idea of moving the less used parameters to the end. This could be done for v2, to prevent API breakages, right? Notice how `shell_layout`, `spring_layout` and `spectral_layout` all have `dim` and `scale` parameters, but in a different order, and there is no particular reason for the difference. Here is my proposal for them:

```
shell_layout(G[, nlist, dim, scale, center])
spring_layout(G[, k, pos, fixed, iterations, weight, dim, scale, center])
spectral_layout(G[, weight, dim, scale, center])
```

With those changes, `dim, scale, center` would always be at the end, and we would gain consistency. @dschult What do you think?

Does this need fixing for v1.11? #1760 was merged back in Sept to v1.11 to fix this issue for that version. It added scale and center for all but `random_layout`, which did not get the scale parameter (because scaling the points to be in a specific range makes the random process wonky; scaling the potential domain makes sense but differs from the other usage of the keyword `scale`). I plan to revisit this for v2, but thought we should not do the other changes on a minor release.

Discussion came back to this issue when #1892 was submitted, but I think of it as being for v2, not for v1.11. The proposal for v1.11 above (two comments up) suggests changes to `circular_layout()` that have already been made, and then to add `scale` to `random_layout()`, which I had decided not to do because it is potentially confusing.
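A small numerical illustration (mine, not from the thread) of the point about rescaling: drawing uniformly on `[0, size)` gives independent coordinates, while rescaling a `[0, 1)` sample so its maximum equals `size` couples every point to the whole sample and distorts the marginal distribution. The `seed`-style determinism requested in the issue is sketched with NumPy's `default_rng`.

```python
import numpy as np

rng = np.random.default_rng(0)  # deterministic, in the spirit of a `seed` parameter
size = 10

# Independent draws, uniform on [0, size): easy to reason about.
a = rng.uniform(0, size, 1000)

# Draws on [0, 1) rescaled so the sample maximum equals `size`:
# each rescaled value now depends on the whole sample, so the
# marginal distribution is no longer uniform on [0, size).
b = rng.uniform(0, 1, 1000)
b = b * (size / b.max())

print(a.max())  # close to, but below, `size`
print(b.max())  # exactly `size` by construction
```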
2016-04-19T17:20:20
networkx/networkx
2,101
networkx__networkx-2101
[ "1619" ]
545120b643d7d1bdbe54a02fa3d45d917d140b1b
diff --git a/networkx/algorithms/operators/binary.py b/networkx/algorithms/operators/binary.py --- a/networkx/algorithms/operators/binary.py +++ b/networkx/algorithms/operators/binary.py @@ -316,6 +316,10 @@ def compose(G, H, name=None): ----- It is recommended that G and H be either both directed or both undirected. Attributes from H take precedent over attributes from G. + + For MultiGraphs, the edges are identified by incident nodes AND edge-key. + This can cause surprises (i.e., edge `(1, 2)` may or may not be the same + in two graphs) if you use MultiGraph without keeping track of edge keys. """ if not G.is_multigraph() == H.is_multigraph(): raise nx.NetworkXError('G and H must both be graphs or multigraphs.')
compose() erases some MultiGraph edges If a user wants to compose two multigraphs, it is very likely that they want to use all of the edges present in both. In MultiGraphs, edges which share the same (source, target) pair are not the same edge. Currently, edges which share the same (source, target, key) tuple are treated as the same edge; since keys are assigned in insertion order by default, rather than having anything to do with the data, the end user just sees that an arbitrary subset of their edges has gone missing.

``` python
import networkx as nx

a, b = nx.MultiGraph(), nx.MultiGraph()
a.add_path([1,2])
b.add_path([1,2])
nx.compose(a,b).number_of_edges() == a.number_of_edges() + b.number_of_edges()
>>> False
```

The documentation states that the edge sets are unioned. If this edge set is hashed by (source, target) pair, then the function cannot be advertised as applicable to MultiGraphs, because it collapses all multiedges. If the edge set is hashed by (source, target, key), as it is currently, then there is unexpected and possibly arbitrary behaviour which is not well documented. The edge set should be hashed by a UUID for MultiGraphs (i.e. all edges are distinct), in order to reflect how these classes are actually going to be used.
I can imagine users being confused by this if they use MultiGraphs without setting keys (which many don't). On the other hand, changing it as you suggest could be confusing to users who do pay attention to the edge keys. At the very least a note should be included in the docs. Perhaps there should be an argument to describe how to treat the edge keys.
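A small sketch (not from the thread) of the behaviour the merged doc note describes: with auto-assigned keys the two edges are identified, while explicit, distinct keys keep both. This assumes the standard `MultiGraph.add_edge(u, v, key=...)` API.

```python
import networkx as nx

a, b = nx.MultiGraph(), nx.MultiGraph()
a.add_edge(1, 2)  # auto-assigned key 0
b.add_edge(1, 2)  # auto-assigned key 0 as well
print(nx.compose(a, b).number_of_edges())  # 1: the (1, 2, 0) edges collide

a2, b2 = nx.MultiGraph(), nx.MultiGraph()
a2.add_edge(1, 2, key="a")
b2.add_edge(1, 2, key="b")
print(nx.compose(a2, b2).number_of_edges())  # 2: distinct keys, distinct edges
```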
2016-04-22T16:50:54
networkx/networkx
2,103
networkx__networkx-2103
[ "1489" ]
19dd6ce4fc89aad7ebf80e177673686bcf79313e
diff --git a/networkx/generators/__init__.py b/networkx/generators/__init__.py --- a/networkx/generators/__init__.py +++ b/networkx/generators/__init__.py @@ -13,6 +13,7 @@ from networkx.generators.geometric import * from networkx.generators.intersection import * from networkx.generators.joint_degree_seq import * +from networkx.generators.lattice import * from networkx.generators.line import * from networkx.generators.nonisomorphic_trees import * from networkx.generators.random_clustered import * diff --git a/networkx/generators/classic.py b/networkx/generators/classic.py --- a/networkx/generators/classic.py +++ b/networkx/generators/classic.py @@ -1,29 +1,31 @@ -""" -Generators for some classic graphs. +# Copyright (C) 2004-2017 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. +# +# Authors: Aric Hagberg ([email protected]) +# Pieter Swart ([email protected]) +"""Generators for some classic graphs. The typical graph generator is called as follows: ->>> G=nx.complete_graph(100) +>>> G = nx.complete_graph(100) returning the complete graph on n nodes labeled 0, .., 99 as a simple graph. Except for empty_graph, all the generators in this module return a Graph class (i.e. a simple, undirected graph). """ -# Authors: Aric Hagberg ([email protected]) and Pieter Swart ([email protected]) - -# Copyright (C) 2004-2016 by -# Aric Hagberg <[email protected]> -# Dan Schult <[email protected]> -# Pieter Swart <[email protected]> -# All rights reserved. -# BSD license. from __future__ import division import itertools import networkx as nx from networkx.algorithms.bipartite.generators import complete_bipartite_graph +from networkx.classes import Graph +from networkx.exception import NetworkXError from networkx.utils import accumulate from networkx.utils import flatten from networkx.utils import nodes_or_number @@ -39,9 +41,6 @@ 'dorogovtsev_goltsev_mendes_graph', 'empty_graph', 'full_rary_tree', - 'grid_graph', - 'grid_2d_graph', - 'hypercube_graph', 'ladder_graph', 'lollipop_graph', 'null_graph', @@ -52,9 +51,9 @@ 'wheel_graph'] -#------------------------------------------------------------------- +# ------------------------------------------------------------------- # Some Classic Graphs -#------------------------------------------------------------------- +# ------------------------------------------------------------------- def _tree_edges(n, r): # helper function for trees @@ -101,7 +100,7 @@ def full_rary_tree(r, n, create_using=None): .. [1] An introduction to data structures and algorithms, James Andrew Storer, Birkhauser Boston 2001, (page 225). """ - G = nx.empty_graph(n, create_using) + G = empty_graph(n, create_using) G.add_edges_from(_tree_edges(n, r)) return G @@ -165,8 +164,8 @@ def barbell_graph(m1, m2, create_using=None): `m1, ..., m1+m2-1` for the path, and `m1+m2, ..., 2*m1+m2-1` for the right barbell. - The 3 subgraphs are joined via the edges `(m1-1, m1)` and - `(m1+m2-1, m1+m2)`. If `m2=0`, this is merely two complete + The 3 subgraphs are joined via the edges `(m1-1, m1)` and + `(m1+m2-1, m1+m2)`. If `m2=0`, this is merely two complete graphs joined together. 
This graph is an extremal example in David Aldous @@ -174,12 +173,12 @@ def barbell_graph(m1, m2, create_using=None): """ if create_using is not None and create_using.is_directed(): - raise nx.NetworkXError("Directed Graph not supported") + raise NetworkXError("Directed Graph not supported") if m1 < 2: - raise nx.NetworkXError( + raise NetworkXError( "Invalid graph description, m1 should be >=2") if m2 < 0: - raise nx.NetworkXError( + raise NetworkXError( "Invalid graph description, m2 should be >=0") # left barbell @@ -220,7 +219,7 @@ def complete_graph(n, create_using=None): 9 >>> G.size() 36 - >>> G = nx.complete_graph(range(11,14)) + >>> G = nx.complete_graph(range(11, 14)) >>> list(G.nodes()) [11, 12, 13] >>> G = nx.complete_graph(4, nx.DiGraph()) @@ -277,9 +276,9 @@ def circulant_graph(n, offsets, create_using=None): Examples -------- - Many well-known graph families are subfamilies of the circulant graphs; for - example, to generate the cycle graph on n points, we connect every vertex to - every other at offset plus or minus one. For n = 10, + Many well-known graph families are subfamilies of the circulant graphs; + for example, to generate the cycle graph on n points, we connect every + vertex to every other at offset plus or minus one. For n = 10, >>> import networkx >>> G = networkx.generators.classic.circulant_graph(10, [1]) @@ -335,7 +334,7 @@ def cycle_graph(n, create_using=None): n_orig, nodes = n G = empty_graph(nodes, create_using) G.name = "cycle_graph(%s)" % (n_orig,) - G.add_edges_from(nx.utils.pairwise(nodes)) + G.add_edges_from(pairwise(nodes)) G.add_edge(nodes[-1], nodes[0]) return G @@ -349,9 +348,9 @@ def dorogovtsev_goltsev_mendes_graph(n, create_using=None): """ if create_using is not None: if create_using.is_directed(): - raise nx.NetworkXError("Directed Graph not supported") + raise NetworkXError("Directed Graph not supported") if create_using.is_multigraph(): - raise nx.NetworkXError("Multigraph not supported") + raise NetworkXError("Multigraph not supported") G = empty_graph(0, create_using) G.name = "Dorogovtsev-Goltsev-Mendes Graph" G.add_edge(0, 1) @@ -382,12 +381,12 @@ def empty_graph(n=0, create_using=None): with the new graph. Usually used to set the type of the graph. For example: - >>> G=nx.empty_graph(10) + >>> G = nx.empty_graph(10) >>> G.number_of_nodes() 10 >>> G.number_of_edges() 0 - >>> G=nx.empty_graph("ABC") + >>> G = nx.empty_graph("ABC") >>> G.number_of_nodes() 3 >>> sorted(G) @@ -405,8 +404,8 @@ def empty_graph(n=0, create_using=None): Firstly, the variable create_using can be used to create an empty digraph, multigraph, etc. For example, - >>> n=10 - >>> G=nx.empty_graph(n, create_using=nx.DiGraph()) + >>> n = 10 + >>> G = nx.empty_graph(n, create_using=nx.DiGraph()) will create an empty digraph on n nodes. @@ -421,7 +420,7 @@ def empty_graph(n=0, create_using=None): """ if create_using is None: # default empty graph is a simple graph - G = nx.Graph() + G = Graph() else: G = create_using G.clear() @@ -432,116 +431,17 @@ def empty_graph(n=0, create_using=None): return G -@nodes_or_number([0, 1]) -def grid_2d_graph(m, n, periodic=False, create_using=None): - """ Return the 2d grid graph of mxn nodes - - The grid graph has each node connected to its four nearest neighbors. - - Parameters - ========== - m, n : int or iterable container of nodes (default = 0) - If an integer, nodes are from `range(n)`. - If a container, those become the coordinate of the node. 
- periodic : bool (default = False) - If True will connect boundary nodes in periodic fashion. - create_using : Graph, optional (default Graph()) - If provided this graph is cleared of nodes and edges and filled - with the new graph. Usually used to set the type of the graph. - """ - G = empty_graph(0, create_using) - row_name, rows = m - col_name, columns = n - G.name = "grid_2d_graph(%s, %s)" % (row_name, col_name) - G.add_nodes_from((i, j) for i in rows for j in columns) - G.add_edges_from(((i, j), (pi, j)) - for pi, i in pairwise(rows) for j in columns) - G.add_edges_from(((i, j), (i, pj)) - for i in rows for pj, j in pairwise(columns)) - if G.is_directed(): - G.add_edges_from(((pi, j), (i, j)) - for pi, i in pairwise(rows) for j in columns) - G.add_edges_from(((i, pj), (i, j)) - for i in rows for pj, j in pairwise(columns)) - if periodic: - if len(columns) > 2: - f = columns[0] - l = columns[-1] - G.add_edges_from(((i, f), (i, l)) for i in rows) - if G.is_directed(): - G.add_edges_from(((i, l), (i, f)) for i in rows) - if len(rows) > 2: - f = rows[0] - l = rows[-1] - G.add_edges_from(((f, j), (l, j)) for j in columns) - if G.is_directed(): - G.add_edges_from(((l, j), (f, j)) for j in columns) - G.name = "periodic_grid_2d_graph(%s,%s)" % (m, n) - return G - - -def grid_graph(dim, periodic=False): - """ Return the n-dimensional grid graph. - - 'dim' is a tuple or list with the size in each dimension or an - iterable of nodes for each dimension. The dimension of - the grid_graph is the length of the tuple or list 'dim'. - - E.g. G=grid_graph(dim=[2, 3]) produces a 2x3 grid graph. - - E.g. G=grid_graph(dim=[range(7, 9), range(3, 6)]) produces a 2x3 grid graph. - - If periodic=True then join grid edges with periodic boundary conditions. - - """ - dlabel = "%s" % str(dim) - if not dim: - G = empty_graph(0) - G.name = "grid_graph(%s)" % dlabel - return G - if periodic: - func = cycle_graph - else: - func = path_graph - - G = func(dim[0]) - for current_dim in dim[1:]: - # order matters: copy before it is cleared during the creation of Gnew - Gold = G.copy() - Gnew = func(current_dim) - # explicit: create_using=None - # This is so that we get a new graph of Gnew's class. - G = nx.cartesian_product(Gnew, Gold) - # graph G is done but has labels of the form (1, (2, (3, 1))) - # so relabel - H = nx.relabel_nodes(G, flatten) - H.name = "grid_graph(%s)" % dlabel - return H - - -def hypercube_graph(n): - """Return the n-dimensional hypercube. - - Node labels are the integers 0 to 2**n - 1. - - """ - dim = n * [2] - G = grid_graph(dim) - G.name = "hypercube_graph_(%d)" % n - return G - - def ladder_graph(n, create_using=None): """Return the Ladder graph of length n. - This is two rows of n nodes, with + This is two paths of n nodes, with each pair connected by a single edge. Node labels are the integers 0 to 2*n - 1. """ if create_using is not None and create_using.is_directed(): - raise nx.NetworkXError("Directed Graph not supported") + raise NetworkXError("Directed Graph not supported") G = empty_graph(2 * n, create_using) G.name = "ladder_graph_(%d)" % n G.add_edges_from(pairwise(range(n))) @@ -570,7 +470,7 @@ def lollipop_graph(m, n, create_using=None): Notes ===== - The 2 subgraphs are joined via an edge (m-1, m). + The 2 subgraphs are joined via an edge (m-1, m). If n=0, this is merely a complete graph. 
(This graph is an extremal example in David Aldous and Jim @@ -584,12 +484,12 @@ def lollipop_graph(m, n, create_using=None): if isinstance(m, int): n_nodes = [len(m_nodes) + i for i in n_nodes] if create_using is not None and create_using.is_directed(): - raise nx.NetworkXError("Directed Graph not supported") + raise NetworkXError("Directed Graph not supported") if M < 2: - raise nx.NetworkXError( + raise NetworkXError( "Invalid graph description, m should be >=2") if N < 0: - raise nx.NetworkXError( + raise NetworkXError( "Invalid graph description, n should be >=0") # the ball @@ -633,14 +533,14 @@ def path_graph(n, create_using=None): n_name, nodes = n G = empty_graph(nodes, create_using) G.name = "path_graph(%s)" % (n_name,) - G.add_edges_from(nx.utils.pairwise(nodes)) + G.add_edges_from(pairwise(nodes)) return G @nodes_or_number(0) def star_graph(n, create_using=None): """ Return the star graph - + The star graph consists of one center node connected to n outer nodes. Parameters @@ -663,7 +563,7 @@ def star_graph(n, create_using=None): first = nodes[0] G = empty_graph(nodes, create_using) if G.is_directed(): - raise nx.NetworkXError("Directed Graph not supported") + raise NetworkXError("Directed Graph not supported") G.add_edges_from((first, v) for v in nodes[1:]) G.name = "star_graph(%s)" % (n_name,) return G @@ -704,7 +604,7 @@ def turan_graph(n, r): """ if not 1 <= r <= n: - raise nx.NetworkXError("Must satisfy 1 <= r <= n") + raise NetworkXError("Must satisfy 1 <= r <= n") partitions = [n//r]*(r-(n%r))+[n//r+1]*(n%r) G = complete_multipartite_graph(*partitions) @@ -715,7 +615,7 @@ def turan_graph(n, r): @nodes_or_number(0) def wheel_graph(n, create_using=None): """ Return the wheel graph - + The wheel graph consists of a hub node connected to a cycle of (n-1) nodes. Parameters @@ -731,7 +631,7 @@ def wheel_graph(n, create_using=None): """ n_name, nodes = n if n_name == 0: - G = nx.empty_graph(0, create_using=create_using) + G = empty_graph(0, create_using=create_using) G.name = "wheel_graph(0)" return G G = star_graph(nodes, create_using) @@ -798,7 +698,7 @@ def complete_multipartite_graph(*subset_sizes): complete_bipartite_graph """ # The complete multipartite graph is an undirected simple graph. - G = nx.Graph() + G = Graph() G.name = 'complete_multiparite_graph{}'.format(subset_sizes) if len(subset_sizes) == 0: @@ -817,7 +717,7 @@ def complete_multipartite_graph(*subset_sizes): for (i, subset) in enumerate(subsets): G.add_nodes_from(subset, subset=i) except TypeError: - raise nx.NetworkXError("Arguments must be all ints or all iterables") + raise NetworkXError("Arguments must be all ints or all iterables") # Across subsets, all vertices should be adjacent. # We can use itertools.combinations() because undirected. diff --git a/networkx/generators/lattice.py b/networkx/generators/lattice.py new file mode 100644 --- /dev/null +++ b/networkx/generators/lattice.py @@ -0,0 +1,391 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2004-2017 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. 
+# +# Authors: Aric Hagberg ([email protected]) +# Pieter Swart ([email protected]) +# Joel Miller ([email protected]) +# Dan Schult ([email protected]) +"""Functions for generating grid graphs and lattices + +The :func:`grid_2d_graph`, :func:`triangular_lattice_graph`, and +:func:`hexagonal_lattice_graph` functions correspond to the three +`regular tilings of the plane`_, the square, triangular, and hexagonal +tilings, respectively. :func:`grid_graph` and :func:`hypercube_graph` +are similar for arbitrary dimensions. Useful relevent discussion can +be found about `Triangular Tiling`_, and `Square, Hex and Triangle Grids`_ + +.. _regular tilings of the plane: https://en.wikipedia.org/wiki/List_of_regular_polytopes_and_compounds#Euclidean_tilings +.. _Square, Hex and Triangle Grids: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/ +.. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling + +""" +from __future__ import division + +from math import sqrt + +from networkx.classes import Graph +from networkx.classes import set_node_attributes +from networkx.algorithms.minors import contracted_nodes +from networkx.algorithms.operators.product import cartesian_product +from networkx.exception import NetworkXError +from networkx.relabel import relabel_nodes +from networkx.utils import flatten +from networkx.utils import is_list_of_ints +from networkx.utils import nodes_or_number +from networkx.utils import pairwise +from networkx.generators.classic import cycle_graph +from networkx.generators.classic import empty_graph +from networkx.generators.classic import path_graph + +__all__ = ['grid_2d_graph', 'grid_graph', 'hypercube_graph', + 'triangular_lattice_graph', 'hexagonal_lattice_graph'] + + +@nodes_or_number([0, 1]) +def grid_2d_graph(m, n, periodic=False, create_using=None): + """Returns the two-dimensional grid graph. + + The grid graph has each node connected to its four nearest neighbors. + + Parameters + ---------- + m, n : int or iterable container of nodes + If an integer, nodes are from `range(n)`. + If a container, elements become the coordinate of the nodes. + + periodic : bool (default: False) + If this is ``True`` the nodes on the grid boundaries are joined + to the corresponding nodes on the opposite grid boundaries. + + create_using : NetworkX graph (default: Graph()) + If provided this graph is cleared of nodes and edges and filled + with the new graph. Usually used to set the type of the graph. + + Returns + ------- + NetworkX graph + The (possibly periodic) grid graph of the specified dimensions. + + """ + G = empty_graph(0, create_using) + row_name, rows = m + col_name, cols = n + G.add_nodes_from((i, j) for i in rows for j in cols) + G.add_edges_from(((i, j), (pi, j)) + for pi, i in pairwise(rows) for j in cols) + G.add_edges_from(((i, j), (i, pj)) + for i in rows for pj, j in pairwise(cols)) + if periodic is True: + if len(rows) > 2: + first = rows[0] + last = rows[-1] + G.add_edges_from(((first, j), (last, j)) for j in cols) + if len(cols) > 2: + first = cols[0] + last = cols[-1] + G.add_edges_from(((i, first), (i, last)) for i in rows) + # both directions for directed + if G.is_directed(): + G.add_edges_from((v, u) for u, v in G.edges()) + + # set name + G.name = "grid_2d_graph(%s, %s)" % (row_name, col_name) + if periodic is True: + G.name = "periodic_" + G.name + return G + + +def grid_graph(dim, periodic=False): + """Returns the *n*-dimensional grid graph. 
+ + The dimension *n* is the length of the list `dim` and the size in + each dimension is the value of the corresponding list element. + + Parameters + ---------- + dim : list or tuple of numbers or iterables of nodes + 'dim' is a tuple or list with, for each dimension, either a number + that is the size of that dimension or an iterable of nodes for + that dimension. The dimension of the grid_graph is the length + of `dim`. + + periodic : bool + If `periodic is True` the nodes on the grid boundaries are joined + to the corresponding nodes on the opposite grid boundaries. + + Returns + ------- + NetworkX graph + The (possibly periodic) grid graph of the specified dimensions. + + Examples + -------- + To produce a 2 by 3 by 4 grid graph, a graph on 24 nodes:: + + >>> G = grid_graph(dim=[2, 3, 4]) + >>> len(G) + 24 + >>> G = grid_graph(dim=[range(7, 9), range(3, 6)]) + >>> len(G) + 6 + """ + dlabel = "%s" % dim + if not dim: + G = empty_graph(0) + G.name = "grid_graph(%s)" % dlabel + return G + + func = cycle_graph if periodic else path_graph + G = func(dim[0]) + for current_dim in dim[1:]: + # order matters: copy before it is cleared during the creation of Gnew + Gold = G.copy() + Gnew = func(current_dim) + # explicit: create_using = None + # This is so that we get a new graph of Gnew's class. + G = cartesian_product(Gnew, Gold) + # graph G is done but has labels of the form (1, (2, (3, 1))) so relabel + H = relabel_nodes(G, flatten) + H.name = "grid_graph(%s)" % dlabel + return H + + +def hypercube_graph(n): + """Returns the *n*-dimensional hypercube graph. + + The nodes are the integers between 0 and ``2 ** n - 1``, inclusive. + + For more information on the hypercube graph, see the Wikipedia + article *`Hypercube graph`_*. + + .. _Hypercube graph: https://en.wikipedia.org/wiki/Hypercube_graph + + Parameters + ---------- + n : int + The dimension of the hypercube. + The number of nodes in the graph will be ``2 ** n``. + + Returns + ------- + NetworkX graph + The hypercube graph of dimension *n*. + """ + dim = n * [2] + G = grid_graph(dim) + G.name = "hypercube_graph_(%d)" % n + return G + + +def triangular_lattice_graph(m, n, periodic=False, with_positions=True, + create_using=None): + """Returns the *m* by *n* triangular lattice graph. + + The *`triangular lattice graph`_* is a two-dimensional `grid graph`_ in + which each square unit has a diagonal edge (each grid unit has a chord). + + The returned graph has `m` rows and `n` columns of triangles. Rows and + columns include both triangles pointing up and down. Rows form a strip + of constant height. Columns form a series of diamond shapes, staggered + with the columns on either side. Another way to state the size is that + the nodes form a grid of `m+1` rows and `(n + 1) // 2` columns. + The odd row nodes are shifted horizontally relative to the even rows. + + Directed graph types have edges pointed up or right. + + Positions of nodes are computed by default or `with_positions is True`. + The position of each node (embedded in a euclidean plane) is stored in + the graph using equilateral triangles with sidelength 1. + The height between rows of nodes is thus :math:`\sqrt(3)/2`. + Nodes lie in the first quadrant with the node `(0, 0)` at the origin. + + .. _triangular lattice graph: http://mathworld.wolfram.com/TriangularGrid.html + .. _grid graph: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/ + .. 
_Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling + + Parameters + ---------- + m : int + The number of rows in the lattice. + + n : int + The number of columns in the lattice. + + periodic : bool (default: False) + If True, join the boundary vertices of the grid using periodic + boundary conditions. The join between boundaries is the final row + and column of triangles. This means there is one row and one column + fewer nodes for the periodic lattice. Periodic lattices require + `m >= 3`, `n >= 5` and are allowed but misaligned if `m` or `n` are odd + + with_positions : bool (default: True) + Store the coordinates of each node in the graph node attribute 'pos'. + The coordinates provide a lattice with equilateral triangles. + Periodic positions shift the nodes vertically in a nonlinear way so + the edges don't overlap so much. + + create_using : NetworkX graph + If specified, this must be an instance of a NetworkX graph + class. It will be cleared of nodes and edges and filled + with the new graph. Usually used to set the type of the graph. + + Returns + ------- + NetworkX graph + The *m* by *n* triangular lattice graph. + """ + H = empty_graph(0, create_using) + if n == 0 or m == 0: + return H + if periodic: + if n < 5 or m < 3: + msg = "m > 2 and n > 4 required for periodic. m={}, n={}" + raise NetworkXError(msg.format(m, n)) + + N = (n + 1) // 2 # number of nodes in row + rows = range(m + 1) + cols = range(N + 1) + # Make grid + H.add_edges_from(((i, j), (i + 1, j)) for j in rows for i in cols[:N]) + H.add_edges_from(((i, j), (i, j + 1)) for j in rows[:m] for i in cols) + # add diagonals + H.add_edges_from(((i, j), (i + 1, j + 1)) + for j in rows[1:m:2] for i in cols[:N]) + H.add_edges_from(((i + 1, j), (i, j + 1)) + for j in rows[:m:2] for i in cols[:N]) + # identify boundary nodes if periodic + if periodic is True: + for i in cols: + H = contracted_nodes(H, (i, 0), (i, m)) + for j in rows[:m]: + H = contracted_nodes(H, (0, j), (N, j)) + elif n % 2: + # remove extra nodes + H.remove_nodes_from(((N, j) for j in rows[1::2])) + + # Add position node attributes + if with_positions: + ii = (i for i in cols for j in rows) + jj = (j for i in cols for j in rows) + xx = (0.5 * (j % 2) + i for i in cols for j in rows) + h = sqrt(3)/2 + if periodic: + yy = (h * j + .01 * i * i for i in cols for j in rows) + else: + yy = (h * j for i in cols for j in rows) + pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) + if (i, j) in H} + set_node_attributes(H, 'pos', pos) + + # set the name + H.name = 'triangular_lattice_graph({}, {})'.format(m, n) + if periodic: + H.name = 'periodic_' + H.name + return H + + +def hexagonal_lattice_graph(m, n, periodic=False, with_positions=True, + create_using=None): + """Returns an `m` by `n` hexagonal lattice graph. + + The *hexagonal lattice graph* is a graph whose nodes and edges are + the `hexagonal tiling`_ of the plane. + + The returned graph will have `m` rows and `n` columns of hexagons. + `Odd numbered columns`_ are shifted up relative to even numbered columns. + + Positions of nodes are computed by default or `with_positions is True`. + Node positions creating the standard embedding in the plane + with sidelength 1 and are stored in the node attribute 'pos'. + `pos = nx.get_node_attributes(G, 'pos')` creates a dict ready for drawing. + + .. _hexagonal tiling: https://en.wikipedia.org/wiki/Hexagonal_tiling + .. 
_Odd numbered columns: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/ + + Parameters + ---------- + m : int + The number of rows of hexagons in the lattice. + + n : int + The number of columns of hexagons in the lattice. + + periodic : bool + Whether to make a periodic grid by joining the boundary vertices. + For this to work `n` must be odd and both `n > 1` and `m > 1`. + The periodic connections create another row and column of hexagons + so these graphs have fewer nodes as boundary nodes are identified. + + with_positions : bool (default: True) + Store the coordinates of each node in the graph node attribute 'pos'. + The coordinates provide a lattice with vertical columns of hexagons + offset to interleave and cover the plane. + Periodic positions shift the nodes vertically in a nonlinear way so + the edges don't overlap so much. + + create_using : NetworkX graph + If specified, this must be an instance of a NetworkX graph + class. It will be cleared of nodes and edges and filled + with the new graph. Usually used to set the type of the graph. + If graph is directed, edges will point up or right. + + Returns + ------- + NetworkX graph + The *m* by *n* hexagonal lattice graph. + """ + G = create_using if create_using is not None else Graph() + G.clear() + if m == 0 or n == 0: + return G + if periodic and (n % 2 == 1 or m < 2 or n < 2): + msg = "periodic hexagonal lattice needs m > 1, n > 1 and even n" + raise NetworkXError(msg) + + M = 2 * m # twice as many nodes as hexagons vertically + rows = range(M + 2) + cols = range(n + 1) + # make lattice + col_edges = (((i, j), (i, j + 1)) for i in cols for j in rows[:M + 1]) + row_edges = (((i, j), (i + 1, j)) for i in cols[:n] for j in rows + if i % 2 == j % 2) + G.add_edges_from(col_edges) + G.add_edges_from(row_edges) + # Remove corner nodes with one edge + G.remove_node((0, M + 1)) + G.remove_node((n, (M + 1) * (n % 2))) + + # identify boundary nodes if periodic + if periodic: + for i in cols[:n]: + G = contracted_nodes(G, (i, 0), (i, M)) + for i in cols[1:]: + G = contracted_nodes(G, (i, 1), (i, M + 1)) + for j in rows[1:M]: + G = contracted_nodes(G, (0, j), (n, j)) + G.remove_node((n, M)) + + # calc position in embedded space + ii = (i for i in cols for j in rows) + jj = (j for i in cols for j in rows) + xx = (0.5 + i + i // 2 + (j % 2) * ((i % 2) - .5) + for i in cols for j in rows) + h = sqrt(3)/2 + if periodic: + yy = (h * j + .01*i*i for i in cols for j in rows) + else: + yy = (h * j for i in cols for j in rows) + # exclude nodes not in G + pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in G} + set_node_attributes(G, 'pos', pos) + + # set the name + G.name = 'hexagonal_lattice_graph({}, {})'.format(m, n) + if periodic: + G.name = 'periodic_' + G.name + return G
diff --git a/networkx/generators/tests/test_classic.py b/networkx/generators/tests/test_classic.py --- a/networkx/generators/tests/test_classic.py +++ b/networkx/generators/tests/test_classic.py @@ -257,82 +257,6 @@ def test_empty_graph(self): assert_equal(G.name, 'empty_graph(42)') assert_true(isinstance(G,Graph)) - def test_grid_2d_graph(self): - n=5;m=6 - G=grid_2d_graph(n,m) - assert_equal(number_of_nodes(G), n*m) - assert_equal(degree_histogram(G), [0,0,4,2*(n+m)-8,(n-2)*(m-2)]) - DG=grid_2d_graph(n,m, create_using=DiGraph()) - assert_equal(DG.succ, G.adj) - assert_equal(DG.pred, G.adj) - MG=grid_2d_graph(n,m, create_using=MultiGraph()) - assert_edges_equal(MG.edges(), G.edges()) - g=grid_2d_graph(range(n), range(m)) - assert_edges_equal(g.edges(), G.edges()) - - def test_grid_graph(self): - """grid_graph([n,m]) is a connected simple graph with the - following properties: - number_of_nodes=n*m - degree_histogram=[0,0,4,2*(n+m)-8,(n-2)*(m-2)] - """ - for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]: - dim=[n,m] - g=grid_graph(dim) - assert_equal(number_of_nodes(g), n*m) - assert_equal(degree_histogram(g), [0,0,4,2*(n+m)-8,(n-2)*(m-2)]) - assert_equal(dim,[n,m]) - - for n, m in [(1, 5), (5, 1)]: - dim=[n,m] - g=grid_graph(dim) - assert_equal(number_of_nodes(g), n*m) - assert_true(is_isomorphic(g,path_graph(5))) - assert_equal(dim,[n,m]) - -# mg=grid_graph([n,m], create_using=MultiGraph()) -# assert_equal(mg.edges(), g.edges()) - - g=grid_graph([range(7,9), range(3,6)]) - assert_equal(number_of_nodes(g), 2*3) - assert_true(is_isomorphic(g, grid_graph([2,3]))) - - """Tuple dim arguments of the above tests - """ - for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]: - dim=(n,m) - g=grid_graph(dim) - assert_equal(number_of_nodes(g), n*m) - assert_equal(degree_histogram(g), [0,0,4,2*(n+m)-8,(n-2)*(m-2)]) - assert_equal(dim,(n,m)) - - for n, m in [(1, 5), (5, 1)]: - dim=(n,m) - g=grid_graph(dim) - assert_equal(number_of_nodes(g), n*m) - assert_true(is_isomorphic(g,path_graph(5))) - assert_equal(dim,(n,m)) - - g=grid_graph((range(7,9), range(3,6))) - assert_equal(number_of_nodes(g), 2*3) - assert_true(is_isomorphic(g, grid_graph((2,3)))) - - def test_hypercube_graph(self): - for n, G in [(0, null_graph()), (1, path_graph(2)), - (2, cycle_graph(4)), (3, cubical_graph())]: - g=hypercube_graph(n) - assert_true(is_isomorphic(g, G)) - - g=hypercube_graph(4) - assert_equal(degree_histogram(g), [0, 0, 0, 0, 16]) - g=hypercube_graph(5) - assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 32]) - g=hypercube_graph(6) - assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 0, 64]) - -# mg=hypercube_graph(6, create_using=MultiGraph()) -# assert_equal(mg.edges(), g.edges()) - def test_ladder_graph(self): for i, G in [(0, empty_graph(0)), (1, path_graph(2)), (2, hypercube_graph(2)), (10, grid_graph([2,10]))]: @@ -411,28 +335,6 @@ def test_path_graph(self): assert_equal(g.size(), 2) assert_true(g.is_directed()) - def test_periodic_grid_2d_graph(self): - g=grid_2d_graph(0,0, periodic=True) - assert_equal(dict(g.degree()), {}) - - for m, n, G in [(2, 2, cycle_graph(4)), (1, 7, cycle_graph(7)), - (7, 1, cycle_graph(7)), (2, 5, circular_ladder_graph(5)), - (5, 2, circular_ladder_graph(5)), (2, 4, cubical_graph()), - (4, 2, cubical_graph())]: - g=grid_2d_graph(m,n, periodic=True) - assert_true(is_isomorphic(g, G)) - - DG=grid_2d_graph(4, 2, periodic=True, create_using=DiGraph()) - assert_equal(DG.succ,g.adj) - assert_equal(DG.pred,g.adj) - MG=grid_2d_graph(4, 2, periodic=True, create_using=MultiGraph()) - 
assert_edges_equal(MG.edges(), g.edges()) - - gg=grid_2d_graph(range(4), range(2), periodic=True) - assert_true(is_isomorphic(gg, g)) - ggg=grid_2d_graph("abcd", "ef", periodic=True) - assert_true(is_isomorphic(ggg, g)) - def test_star_graph(self): assert_true(is_isomorphic(star_graph(0), empty_graph(1))) assert_true(is_isomorphic(star_graph(1), path_graph(2))) @@ -520,4 +422,4 @@ def test_complete_multipartite_graph(self): for (block1, block2) in itertools.combinations(blocks, 2): for u, v in itertools.product(block1, block2): assert_true(v in G[u]) - assert_not_equal(G.node[u], G.node[v]) + assert_not_equal(G.node[u], G.node[v]) \ No newline at end of file diff --git a/networkx/generators/tests/test_lattice.py b/networkx/generators/tests/test_lattice.py new file mode 100644 --- /dev/null +++ b/networkx/generators/tests/test_lattice.py @@ -0,0 +1,209 @@ +"""Unit tests for the :mod:`networkx.generators.lattice` module.""" +import itertools + +from nose.tools import assert_equal +from nose.tools import assert_true +from nose.tools import assert_raises + +import networkx as nx +from networkx.testing import assert_edges_equal + + +class TestGrid2DGraph: + """Unit tests for :func:`networkx.generators.lattice.grid_2d_graph`""" + def test_number_of_vertices(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + assert_equal(len(G), m * n) + + def test_degree_distribution(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + expected_histogram = [0, 0, 4, 2 * (m + n) - 8, (m - 2) * (n - 2)] + assert_equal(nx.degree_histogram(G), expected_histogram) + + def test_directed(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + H = nx.grid_2d_graph(m, n, create_using=nx.DiGraph()) + assert_equal(H.succ, G.adj) + assert_equal(H.pred, G.adj) + + def test_multigraph(self): + m, n = 5, 6 + G = nx.grid_2d_graph(m, n) + H = nx.grid_2d_graph(m, n, create_using=nx.MultiGraph()) + assert_equal(list(H.edges()), list(G.edges())) + + def test_periodic(self): + G = nx.grid_2d_graph(0, 0, periodic=True) + assert_equal(dict(G.degree()), {}) + + for m, n, H in [(2, 2, nx.cycle_graph(4)), (1, 7, nx.cycle_graph(7)), + (7, 1, nx.cycle_graph(7)), + (2, 5, nx.circular_ladder_graph(5)), + (5, 2, nx.circular_ladder_graph(5)), + (2, 4, nx.cubical_graph()), + (4, 2, nx.cubical_graph())]: + G = nx.grid_2d_graph(m, n, periodic=True) + assert_true(nx.could_be_isomorphic(G, H)) + + def test_periodic_directed(self): + G = nx.grid_2d_graph(4, 2, periodic=True) + H = nx.grid_2d_graph(4, 2, periodic=True, create_using=nx.DiGraph()) + assert_equal(H.succ, G.adj) + assert_equal(H.pred, G.adj) + + def test_periodic_multigraph(self): + G = nx.grid_2d_graph(4, 2, periodic=True) + H = nx.grid_2d_graph(4, 2, periodic=True, create_using=nx.MultiGraph()) + assert_equal(list(G.edges()), list(H.edges())) + + def test_node_input(self): + G = nx.grid_2d_graph(4, 2, periodic=True) + H = nx.grid_2d_graph(range(4), range(2), periodic=True) + assert_true(nx.is_isomorphic(H, G)) + H = nx.grid_2d_graph("abcd", "ef", periodic=True) + assert_true(nx.is_isomorphic(H, G)) + G = nx.grid_2d_graph(5, 6) + H = nx.grid_2d_graph(range(5), range(6)) + assert_edges_equal(H, G) + + +class TestGridGraph: + """Unit tests for :func:`networkx.generators.lattice.grid_graph`""" + def test_grid_graph(self): + """grid_graph([n,m]) is a connected simple graph with the + following properties: + number_of_nodes = n*m + degree_histogram = [0,0,4,2*(n+m)-8,(n-2)*(m-2)] + """ + for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]: + dim = [n, m] + g = nx.grid_graph(dim) + assert_equal(len(g), 
n*m) + assert_equal(nx.degree_histogram(g), [0, 0, 4, 2 * (n + m) - 8, + (n - 2) * (m - 2)]) + + for n, m in [(1, 5), (5, 1)]: + dim = [n, m] + g = nx.grid_graph(dim) + assert_equal(len(g), n*m) + assert_true(nx.is_isomorphic(g, nx.path_graph(5))) + +# mg = nx.grid_graph([n,m], create_using=MultiGraph()) +# assert_equal(mg.edges(), g.edges()) + + def test_node_input(self): + G = nx.grid_graph([range(7, 9), range(3, 6)]) + assert_equal(len(G), 2 * 3) + assert_true(nx.is_isomorphic(G, nx.grid_graph([2, 3]))) + + +class TestHypercubeGraph: + """Unit tests for :func:`networkx.generators.lattice.hypercube_graph`""" + def test_special_cases(self): + for n, H in [(0, nx.null_graph()), (1, nx.path_graph(2)), + (2, nx.cycle_graph(4)), (3, nx.cubical_graph())]: + G = nx.hypercube_graph(n) + assert_true(nx.could_be_isomorphic(G, H)) + + def test_degree_distribution(self): + for n in range(1, 10): + G = nx.hypercube_graph(n) + expected_histogram = [0] * n + [2 ** n] + assert_equal(nx.degree_histogram(G), expected_histogram) + + +class TestTriangularLatticeGraph: + "Tests for :func:`networkx.generators.lattice.triangular_lattice_graph`" + def test_lattice_points(self): + """Tests that the graph is really a triangular lattice.""" + for m, n in [(2, 3), (2, 2), (2, 1), (3, 3), (3, 2), (3, 4)]: + G = nx.triangular_lattice_graph(m, n) + N = (n + 1) // 2 + assert_equal(len(G), (m + 1) * (1 + N) - (n % 2) * ((m + 1) // 2)) + for (i, j) in G.nodes(): + nbrs = G[(i, j)] + if i < N: + assert_true((i + 1, j) in nbrs) + if j < m: + assert_true((i, j + 1) in nbrs) + if j < m and (i > 0 or j % 2) and (i < N or (j + 1) % 2): + assert_true((i + 1, j + 1) in nbrs or (i - 1, j + 1) in nbrs) + + def test_directed(self): + """Tests for creating a directed triangular lattice.""" + G = nx.triangular_lattice_graph(3, 4, create_using=nx.Graph()) + H = nx.triangular_lattice_graph(3, 4, create_using=nx.DiGraph()) + assert_true(H.is_directed()) + for u, v in H.edges(): + assert_true(v[1] >= u[1]) + if v[1] == u[1]: + assert_true(v[0] > u[0]) + + def test_multigraph(self): + """Tests for creating a triangular lattice multigraph.""" + G = nx.triangular_lattice_graph(3, 4, create_using=nx.Graph()) + H = nx.triangular_lattice_graph(3, 4, create_using=nx.MultiGraph()) + assert_equal(list(H.edges()), list(G.edges())) + + def test_periodic(self): + G = nx.triangular_lattice_graph(4, 6, periodic=True) + assert_equal(len(G), 12) + assert_equal(G.size(), 36) + # all degrees are 6 + assert_equal(len([n for n, d in G.degree() if d != 6]), 0) + G = nx.triangular_lattice_graph(5, 7, periodic=True) + TLG = nx.triangular_lattice_graph + assert_raises(nx.NetworkXError, TLG, 2, 4, periodic=True) + assert_raises(nx.NetworkXError, TLG, 4, 4, periodic=True) + assert_raises(nx.NetworkXError, TLG, 2, 6, periodic=True) + + +class TestHexagonalLatticeGraph: + "Tests for :func:`networkx.generators.lattice.hexagonal_lattice_graph`" + def test_lattice_points(self): + """Tests that the graph is really a hexagonal lattice.""" + for m, n in [(4, 5), (4, 4), (4, 3), (3, 2), (3, 3), (3, 5)]: + G = nx.hexagonal_lattice_graph(m, n) + assert_equal(len(G), 2 * (m + 1) * (n + 1) - 2) + C_6 = nx.cycle_graph(6) + hexagons = [ + [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)], + [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4)], + [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)], + [(2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2)], + [(2, 2), (2, 3), (2, 4), (3, 2), (3, 3), (3, 4)], + ] + for hexagon in hexagons: + assert_true(nx.is_isomorphic(G.subgraph(hexagon), 
C_6)) + + def test_directed(self): + """Tests for creating a directed hexagonal lattice.""" + G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph()) + H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.DiGraph()) + assert_true(H.is_directed()) + pos = nx.get_node_attributes(H, 'pos') + for u, v in H.edges(): + assert_true(pos[v][1] >= pos[u][1]) + if pos[v][1] == pos[u][1]: + assert_true(pos[v][0] > pos[u][0]) + + def test_multigraph(self): + """Tests for creating a hexagonal lattice multigraph.""" + G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph()) + H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.MultiGraph()) + assert_equal(list(H.edges()), list(G.edges())) + + def test_periodic(self): + G = nx.hexagonal_lattice_graph(4, 6, periodic=True) + assert_equal(len(G), 48) + assert_equal(G.size(), 72) + # all degrees are 3 + assert_equal(len([n for n, d in G.degree() if d != 3]), 0) + G = nx.hexagonal_lattice_graph(5, 8, periodic=True) + HLG = nx.hexagonal_lattice_graph + assert_raises(nx.NetworkXError, HLG, 2, 7, periodic=True) + assert_raises(nx.NetworkXError, HLG, 1, 4, periodic=True) + assert_raises(nx.NetworkXError, HLG, 2, 1, periodic=True)
Add triangular and hexagonal lattice generators I also did a few small book-keeping edits to other parts of generators/classic.py: I changed G.name for the 2d grid graph in the non-periodic case and corrected some typos in comments in the file.
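A brief usage sketch of the two generators added in the patch above, following the signatures and the `'pos'` node attribute described in their docstrings (drawing assumes matplotlib is installed):

```python
import networkx as nx

G = nx.triangular_lattice_graph(2, 3)  # 2 rows, 3 columns of triangles
H = nx.hexagonal_lattice_graph(2, 3)   # 2 rows, 3 columns of hexagons

# Positions are stored on the nodes by default (with_positions=True),
# giving a dict ready for drawing, as the docstring notes.
pos = nx.get_node_attributes(H, 'pos')
nx.draw(H, pos=pos, node_size=20)
```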
2016-04-22T20:47:09
networkx/networkx
2,104
networkx__networkx-2104
[ "2076" ]
9fcaea9cae8beaf46cbb9df0576102de92bbc691
diff --git a/networkx/algorithms/flow/mincost.py b/networkx/algorithms/flow/mincost.py --- a/networkx/algorithms/flow/mincost.py +++ b/networkx/algorithms/flow/mincost.py @@ -81,6 +81,14 @@ def min_cost_flow_cost(G, demand = 'demand', capacity = 'capacity', -------- cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + Examples -------- A simple example of a min cost flow problem. @@ -166,6 +174,14 @@ def min_cost_flow(G, demand = 'demand', capacity = 'capacity', -------- cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + Examples -------- A simple example of a min cost flow problem. @@ -216,6 +232,14 @@ def cost_of_flow(G, flowDict, weight = 'weight'): See also -------- max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). """ return sum((flowDict[u][v] * d.get(weight, 0) for u, v, d in G.edges(data = True))) @@ -275,6 +299,14 @@ def max_flow_min_cost(G, s, t, capacity = 'capacity', weight = 'weight'): -------- cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + Examples -------- >>> G = nx.DiGraph() @@ -303,7 +335,6 @@ def max_flow_min_cost(G, s, t, capacity = 'capacity', weight = 'weight'): >>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7) True - """ maxFlow = nx.maximum_flow_value(G, s, t, capacity = capacity) H = nx.DiGraph(G) diff --git a/networkx/algorithms/flow/networksimplex.py b/networkx/algorithms/flow/networksimplex.py --- a/networkx/algorithms/flow/networksimplex.py +++ b/networkx/algorithms/flow/networksimplex.py @@ -96,7 +96,9 @@ def network_simplex(G, demand='demand', capacity='capacity', weight='weight'): ----- This algorithm is not guaranteed to work if edge weights or demands are floating point numbers (overflows and roundoff errors can - cause problems). + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). See also --------
max_flow_min_cost function running forever Hello, when I try to run this code, it seems to run in an infinite loop.

``` python
G = nx.DiGraph()
G.add_edges_from([
    ('1in', '1out', {'cost': 0, 'capacity': 1}),
    ('2out', '4in', {'cost': -9.48, 'capacity': 1}),
    ('2out', 'destination', {'cost': -10.9, 'capacity': 1}),
    ('2out', '3in', {'cost': -10.31, 'capacity': 1}),
    ('destination', 'sink', {'cost': 0, 'capacity': 1}),
    ('2in', '2out', {'cost': 0, 'capacity': 1}),
    ('source', '2in', {'cost': 0, 'capacity': 1}),
    ('source', '4in', {'cost': 0, 'capacity': 1}),
    ('source', '1in', {'cost': 0, 'capacity': 1}),
    ('source', '3in', {'cost': 0, 'capacity': 1}),
    ('4in', '4out', {'cost': 0, 'capacity': 1}),
    ('1out', '2in', {'cost': -10.31, 'capacity': 1}),
    ('1out', '4in', {'cost': -10.31, 'capacity': 1}),
    ('1out', 'destination', {'cost': -10.9, 'capacity': 1}),
    ('1out', '3in', {'cost': -9.48, 'capacity': 1}),
    ('4out', 'destination', {'cost': -10.9, 'capacity': 1}),
    ('3in', '3out', {'cost': 0, 'capacity': 1}),
    ('3out', '4in', {'cost': -10.31, 'capacity': 1}),
    ('3out', 'destination', {'cost': -10.9, 'capacity': 1})
])
nx.max_flow_min_cost(G, 'source', 'sink', weight='cost')
```

Actually, this breaks whenever the capacity from destination to sink is 1, 2, or 3 (with capacity 4 it works fine). Here is an example of a similar list of edges where the algorithm runs fine for any destination -> sink capacity from 1 to 7:

``` python
[('destination', 'sink', {'capacity': 2, 'cost': 0}),
 ('1in', '1out', {'capacity': 1, 'cost': 0}),
 ('6out', '7in', {'capacity': 1, 'cost': -28.0}),
 ('6out', 'destination', {'capacity': 1, 'cost': -26.76}),
 ('source', '1in', {'capacity': 1, 'cost': 0}),
 ('source', '7in', {'capacity': 1, 'cost': 0}),
 ('source', '6in', {'capacity': 1, 'cost': 0}),
 ('source', '5in', {'capacity': 1, 'cost': 0}),
 ('source', '2in', {'capacity': 1, 'cost': 0}),
 ('source', '4in', {'capacity': 1, 'cost': 0}),
 ('source', '3in', {'capacity': 1, 'cost': 0}),
 ('7in', '7out', {'capacity': 1, 'cost': 0}),
 ('2out', '7in', {'capacity': 1, 'cost': -26.17}),
 ('2out', 'destination', {'capacity': 1, 'cost': -25.84}),
 ('2out', '6in', {'capacity': 1, 'cost': -26.76}),
 ('2out', '5in', {'capacity': 1, 'cost': -27.0}),
 ('2out', '4in', {'capacity': 1, 'cost': -27.0}),
 ('2out', '3in', {'capacity': 1, 'cost': -28.0}),
 ('6in', '6out', {'capacity': 1, 'cost': 0}),
 ('5in', '5out', {'capacity': 1, 'cost': 0}),
 ('2in', '2out', {'capacity': 1, 'cost': 0}),
 ('7out', 'destination', {'capacity': 1, 'cost': -27.59}),
 ('4in', '4out', {'capacity': 1, 'cost': 0}),
 ('1out', '7in', {'capacity': 1, 'cost': -25.84}),
 ('1out', 'destination', {'capacity': 1, 'cost': -25.0}),
 ('1out', '6in', {'capacity': 1, 'cost': -26.76}),
 ('1out', '5in', {'capacity': 1, 'cost': -27.59}),
 ('1out', '2in', {'capacity': 1, 'cost': -27.59}),
 ('1out', '4in', {'capacity': 1, 'cost': -25.84}),
 ('1out', '3in', {'capacity': 1, 'cost': -26.76}),
 ('4out', '7in', {'capacity': 1, 'cost': -27.0}),
 ('4out', 'destination', {'capacity': 1, 'cost': -27.59}),
 ('4out', '6in', {'capacity': 1, 'cost': -26.76}),
 ('4out', '5in', {'capacity': 1, 'cost': -26.17}),
 ('3out', '7in', {'capacity': 1, 'cost': -26.76}),
 ('3out', '4in', {'capacity': 1, 'cost': -28.0}),
 ('3out', 'destination', {'capacity': 1, 'cost': -26.76}),
 ('3out', '6in', {'capacity': 1, 'cost': -27.0}),
 ('3out', '5in', {'capacity': 1, 'cost': -26.76}),
 ('3in', '3out', {'capacity': 1, 'cost': 0}),
 ('5out', '7in', {'capacity': 1, 'cost': -27.0}),
 ('5out', 'destination', {'capacity': 1, 'cost': -25.84}),
 ('5out', '6in', {'capacity': 1, 'cost': -28.0})]
```

If this is my mistake let me know (I even posted on [StackOverflow](http://stackoverflow.com/questions/36630271/python-networkx-max-flow-min-cost-function-running-forever)).
Hi @misingnoglic. The algorithm implemented in `nx.network_simplex` is not guaranteed to work if edge weights are floating point numbers, and `nx.max_flow_min_cost` has the same limitation since it uses it. However, only the docs for the former say so. As a workaround you can convert your weights to integers. In your example, multiplying cost by 100 will do it:

``` python
In [2]: %paste
G = nx.DiGraph()
G.add_edges_from([
    ('1in', '1out', {'cost': 0, 'capacity': 1}),
    ('2out', '4in', {'cost': -948, 'capacity': 1}),
    ('2out', 'destination', {'cost': -1090, 'capacity': 1}),
    ('2out', '3in', {'cost': -1031, 'capacity': 1}),
    ('destination', 'sink', {'cost': 0, 'capacity': 1}),
    ('2in', '2out', {'cost': 0, 'capacity': 1}),
    ('source', '2in', {'cost': 0, 'capacity': 1}),
    ('source', '4in', {'cost': 0, 'capacity': 1}),
    ('source', '1in', {'cost': 0, 'capacity': 1}),
    ('source', '3in', {'cost': 0, 'capacity': 1}),
    ('4in', '4out', {'cost': 0, 'capacity': 1}),
    ('1out', '2in', {'cost': -1031, 'capacity': 1}),
    ('1out', '4in', {'cost': -1031, 'capacity': 1}),
    ('1out', 'destination', {'cost': -1090, 'capacity': 1}),
    ('1out', '3in', {'cost': -948, 'capacity': 1}),
    ('4out', 'destination', {'cost': -1090, 'capacity': 1}),
    ('3in', '3out', {'cost': 0, 'capacity': 1}),
    ('3out', '4in', {'cost': -1031, 'capacity': 1}),
    ('3out', 'destination', {'cost': -1090, 'capacity': 1})
])
## -- End pasted text --

In [3]: %time nx.max_flow_min_cost(G, 'source', 'sink', weight='cost')
CPU times: user 1.23 ms, sys: 0 ns, total: 1.23 ms
Wall time: 3.12 ms
Out[3]:
{'1in': {'1out': 1},
 '1out': {'2in': 1, '3in': 0, '4in': 0, 'destination': 0},
 '2in': {'2out': 1},
 '2out': {'3in': 1, '4in': 0, 'destination': 0},
 '3in': {'3out': 1},
 '3out': {'4in': 1, 'destination': 0},
 '4in': {'4out': 1},
 '4out': {'destination': 1},
 'destination': {'sink': 1},
 'sink': {},
 'source': {'1in': 1, '2in': 0, '3in': 0, '4in': 0}}
```

I'll put together a PR (hopefully this weekend) updating the docs (also for `nx.network_simplex`), because they don't mention the conversion-to-integers workaround.

PS: It's not nice to cross-post questions on different sites at the same time.

@jtorrents Thank you for the response (I appreciate it a lot!), and sorry about the double posting - I figured if I posted on StackOverflow it would be answered if it was an issue related to graph theory, and here if it was something related to the specific implementation (also, in full disclosure, my semester project is due tomorrow and I just realized this issue existed).
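For graphs whose float costs aren't known in advance, the multiply-by-100 workaround above could be wrapped in a small helper. This is a hypothetical sketch, not a NetworkX API; `factor` must be large enough to preserve the cost distinctions that matter, and `G` refers to the graph from the report above.

```python
import networkx as nx

def with_integer_costs(G, weight='cost', factor=100):
    # Hypothetical helper: copy G with each float edge cost replaced
    # by a rounded integer cost (factor=100 keeps two decimal places).
    H = nx.DiGraph()
    for u, v, d in G.edges(data=True):
        d = dict(d)
        d[weight] = int(round(d[weight] * factor))
        H.add_edge(u, v, **d)
    return H

flow = nx.max_flow_min_cost(with_integer_costs(G), 'source', 'sink',
                            weight='cost')
```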
2016-04-24T20:29:05
networkx/networkx
2,136
networkx__networkx-2136
[ "2134" ]
5aefafab2f05b97b150c6bf681c21ba6465c8d10
diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py
--- a/networkx/readwrite/gml.py
+++ b/networkx/readwrite/gml.py
@@ -435,10 +435,6 @@ def pop_attr(dct, category, attr, i):
 
     if label != 'id':
         G = nx.relabel_nodes(G, mapping)
-    if 'name' in graph:
-        G.graph['name'] = graph['name']
-    else:
-        del G.graph['name']
 
     return G
 
diff --git a/networkx/relabel.py b/networkx/relabel.py
--- a/networkx/relabel.py
+++ b/networkx/relabel.py
@@ -147,7 +147,8 @@ def _relabel_inplace(G, mapping):
 
 def _relabel_copy(G, mapping):
     H = G.__class__()
-    H.name = "(%s)" % G.name
+    if G.name:
+        H.name = "(%s)" % G.name
     if G.is_multigraph():
         H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),k,d.copy())
                           for (n1,n2,k,d) in G.edges(keys=True, data=True))
diff --git a/networkx/tests/test_relabel.py b/networkx/tests/test_relabel.py
--- a/networkx/tests/test_relabel.py
+++ b/networkx/tests/test_relabel.py
@@ -150,6 +150,17 @@ def test_relabel_nodes_missing(self):
         mapping={0:'aardvark'}
         G=relabel_nodes(G,mapping,copy=False)
 
+    def test_relabel_copy_name(self):
+        G=Graph()
+        H = relabel_nodes(G, {}, copy=True)
+        assert_equal(H.graph, G.graph)
+        H = relabel_nodes(G, {}, copy=False)
+        assert_equal(H.graph, G.graph)
+        G.name = "first"
+        H = relabel_nodes(G, {}, copy=True)
+        assert_equal(H.graph, G.graph)
+        H = relabel_nodes(G, {}, copy=False)
+        assert_equal(H.graph, G.graph)
     def test_relabel_toposort(self):
         K5=nx.complete_graph(4)
relabel_nodes adds a graph attribute when copy=True

I would have expected the following to work:

```
import networkx as nx

graph_a = nx.DiGraph()
graph_b = nx.relabel_nodes(graph_a, {}, copy=True)

print "graph_a.graph", graph_a.graph
print "graph_b.graph", graph_b.graph
assert graph_a.graph == graph_b.graph
```

However, it does not since [_relabel_copy attempts to copy a non-existent graph attribute, 'name'](https://github.com/networkx/networkx/blob/1675a824d6cdb17c3144ef46ff52a0c2b53a11d1/networkx/relabel.py#L150).

I would have expected relabel_nodes to only change the node labels, while maintaining all graph/node/edge attributes.
2016-05-19T01:37:02
networkx/networkx
2,158
networkx__networkx-2158
[ "2156" ]
df730d96d6490079a6b6fcf3a2bea64324aef02e
diff --git a/networkx/algorithms/matching.py b/networkx/algorithms/matching.py
--- a/networkx/algorithms/matching.py
+++ b/networkx/algorithms/matching.py
@@ -21,10 +21,10 @@
 
 def maximal_matching(G):
-    r"""Find a maximal cardinality matching in the graph.
+    r"""Find a maximal matching in the graph.
 
     A matching is a subset of edges in which no node occurs more than once.
-    The cardinality of a matching is the number of matched edges.
+    A maximal matching cannot add more edges and still be a matching.
 
     Parameters
     ----------
@@ -152,8 +152,9 @@ def max_weight_matching(G, maxcardinality=False, weight='weight'):
     """Compute a maximum-weighted matching of G.
 
     A matching is a subset of edges in which no node occurs more than once.
-    The cardinality of a matching is the number of matched edges.
     The weight of a matching is the sum of the weights of its edges.
+    A maximal matching cannot add more edges and still be a matching.
+    The cardinality of a matching is the number of matched edges.
 
     Parameters
     ----------
diff --git a/networkx/algorithms/operators/product.py b/networkx/algorithms/operators/product.py
--- a/networkx/algorithms/operators/product.py
+++ b/networkx/algorithms/operators/product.py
@@ -131,7 +131,7 @@ def tensor_product(G, H):
     r"""Return the tensor product of G and H.
 
     The tensor product P of the graphs G and H has a node set that
-    is the Cartesian product of the node sets, :math:`V(P)=V(G) \times V(H)`.
+    is the tensor product of the node sets, :math:`V(P)=V(G) \times V(H)`.
 
     P has an edge ((u,v),(x,y)) if and only if (u,x) is an edge in G
     and (v,y) is an edge in H.
maximal matching documentation

networkx.algorithms.matching.maximal_matching is documented as "Find a maximal cardinality matching in the graph." If it computes just a "maximal matching", then the use of 'cardinality' seems confusing, since the algorithm does not compute a "maximum cardinality" matching. For example, given the graph 'c'--'a'--'b'--'d', the method might return {('a', 'b')} as a maximal matching, which is not the maximum cardinality matching {('a', 'c'), ('b', 'd')}.

```
import networkx as nx
g=nx.Graph()
g.add_edge('b','a')
g.add_edge('c','a')
g.add_edge('b','d')
print(nx.maximal_matching(g))
```

(The above test depends on the order of edges, which is not deterministic. If you get two edges as a result, you might need to restart the Python console and rerun the code.)

Could you remove 'cardinality' from the documentation?
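A small sketch of the distinction at issue, maximal versus maximum matching. It assumes `max_weight_matching`, which maximizes cardinality on an unweighted graph, as the maximum-matching reference; its return type differs across networkx versions:

```python
import networkx as nx

g = nx.Graph([('c', 'a'), ('a', 'b'), ('b', 'd')])
maximal = nx.maximal_matching(g)     # maximal: may contain only {('a', 'b')}
maximum = nx.max_weight_matching(g)  # with unit weights this maximizes cardinality,
                                     # pairing a-c and b-d (two edges)
print(maximal)
print(maximum)
```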
You are right, that is confusing. Thanks for pointing this out.
2016-06-08T20:36:56
networkx/networkx
2,206
networkx__networkx-2206
[ "2205" ]
0418e01085a8d2a193ce7b6252ac0de354b2bb36
diff --git a/networkx/classes/digraph.py b/networkx/classes/digraph.py --- a/networkx/classes/digraph.py +++ b/networkx/classes/digraph.py @@ -796,7 +796,7 @@ def edges(self, nbunch=None, data=False, default=None): # alias out_edges to edges out_edges = edges - def in_edges(self, nbunch=None, data=False): + def in_edges(self, nbunch=None, data=False, default=None): """Return an iterator over the incoming edges. Parameters @@ -804,8 +804,15 @@ def in_edges(self, nbunch=None, data=False): nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. - data : bool, optional (default=False) - If True, return edge attribute dict in 3-tuple (u,v,data). + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u,v,ddict[data]). + If True, return edge attribute dict in 3-tuple (u,v,ddict). + If False, return 2-tuple (u,v). + default : value, optional (default=None) + Value used for edges that dont have the requested attribute. + Only relevant if data is not True or False. + + Returns ------- @@ -820,10 +827,15 @@ def in_edges(self, nbunch=None, data=False): nodes_nbrs=self.pred.items() else: nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) - if data: + if data is True: for n,nbrs in nodes_nbrs: for nbr,data in nbrs.items(): yield (nbr,n,data) + elif data is not False: + for n,nbrs in nodes_nbrs: + for nbr,ddict in nbrs.items(): + d=ddict[data] if data in ddict else default + yield (nbr,n,d) else: for n,nbrs in nodes_nbrs: for nbr in nbrs: diff --git a/networkx/classes/multidigraph.py b/networkx/classes/multidigraph.py --- a/networkx/classes/multidigraph.py +++ b/networkx/classes/multidigraph.py @@ -493,7 +493,7 @@ def edges(self, nbunch=None, data=False, keys=False, default=None): out_edges = edges - def in_edges(self, nbunch=None, data=False, keys=False): + def in_edges(self, nbunch=None, data=False, keys=False, default=None): """Return an iterator over the incoming edges. Parameters @@ -501,10 +501,17 @@ def in_edges(self, nbunch=None, data=False, keys=False): nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. - data : bool, optional (default=False) - If True, return edge attribute dict with each edge. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u,v,ddict[data]). + If True, return edge attribute dict in 3-tuple (u,v,ddict). + If False, return 2-tuple (u,v). + keys : bool, optional (default=False) If True, return edge keys with each edge. + default : value, optional (default=None) + Value used for edges that dont have the requested attribute. + Only relevant if data is not True or False. + Returns ------- @@ -519,7 +526,7 @@ def in_edges(self, nbunch=None, data=False, keys=False): nodes_nbrs = self.pred.items() else: nodes_nbrs = ((n, self.pred[n]) for n in self.nbunch_iter(nbunch)) - if data: + if data is True: for n, nbrs in nodes_nbrs: for nbr, keydict in nbrs.items(): for key, data in keydict.items(): @@ -527,6 +534,15 @@ def in_edges(self, nbunch=None, data=False, keys=False): yield (nbr, n, key, data) else: yield (nbr, n, data) + elif data is not False: + for n, nbrs in nodes_nbrs: + for nbr, keydict in nbrs.items(): + for key, ddict in keydict.items(): + d = ddict[data] if data in ddict else default + if keys: + yield (nbr, n, key, d) + else: + yield (nbr, n, d) else: for n, nbrs in nodes_nbrs: for nbr, keydict in nbrs.items():
diff --git a/networkx/classes/tests/test_digraph.py b/networkx/classes/tests/test_digraph.py --- a/networkx/classes/tests/test_digraph.py +++ b/networkx/classes/tests/test_digraph.py @@ -70,6 +70,13 @@ def test_out_edges_dir(self): assert_equal(sorted(G.out_edges(0)),[(0, 1)]) assert_equal(sorted(G.out_edges(2)),[]) + def test_out_edges_data(self): + G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})]) + assert_equal(sorted(G.out_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})]) + assert_equal(sorted(G.out_edges(0, data=True)), [(0, 1, {'data' : 0})]) + assert_equal(sorted(G.out_edges(data='data')), [(0, 1, 0), (1, 0, None)]) + assert_equal(sorted(G.out_edges(0, data='data')), [(0, 1, 0)]) + def test_in_edges_dir(self): G=self.P3 assert_equal(sorted(G.in_edges()),[(0, 1), (1, 2)]) @@ -82,6 +89,13 @@ def test_in_edges_dir(self): assert_equal(sorted(G.in_edges(0)),[]) assert_equal(sorted(G.in_edges(2)),[(1,2)]) + def test_in_edges_data(self): + G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})]) + assert_equal(sorted(G.in_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})]) + assert_equal(sorted(G.in_edges(1, data=True)), [(0, 1, {'data' : 0})]) + assert_equal(sorted(G.in_edges(data='data')), [(0, 1, 0), (1, 0, None)]) + assert_equal(sorted(G.in_edges(1, data='data')), [(0, 1, 0)]) + def test_degree(self): G=self.K3 assert_equal(list(G.degree()),[(0,4),(1,4),(2,4)]) diff --git a/networkx/classes/tests/test_multidigraph.py b/networkx/classes/tests/test_multidigraph.py --- a/networkx/classes/tests/test_multidigraph.py +++ b/networkx/classes/tests/test_multidigraph.py @@ -45,6 +45,14 @@ def test_out_edges(self): assert_equal(sorted(G.out_edges()), [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)]) + def test_out_edges_data(self): + G=self.K3 + assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})]) + G.remove_edge(0,1) + G.add_edge(0,1,data=1) + assert_equal(sorted(G.edges(0,data=True)),[(0,1,{'data':1}),(0,2,{})]) + assert_equal(sorted(G.edges(0,data='data')),[(0,1,1),(0,2,None)]) + def test_in_edges(self): G=self.K3 assert_equal(sorted(G.in_edges()), @@ -69,6 +77,13 @@ def test_in_edges(self): [(0,1,{}),(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}), (2,0,{}),(2,1,{})]) + def test_in_edges_data(self): + G=self.K3 + assert_equal(sorted(G.in_edges(0,data=True)),[(1,0,{}),(2,0,{})]) + G.remove_edge(1,0) + G.add_edge(1,0, data=1) + assert_equal(sorted(G.in_edges(0,data=True)),[(1,0,{'data':1}),(2,0,{})]) + assert_equal(sorted(G.in_edges(0,data='data')),[(1,0,1),(2,0,None)]) def is_shallow(self,H,G): # graph
Retrieving edge data in directed graphs has an asymmetric API

[DiGraph.out_edges](http://networkx.readthedocs.io/en/latest/reference/generated/networkx.DiGraph.out_edges.html) has the useful feature that you can retrieve a single value from the data dictionary by specifying a string. [DiGraph.in_edges](http://networkx.readthedocs.io/en/latest/reference/generated/networkx.DiGraph.in_edges.html), [MultiDiGraph.out_edges](http://networkx.readthedocs.io/en/latest/reference/generated/networkx.MultiDiGraph.out_edges.html) and [MultiDiGraph.in_edges](http://networkx.readthedocs.io/en/latest/reference/generated/networkx.MultiDiGraph.in_edges.html) lack this feature.

I'll likely prepare a PR for this myself, but I wanted to open this issue first, in case I missed something or anyone has useful information in relation to this.
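A short sketch of the asymmetry described above; the `in_edges(data='weight')` call shows the requested behavior, not what the then-current release supported:

```python
import networkx as nx

G = nx.DiGraph()
G.add_edge(0, 1, weight=7)

print(list(G.out_edges(data='weight')))  # [(0, 1, 7)] -- supported
print(list(G.in_edges(data='weight')))   # the requested symmetric behavior: [(0, 1, 7)]
```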
It would be good for them all to have that feature. Thanks!
2016-07-30T15:27:43
networkx/networkx
2,303
networkx__networkx-2303
[ "2283" ]
17b8d21f7edd4ec4388ba9bd52acb3a91d6ef9e2
diff --git a/tools/gh_api.py b/tools/gh_api.py deleted file mode 100644 --- a/tools/gh_api.py +++ /dev/null @@ -1,201 +0,0 @@ -"""Functions for Github authorisation.""" -from __future__ import print_function - -try: - input = raw_input -except NameError: - pass - -import os - -import requests -import getpass -import json - -# Keyring stores passwords by a 'username', but we're not storing a username and -# password -fake_username = 'networkx-tests' - -class Obj(dict): - """Dictionary with attribute access to names.""" - def __getattr__(self, name): - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, val): - self[name] = val - -token = None -def get_auth_token(): - global token - - if token is not None: - return token - - import keyring - token = keyring.get_password('github', fake_username) - if token is not None: - return token - - print("Please enter your github username and password. These are not " - "stored, only used to get an oAuth token. You can revoke this at " - "any time on Github.") - user = input("Username: ") - pw = getpass.getpass("Password: ") - - auth_request = { - "scopes": [ - "public_repo", - "gist" - ], - "note": "NetworkX tools", - "note_url": "https://github.com/networkx/networkx/tree/master/tools", - } - response = requests.post('https://api.github.com/authorizations', - auth=(user, pw), data=json.dumps(auth_request)) - response.raise_for_status() - token = json.loads(response.text)['token'] - keyring.set_password('github', fake_username, token) - return token - -def make_auth_header(): - return {'Authorization': 'token ' + get_auth_token()} - -def post_issue_comment(project, num, body): - url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num) - payload = json.dumps({'body': body}) - r = requests.post(url, data=payload, headers=make_auth_header()) - -def post_gist(content, description='', filename='file', auth=False): - """Post some text to a Gist, and return the URL.""" - post_data = json.dumps({ - "description": description, - "public": True, - "files": { - filename: { - "content": content - } - } - }).encode('utf-8') - - headers = make_auth_header() if auth else {} - response = requests.post("https://api.github.com/gists", data=post_data, headers=headers) - response.raise_for_status() - response_data = json.loads(response.text) - return response_data['html_url'] - -def get_pull_request(project, num): - """get pull request info by number - """ - url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num) - response = requests.get(url) - response.raise_for_status() - return json.loads(response.text, object_hook=Obj) - -def get_pulls_list(project): - """get pull request list - """ - url = "https://api.github.com/repos/{project}/pulls".format(project=project) - response = requests.get(url) - response.raise_for_status() - return json.loads(response.text) - -# encode_multipart_formdata is from urllib3.filepost -# The only change is to iter_fields, to enforce S3's required key ordering - -def iter_fields(fields): - fields = fields.copy() - for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId', - 'Policy', 'Signature', 'Content-Type', 'file'): - yield (key, fields.pop(key)) - for (k,v) in fields.items(): - yield k,v - -def encode_multipart_formdata(fields, boundary=None): - """ - Encode a dictionary of `fields` using the multipart/form-data mime format. 
- - :param fields: - Dictionary of fields or list of (key, value) field tuples. The key is - treated as the field name, and the value as the body of the form-data - bytes. If the value is a tuple of two elements, then the first element - is treated as the filename of the form-data section. - - Field names and filenames must be unicode. - - :param boundary: - If not specified, then a random boundary will be generated using - :func:`mimetools.choose_boundary`. - """ - # copy requests imports in here: - from io import BytesIO - from requests.packages.urllib3.filepost import ( - choose_boundary, six, writer, b, get_content_type - ) - body = BytesIO() - if boundary is None: - boundary = choose_boundary() - - for fieldname, value in iter_fields(fields): - body.write(b('--%s\r\n' % (boundary))) - - if isinstance(value, tuple): - filename, data = value - writer(body).write('Content-Disposition: form-data; name="%s"; ' - 'filename="%s"\r\n' % (fieldname, filename)) - body.write(b('Content-Type: %s\r\n\r\n' % - (get_content_type(filename)))) - else: - data = value - writer(body).write('Content-Disposition: form-data; name="%s"\r\n' - % (fieldname)) - body.write(b'Content-Type: text/plain\r\n\r\n') - - if isinstance(data, int): - data = str(data) # Backwards compatibility - if isinstance(data, six.text_type): - writer(body).write(data) - else: - body.write(data) - - body.write(b'\r\n') - - body.write(b('--%s--\r\n' % (boundary))) - - content_type = b('multipart/form-data; boundary=%s' % boundary) - - return body.getvalue(), content_type - - -def post_download(project, filename, name=None, description=""): - """Upload a file to the GitHub downloads area""" - if name is None: - name = os.path.basename(filename) - with open(filename, 'rb') as f: - filedata = f.read() - - url = "https://api.github.com/repos/{project}/downloads".format(project=project) - - payload = json.dumps(dict(name=name, size=len(filedata), - description=description)) - response = requests.post(url, data=payload, headers=make_auth_header()) - response.raise_for_status() - reply = json.loads(response.content) - s3_url = reply['s3_url'] - - fields = dict( - key=reply['path'], - acl=reply['acl'], - success_action_status=201, - Filename=reply['name'], - AWSAccessKeyId=reply['accesskeyid'], - Policy=reply['policy'], - Signature=reply['signature'], - file=(reply['name'], filedata), - ) - fields['Content-Type'] = reply['mime_type'] - data, content_type = encode_multipart_formdata(fields) - s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type}) - return s3r
diff --git a/tools/post_pr_test.py b/tools/post_pr_test.py deleted file mode 100755 --- a/tools/post_pr_test.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -"""Post the results of a pull request test to Github. -""" -from test_pr import TestRun - -testrun = TestRun.load_results() -testrun.post_logs() -testrun.print_results() -testrun.post_results_comment() - -print() -print("Posted test results to pull request") -print(" " + testrun.pr['html_url']) diff --git a/tools/test_pr.py b/tools/test_pr.py deleted file mode 100755 --- a/tools/test_pr.py +++ /dev/null @@ -1,355 +0,0 @@ -#!/usr/bin/env python -""" -This is a script for testing pull requests for NetworkX. It merges the pull -request with current master, installs and tests on all available versions of -Python, and posts the results to Gist if any tests fail. - -This script is heavily based on IPython's test_pr.py and friends. See: - -http://github.com/ipython/ipython/tree/master/tools - -Usage: - python test_pr.py 742 -""" -from __future__ import print_function - -import errno -from glob import glob -import io -import json -import os -import pickle -import re -import requests -import shutil -import time -from subprocess import call, check_call, check_output, PIPE, STDOUT, CalledProcessError -import sys - -import gh_api -from gh_api import Obj - -basedir = os.path.join(os.path.expanduser("~"), ".nx_pr_tests") -repodir = os.path.join(basedir, "networkx") -nx_repository = 'git://github.com/networkx/networkx.git' -nx_http_repository = 'http://github.com/networkx/networkx.git' -gh_project="networkx/networkx" - -# TODO Add PyPy support -supported_pythons = ['python2.6', 'python2.7', 'python3.2','python3.3'] - -# Report missing libraries during tests and number of skipped -# and passed tests. -missing_libs_re = re.compile('SKIP: (\w+) not available') -def get_missing_libraries(log): - libs = set() - for line in log.split('\n'): - m = missing_libs_re.search(line) - if m: - libs.add(m.group(1).lower()) - if libs: - return ", ".join(libs) - -skipped_re = re.compile('SKIP=(\d+)') -def get_skipped(log): - m = skipped_re.search(log) - if m: - return m.group(1) - -number_tests_re = re.compile('Ran (\d+) tests in') -def get_number_tests(log): - m = number_tests_re.search(log) - if m: - return m.group(1) - - -class TestRun(object): - def __init__(self, pr_num): - self.unavailable_pythons = [] - self.venvs = [] - self.pr_num = pr_num - - self.pr = gh_api.get_pull_request(gh_project, pr_num) - - self.setup() - - self.results = [] - - def available_python_versions(self): - """Get the executable names of available versions of Python on the system. 
- """ - for py in supported_pythons: - try: - check_call([py, '-c', 'import nose'], stdout=PIPE) - yield py - except (OSError, CalledProcessError): - self.unavailable_pythons.append(py) - - def setup(self): - """Prepare the repository and virtualenvs.""" - try: - os.mkdir(basedir) - except OSError as e: - if e.errno != errno.EEXIST: - raise - os.chdir(basedir) - - # Delete virtualenvs and recreate - for venv in glob('venv-*'): - shutil.rmtree(venv) - for py in self.available_python_versions(): - check_call(['virtualenv', '-p', py, - '--system-site-packages', 'venv-%s' % py]) - - self.venvs.append((py, 'venv-%s' % py)) - - # Check out and update the repository - if not os.path.exists('networkx'): - try : - check_call(['git', 'clone', nx_repository]) - except CalledProcessError: - check_call(['git', 'clone', nx_http_repository]) - os.chdir(repodir) - check_call(['git', 'checkout', 'master']) - try : - check_call(['git', 'pull', 'origin', 'master']) - except CalledProcessError: - check_call(['git', 'pull', nx_http_repository, 'master']) - self.master_sha = check_output(['git', 'log', '-1', - '--format=%h']).decode('ascii').strip() - os.chdir(basedir) - - def get_branch(self): - repo = self.pr['head']['repo']['clone_url'] - branch = self.pr['head']['ref'] - owner = self.pr['head']['repo']['owner']['login'] - mergeable = self.pr['mergeable'] - - os.chdir(repodir) - if mergeable: - merged_branch = "%s-%s" % (owner, branch) - # Delete the branch first - call(['git', 'branch', '-D', merged_branch]) - check_call(['git', 'checkout', '-b', merged_branch]) - check_call(['git', 'pull', '--no-ff', '--no-commit', repo, branch]) - check_call(['git', 'commit', '-m', "merge %s/%s" % (repo, branch)]) - else: - # Fetch the branch without merging it. - check_call(['git', 'fetch', repo, branch]) - check_call(['git', 'checkout', 'FETCH_HEAD']) - os.chdir(basedir) - - def markdown_format(self): - def format_result(result): - s = "* %s: " % result.py - if result.passed: - s += "%s OK (SKIP=%s) Ran %s tests" % \ - (ok, result.skipped, result.num_tests) - else: - s += "%s Failed, log at %s" % (fail, result.log_url) - if result.missing_libraries: - s += " (libraries not available: " + result.missing_libraries + ")" - return s - pr_num = self.pr_num - branch = self.pr['head']['ref'] - branch_url = self.pr['head']['repo']['html_url'] + '/tree/' + branch - owner = self.pr['head']['repo']['owner']['login'] - mergeable = self.pr['mergeable'] - master_sha = self.master_sha - branch_sha = self.pr['head']['sha'][:7] - ok = ':eight_spoked_asterisk:' - fail = ':red_circle:' - - header = "**NetworkX: Test results for pull request #%s " % pr_num - header += "([%s '%s' branch](%s))**" % (owner, branch, branch_url) - if mergeable: - mrg = "%s This pull request can be merged cleanly " % ok - else: - mrg = "%s This pull request **cannot** be merged cleanly " % fail - mrg += "(commit %s into NetworkX master %s)" % (branch_sha, master_sha) - lines = [header, - mrg, - "Platform: " + sys.platform, - ""] + \ - [format_result(r) for r in self.results] - if self.unavailable_pythons: - lines += ["", - "Not available for testing: " \ - + ", ".join(self.unavailable_pythons)] - return "\n".join(lines) - - def post_results_comment(self): - body = self.markdown_format() - gh_api.post_issue_comment(gh_project, self.pr_num, body) - - def print_results(self): - pr_num = self.pr_num - branch = self.pr['head']['ref'] - branch_url = self.pr['head']['repo']['html_url'] + '/tree/' + branch - owner = self.pr['head']['repo']['owner']['login'] - mergeable 
= self.pr['mergeable'] - master_sha = self.master_sha - branch_sha = self.pr['head']['sha'][:7] - - print("\n") - print("**NetworkX: Test results for pull request %s " % pr_num, - "(%s '%s' branch at %s)**" % (owner, branch, branch_url)) - if mergeable: - mrg = "OK: This pull request can be merged cleanly " - else: - mrg = "FAIL: This pull request **cannot** be merged cleanly " - mrg += "(commit %s into NetworkX master %s)" % (branch_sha, master_sha) - print(mrg) - print("Platform:", sys.platform) - for result in self.results: - if result.passed: - print(result.py, ":", "OK (SKIP=%s) Ran %s tests" % \ - (result.skipped, result.num_tests)) - else: - print(result.py, ":", "Failed") - print(" Test log:", result.get('log_url') or result.log_file) - if result.missing_libraries: - print(" Libraries not available:", result.missing_libraries) - if self.unavailable_pythons: - print("Not available for testing:", - ", ".join(self.unavailable_pythons)) - - def dump_results(self): - with open(os.path.join(basedir, 'lastresults.pkl'), 'wb') as f: - pickle.dump(self, f) - - @staticmethod - def load_results(): - with open(os.path.join(basedir, 'lastresults.pkl'), 'rb') as f: - return pickle.load(f) - - def save_logs(self): - for result in self.results: - if not result.passed: - result_locn = os.path.abspath(os.path.join('venv-%s' % result.py, - self.pr['head']['sha'][:7]+".log")) - with io.open(result_locn, 'w', encoding='utf-8') as f: - f.write(result.log) - - result.log_file = result_locn - - def post_logs(self): - for result in self.results: - if not result.passed: - result.log_url = gh_api.post_gist(result.log, - description='NetworkX test log', - filename="results.log", auth=True) - - def run(self): - for py, venv in self.venvs: - tic = time.time() - passed, log = run_tests(venv) - elapsed = int(time.time() - tic) - print("Ran tests with %s in %is" % (py, elapsed)) - missing_libraries = get_missing_libraries(log) - skipped = get_skipped(log) - num_tests = get_number_tests(log) - - self.results.append(Obj(py=py, - passed=passed, - log=log, - missing_libraries=missing_libraries, - skipped=skipped, - num_tests=num_tests - ) - ) - - -def run_tests(venv): - version = venv.split('-')[1] - py = os.path.join(basedir, venv, 'bin', 'python') - os.chdir(repodir) - # cleanup build-dir - if os.path.exists('build'): - shutil.rmtree('build') - #tic = time.time() - print ("\nInstalling NetworkX with %s" % py) - logfile = os.path.join(basedir, venv, 'install.log') - print ("Install log at %s" % logfile) - with open(logfile, 'wb') as f: - check_call([py, 'setup.py', 'install'], stderr=STDOUT, stdout=f) - #toc = time.time() - #print ("Installed NetworkX in %.1fs" % (toc-tic)) - os.chdir(basedir) - - # Remove PYTHONPATH if present - os.environ.pop("PYTHONPATH", None) - - # check that the right NetworkX is imported. Also catch exception if - # the pull request breaks "import networkx as nx" - try: - cmd_file = [py, '-c', 'import networkx as nx; print(nx.__file__)'] - nx_file = check_output(cmd_file, stderr=STDOUT) - except CalledProcessError as e: - return False, e.output.decode('utf-8') - - nx_file = nx_file.strip().decode('utf-8') - if not nx_file.startswith(os.path.join(basedir, venv)): - msg = u"NetworkX does not appear to be in the venv: %s" % nx_file - msg += u"\nDo you use setupegg.py develop?" - print(msg, file=sys.stderr) - return False, msg - - # Run tests: this is different than in ipython's test_pr, they use - # a script for running their tests. 
It gets installed at - # os.path.join(basedir, venv, 'bin', 'iptest') - print("\nRunning tests with %s ..." % version) - cmd = [py, '-c', 'import networkx as nx; nx.test(verbosity=2,doctest=True)'] - try: - return True, check_output(cmd, stderr=STDOUT).decode('utf-8') - except CalledProcessError as e: - return False, e.output.decode('utf-8') - - -def test_pr(num, post_results=True): - # Get Github authorisation first, so that the user is prompted straight away - # if their login is needed. - if post_results: - gh_api.get_auth_token() - - testrun = TestRun(num) - - testrun.get_branch() - - testrun.run() - - testrun.dump_results() - - testrun.save_logs() - testrun.print_results() - - if post_results: - results_urls = testrun.post_logs() - testrun.post_results_comment() - print("(Posted to Github)") - else: - post_script = os.path.join(os.path.dirname(sys.argv[0]), "post_pr_test.py") - print("To post the results to Github, run", post_script) - - -if __name__ == '__main__': - import argparse - parser = argparse.ArgumentParser(description="Test a pull request for NetworkX") - parser.add_argument('-p', '--publish', action='store_true', - help="Publish the results to Github") - parser.add_argument('number', type=int, help="The pull request number") - - args = parser.parse_args() - - # Test for requests version. - import requests - major, minor, rev = map(int, requests.__version__.split('.')) - if major == 0 and minor < 10: - print("test_pr.py:") - print("The requests python library must be version 0.10.0", - "or above, you have version", - "{0}.{1}.{2} installed".format(major, minor, rev)) - print() - sys.exit(1) - - test_pr(args.number, post_results=args.publish)
The tools/ directory is out-of-date

These files are dated and no longer used, now that Travis is used for continuous integration. The files have also not been updated to handle recent versions of Python. This line in `/tools/test_pr.py` makes it clear:

```python
supported_pythons = ['python2.6', 'python2.7', 'python3.2','python3.3']
```
I generally agree. The functions from `tools/test_pr.py` and `post_pr_test.py` seem to be covered by Travis. The functions from `tools/gh_api.py` are provided by other libraries (search "GitHub command line interface" and you'll find several).
2016-11-03T18:52:00
networkx/networkx
2,335
networkx__networkx-2335
[ "2327" ]
3efa5c5ace935ae83cea3583fbc6c3b442d4dd9b
diff --git a/networkx/algorithms/link_analysis/pagerank_alg.py b/networkx/algorithms/link_analysis/pagerank_alg.py --- a/networkx/algorithms/link_analysis/pagerank_alg.py +++ b/networkx/algorithms/link_analysis/pagerank_alg.py @@ -35,8 +35,9 @@ def pagerank(G, alpha=0.85, personalization=None, personalization: dict, optional The "personalization vector" consisting of a dictionary with a - key for every graph node and personalization value for each node. + key some subset of graph nodes and personalization value each of those. At least one personalization value must be non-zero. + If not specfiied, a nodes personalization value will be zero. By default, a uniform distribution is used. max_iter : integer, optional @@ -129,11 +130,6 @@ def pagerank(G, alpha=0.85, personalization=None, # Assign uniform personalization vector if not given p = dict.fromkeys(W, 1.0 / N) else: - missing = set(G) - set(personalization) - if missing: - raise NetworkXError('Personalization dictionary ' - 'must have a value for every node. ' - 'Missing nodes %s' % missing) s = float(sum(personalization.values())) p = dict((k, v / s) for k, v in personalization.items()) @@ -141,11 +137,6 @@ def pagerank(G, alpha=0.85, personalization=None, # Use personalization vector if dangling vector not specified dangling_weights = p else: - missing = set(G) - set(dangling) - if missing: - raise NetworkXError('Dangling node dictionary ' - 'must have a value for every node. ' - 'Missing nodes %s' % missing) s = float(sum(dangling.values())) dangling_weights = dict((k, v/s) for k, v in dangling.items()) dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0] @@ -160,7 +151,7 @@ def pagerank(G, alpha=0.85, personalization=None, # doing a left multiply x^T=xlast^T*W for nbr in W[n]: x[nbr] += alpha * xlast[n] * W[n][nbr][weight] - x[n] += danglesum * dangling_weights[n] + (1.0 - alpha) * p[n] + x[n] += danglesum * dangling_weights.get(n,0) + (1.0 - alpha) * p.get(n,0) # check convergence, l1 norm err = sum([abs(x[n] - xlast[n]) for n in x]) if err < N*tol: @@ -183,7 +174,9 @@ def google_matrix(G, alpha=0.85, personalization=None, personalization: dict, optional The "personalization vector" consisting of a dictionary with a - key for every graph node and nonzero personalization value for each node. + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specfiied, a nodes personalization value will be zero. By default, a uniform distribution is used. nodelist : list, optional @@ -238,25 +231,15 @@ def google_matrix(G, alpha=0.85, personalization=None, if personalization is None: p = np.repeat(1.0 / N, N) else: - missing = set(nodelist) - set(personalization) - if missing: - raise NetworkXError('Personalization vector dictionary ' - 'must have a value for every node. ' - 'Missing nodes %s' % missing) - p = np.array([personalization[n] for n in nodelist], dtype=float) + p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float) p /= p.sum() # Dangling nodes if dangling is None: dangling_weights = p else: - missing = set(nodelist) - set(dangling) - if missing: - raise NetworkXError('Dangling node dictionary ' - 'must have a value for every node. 
' - 'Missing nodes %s' % missing) # Convert the dangling dictionary into an array in nodelist order - dangling_weights = np.array([dangling[n] for n in nodelist], + dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float) dangling_weights /= dangling_weights.sum() dangling_nodes = np.where(M.sum(axis=1) == 0)[0] @@ -288,9 +271,11 @@ def pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight', Damping parameter for PageRank, default=0.85. personalization: dict, optional - The "personalization vector" consisting of a dictionary with a - key for every graph node and nonzero personalization value for each - node. By default, a uniform distribution is used. + The "personalization vector" consisting of a dictionary with a + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specfiied, a nodes personalization value will be zero. + By default, a uniform distribution is used. weight : key, optional Edge data key to use as weight. If None weights are set to 1. @@ -370,9 +355,11 @@ def pagerank_scipy(G, alpha=0.85, personalization=None, Damping parameter for PageRank, default=0.85. personalization: dict, optional - The "personalization vector" consisting of a dictionary with a - key for every graph node and nonzero personalization value for each - node. By default, a uniform distribution is used. + The "personalization vector" consisting of a dictionary with a + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specfiied, a nodes personalization value will be zero. + By default, a uniform distribution is used. max_iter : integer, optional Maximum number of iterations in power method eigenvalue solver. @@ -452,26 +439,15 @@ def pagerank_scipy(G, alpha=0.85, personalization=None, if personalization is None: p = scipy.repeat(1.0 / N, N) else: - missing = set(nodelist) - set(personalization) - if missing: - raise NetworkXError('Personalization vector dictionary ' - 'must have a value for every node. ' - 'Missing nodes %s' % missing) - p = scipy.array([personalization[n] for n in nodelist], - dtype=float) + p = scipy.array([personalization.get(n, 0) for n in nodelist], dtype=float) p = p / p.sum() # Dangling nodes if dangling is None: dangling_weights = p else: - missing = set(nodelist) - set(dangling) - if missing: - raise NetworkXError('Dangling node dictionary ' - 'must have a value for every node. ' - 'Missing nodes %s' % missing) # Convert the dangling dictionary into an array in nodelist order - dangling_weights = scipy.array([dangling[n] for n in nodelist], + dangling_weights = scipy.array([dangling.get(n, 0) for n in nodelist], dtype=float) dangling_weights /= dangling_weights.sum() is_dangling = scipy.where(S == 0)[0]
diff --git a/networkx/algorithms/link_analysis/tests/test_pagerank.py b/networkx/algorithms/link_analysis/tests/test_pagerank.py --- a/networkx/algorithms/link_analysis/tests/test_pagerank.py +++ b/networkx/algorithms/link_analysis/tests/test_pagerank.py @@ -71,13 +71,7 @@ def test_google_matrix(self): p = numpy.array(ev[:, 0] / ev[:, 0].sum())[:, 0] for (a, b) in zip(p, self.G.pagerank.values()): assert_almost_equal(a, b) - - personalize = dict((n, random.random()) for n in G) - M = networkx.google_matrix(G, alpha=0.9, personalization=personalize) - personalize.pop(1) - assert_raises(networkx.NetworkXError, networkx.google_matrix, G, - personalization=personalize) - + def test_personalization(self): G = networkx.complete_graph(4) personalize = {0: 1, 1: 1, 2: 4, 3: 4} @@ -85,9 +79,6 @@ def test_personalization(self): p = networkx.pagerank(G, alpha=0.85, personalization=personalize) for n in G: assert_almost_equal(p[n], answer[n], places=4) - personalize.pop(0) - assert_raises(networkx.NetworkXError, networkx.pagerank, G, - personalization=personalize) def test_zero_personalization_vector(self): G = networkx.complete_graph(4) @@ -102,7 +93,15 @@ def test_one_nonzero_personalization_value(self): p = networkx.pagerank(G, alpha=0.85, personalization=personalize) for n in G: assert_almost_equal(p[n], answer[n], places=4) - + + def test_incomplete_personalization(self): + G = networkx.complete_graph(4) + personalize = {3: 1} + answer = {0: 0.22077931820379187, 1: 0.22077931820379187, 2: 0.22077931820379187, 3: 0.3376620453886241} + p = networkx.pagerank(G, alpha=0.85, personalization=personalize) + for n in G: + assert_almost_equal(p[n], answer[n], places=4) + def test_dangling_matrix(self): """ Tests that the google_matrix doesn't change except for the dangling
Personalization in PageRank

In https://github.com/networkx/networkx/blob/master/networkx/algorithms/link_analysis/pagerank_alg.py, all algorithms require the personalization dict to contain a value for every node. In my application, most values are 0. I suspect that in a lot of applications, including the default, most nodes will have the same probability. Furthermore, for all but the PageRank algorithm, the docstring suggests that each of these values needs to be non-zero. As far as I know, there is no technical or mathematical basis for this, and it should be "at least one non-zero value".

My proposal:

- Add a parameter specifying a default value for personalization.
- Do not force the presence of all keys in personalization; instead use the default value for non-existent keys.
- Rewrite the docstrings to specify "at least one non-zero value".

I'll be happy to implement this. Feedback appreciated. :)
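A minimal sketch of the proposed behavior, mirroring the `test_incomplete_personalization` case added in the patch; omitted nodes are assumed to default to a personalization value of 0:

```python
import networkx as nx

G = nx.complete_graph(4)
# Only node 3 is personalized; with the proposed change the other
# nodes implicitly get personalization value 0.
p = nx.pagerank(G, alpha=0.85, personalization={3: 1})
print(p)  # node 3 receives the largest score (~0.338 per the added test)
```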
I'm not sure why the documentation is unclear. It should be "at least one non-zero value" as you suggest. I'm less excited about adding another keyword parameter for a default value. In your use case the default is 0. How about just allowing a dictionary with only some of the node keys and the rest get set to zero inside the pagerank functions? If a user requires a different default from 0 then the full dictionary would need to be specified.

The use case for a default value would be "values x, y, z for these nodes, then uniform over the rest". I do agree with your reluctance for another parameter, and I suppose the above use case is uncommon enough to require a full dict for it. I'll adopt your suggestion when implementing.

It's reasonably easy to do that using dict.fromkeys():

```python
In [1]: import networkx as nx

In [2]: G = nx.path_graph(10)

In [3]: d = dict.fromkeys(G,0.5)

In [4]: d
Out[4]: {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5, 6: 0.5, 7: 0.5, 8: 0.5, 9: 0.5}

In [5]: d.update({0:1,2:1})

In [6]: d
Out[6]: {0: 1, 1: 0.5, 2: 1, 3: 0.5, 4: 0.5, 5: 0.5, 6: 0.5, 7: 0.5, 8: 0.5, 9: 0.5}
```
2016-12-18T11:22:52
networkx/networkx
2,386
networkx__networkx-2386
[ "2384" ]
464bf8fc08ffa09cfd0183fb5cae1adfe6839e12
diff --git a/networkx/algorithms/bipartite/matching.py b/networkx/algorithms/bipartite/matching.py --- a/networkx/algorithms/bipartite/matching.py +++ b/networkx/algorithms/bipartite/matching.py @@ -300,7 +300,8 @@ def recurse(v): recurse(v) -def _is_connected_by_alternating_path(G, v, matching, targets): +def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges, + targets): """Returns True if and only if the vertex `v` is connected to one of the target vertices by an alternating path in `G`. @@ -314,65 +315,56 @@ def _is_connected_by_alternating_path(G, v, matching, targets): `v` is a vertex in `G`. - `matching` is a dictionary representing a maximum matching in `G`, as - returned by, for example, :func:`maximum_matching`. + `matched_edges` is a set of edges present in a maximum matching in `G`. + + `unmatched_edges` is a set of edges not present in a maximum + matching in `G`. `targets` is a set of vertices. """ - # Get the set of matched edges and the set of unmatched edges. Only include - # one version of each undirected edge (for example, include edge (1, 2) but - # not edge (2, 1)). - matched_edges = {(u, v) for u, v in matching.items() if u <= v} - unmatched_edges = set(G.edges()) - matched_edges - - def _alternating_dfs(u, depth, along_matched=True): + def _alternating_dfs(u, along_matched=True): """Returns True if and only if `u` is connected to one of the targets by an alternating path. `u` is a vertex in the graph `G`. - `depth` specifies the maximum recursion depth of the depth-first - search. - If `along_matched` is True, this step of the depth-first search will continue only through edges in the given matching. Otherwise, it will continue only through edges *not* in the given matching. """ - # Base case 1: u is one of the target vertices. `u` is connected to one - # of the target vertices by an alternating path of length zero. - if u in targets: - return True - # Base case 2: we have exceeded are allowed depth. In this case, we - # have looked at a path of length `n`, so looking any further won't - # help. - if depth < 0: - return False - # Determine which set of edges to look across. - valid_edges = matched_edges if along_matched else unmatched_edges - for v in G[u]: - # Consider only those neighbors connected via a valid edge. - if (u, v) in valid_edges or (v, u) in valid_edges: - # Recursively perform a depth-first search starting from the - # neighbor. Decrement the depth limit and switch which set of - # vertices will be valid for next time. - return _alternating_dfs(v, depth - 1, not along_matched) - # If there are no more vertices to look through and we haven't yet - # found a target vertex, simply say that no path exists. + if along_matched: + edges = itertools.cycle([matched_edges, unmatched_edges]) + else: + edges = itertools.cycle([unmatched_edges, matched_edges]) + visited = set() + stack = [(u, iter(G[u]), next(edges))] + while stack: + parent, children, valid_edges = stack[-1] + try: + child = next(children) + if child not in visited: + if ((parent, child) in valid_edges + or (child, parent) in valid_edges): + if child in targets: + return True + visited.add(child) + stack.append((child, iter(G[child]), next(edges))) + except StopIteration: + stack.pop() return False # Check for alternating paths starting with edges in the matching, then # check for alternating paths starting with edges not in the - # matching. Initiate the depth-first search with the current depth equal to - # the number of nodes in the graph. 
- return (_alternating_dfs(v, len(G), along_matched=True) or - _alternating_dfs(v, len(G), along_matched=False)) + # matching. + return (_alternating_dfs(v, along_matched=True) or + _alternating_dfs(v, along_matched=False)) def _connected_by_alternating_paths(G, matching, targets): """Returns the set of vertices that are connected to one of the target - vertices by an alternating path in `G`. + vertices by an alternating path in `G` or are themselves a target. An *alternating path* is a path in which every other edge is in the specified maximum matching (and the remaining edges in the path are not in @@ -388,9 +380,18 @@ def _connected_by_alternating_paths(G, matching, targets): `targets` is a set of vertices. """ - # TODO This can be parallelized. - return {v for v in G if _is_connected_by_alternating_path(G, v, matching, - targets)} + # Get the set of matched edges and the set of unmatched edges. Only include + # one version of each undirected edge (for example, include edge (1, 2) but + # not edge (2, 1)). Using frozensets as an intermediary step we do not + # require nodes to be orderable. + edge_sets = {frozenset((u, v)) for u, v in matching.items()} + matched_edges = {tuple(edge) for edge in edge_sets} + unmatched_edges = {(u, v) for (u, v) in G.edges() + if frozenset((u, v)) not in edge_sets} + + return {v for v in G if v in targets or + _is_connected_by_alternating_path(G, v, matched_edges, + unmatched_edges, targets)} def to_vertex_cover(G, matching, top_nodes=None):
diff --git a/networkx/algorithms/bipartite/tests/test_matching.py b/networkx/algorithms/bipartite/tests/test_matching.py --- a/networkx/algorithms/bipartite/tests/test_matching.py +++ b/networkx/algorithms/bipartite/tests/test_matching.py @@ -166,6 +166,25 @@ def test_issue_2127(self): independent_set = set(G) - {v for _, v in vertex_cover} assert_equal({'B', 'D', 'F', 'I', 'H'}, independent_set) + def test_vertex_cover_issue_2384(self): + G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert_true(u in vertex_cover or v in vertex_cover) + + def test_unorderable_nodes(self): + a = object() + b = object() + c = object() + d = object() + e = object() + G = nx.Graph([(a, d), (b, d), (b, e), (c, d)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert_true(u in vertex_cover or v in vertex_cover) + def test_eppstein_matching(): """Test in accordance to issue #1927"""
bipartite.to_vertex_cover() gives incorrect results

This program:

```python
import networkx as nx

print('Using version ' + nx.__version__)

G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)])
print('Edges: ' + repr(list(G.edges())))
assert nx.is_bipartite(G)
assert nx.is_connected(G)

matching = nx.bipartite.maximum_matching(G)
print('Matching: ' + repr(matching))
for u, v in matching.items():
    assert matching[v] == u

vertex_cover = nx.bipartite.to_vertex_cover(G, matching)
print('Vertex cover: ' + repr(vertex_cover))
for u, v in G.edges():
    assert u in vertex_cover or v in vertex_cover
```

produces:

```
$ python2.7 vertex_cover.py
Using version 1.11
Edges: [(0, 3), (1, 3), (1, 4), (2, 3)]
Matching: {0: 3, 1: 4, 3: 0, 4: 1}
Vertex cover: set([0, 1])
Traceback (most recent call last):
  File "vertex_cover.py", line 16, in <module>
    assert u in vertex_cover or v in vertex_cover
AssertionError

$ python3.6 vertex_cover.py
Using version 1.11
Edges: [(0, 3), (3, 1), (3, 2), (1, 4)]
Matching: {0: 3, 1: 4, 3: 0, 4: 1}
Vertex cover: {0, 1}
Traceback (most recent call last):
  File "vertex_cover.py", line 16, in <module>
    assert u in vertex_cover or v in vertex_cover
AssertionError
```

The vertex cover is incorrect, as verified by the `assert`s. Current git version (21114082) fails in the same way.
Thanks for the report. It looks like a bug to me (I think in _is_connected_by_alternating_path). @jfinkels may be able to comment if that is the issue.

Yes, I think there is a bug in `_is_connected_by_alternating_path`, concretely in the inner function `_alternating_dfs` that checks if unmatched nodes are linked to other nodes by alternating paths. In this concrete example the unmatched node of the left node set is 2, which is linked by alternating paths to nodes 0 and 3, but the current implementation fails to report them and thus the vertex cover reported (`{0, 1}`) is incorrect.

It's not obvious to me why the inner function `_alternating_dfs` fails in this example. A possible solution could be to reimplement this inner function iteratively instead of recursively, like this:

```python
from itertools import cycle  # needed at module level for this snippet to run

def _alternating_dfs(u, along_matched=True):
    if along_matched:
        edges = cycle([matched_edges, unmatched_edges])
    else:
        edges = cycle([unmatched_edges, matched_edges])
    visited = set()
    stack = [(u, iter(G[u]), next(edges))]
    while stack:
        parent, children, valid_edges = stack[-1]
        try:
            child = next(children)
            if child not in visited:
                if (parent, child) in valid_edges or (child, parent) in valid_edges:
                    if child in targets:
                        return True
                    visited.add(child)
                    stack.append((child, iter(G[child]), next(edges)))
        except StopIteration:
            stack.pop()
    return False
```

I've checked this example with this approach and it works, as it reports a correct vertex cover (`{1, 3}`). I can prepare a pull request if this approach looks good. I'll also merge #2375 now as it has already been reviewed and would generate conflicts with a PR for fixing this bug.

Looks good. Please do make a PR.
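A short check of the fixed behavior, based on the expected cover `{1, 3}` mentioned above (assumes a networkx build that includes this fix):

```python
import networkx as nx

G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)])
matching = nx.bipartite.maximum_matching(G)
cover = nx.bipartite.to_vertex_cover(G, matching)
assert all(u in cover or v in cover for u, v in G.edges())
print(cover)  # {1, 3} per the discussion above
```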
2017-03-14T15:36:14
networkx/networkx
2,397
networkx__networkx-2397
[ "2342" ]
abff77d798b1fb0dfeb5186e26f926af68127056
diff --git a/networkx/drawing/nx_pylab.py b/networkx/drawing/nx_pylab.py
--- a/networkx/drawing/nx_pylab.py
+++ b/networkx/drawing/nx_pylab.py
@@ -38,7 +38,7 @@
            'draw_shell']
 
 
-def draw(G, pos=None, ax=None, hold=None, **kwds):
+def draw(G, pos=None, ax=None, **kwds):
     """Draw the graph G with Matplotlib.
 
     Draw the graph as a simple representation with no node
@@ -60,10 +60,6 @@ def draw(G, pos=None, ax=None, hold=None, **kwds):
     ax : Matplotlib Axes object, optional
        Draw the graph in specified Matplotlib axes.
 
-    hold : bool, optional
-       Set the Matplotlib hold state.  If True subsequent draw
-       commands will be added to the current axes.
-
     kwds : optional keywords
        See networkx.draw_networkx() for a description of optional keywords.
 
@@ -122,19 +118,13 @@ def draw(G, pos=None, ax=None, hold=None, **kwds):
     if 'with_labels' not in kwds:
         kwds['with_labels'] = 'labels' in kwds
 
-    b = plt.ishold()
-    # allow callers to override the hold state by passing hold=True|False
-    h = kwds.pop('hold', None)
-    if h is not None:
-        plt.hold(h)
+
     try:
         draw_networkx(G, pos=pos, ax=ax, **kwds)
         ax.set_axis_off()
         plt.draw_if_interactive()
     except:
-        plt.hold(b)
         raise
-    plt.hold(b)
     return
Matplotlib2.0 compatibility

There are some deprecation warnings with Matplotlib2.0rc2.

Triggering code:

````python3
# Networks graph Example :
# https://github.com/ipython/ipywidgets/blob/master/examples/Exploring%20Graphs.ipynb
%matplotlib inline
from ipywidgets import interact
import matplotlib.pyplot as plt
import networkx as nx

# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
    return nx.random_lobster(n, p, p / m)

def powerlaw_cluster(n, m, k, p):
    return nx.powerlaw_cluster_graph(n, m, p)

def erdos_renyi(n, m, k, p):
    return nx.erdos_renyi_graph(n, p)

def newman_watts_strogatz(n, m, k, p):
    return nx.newman_watts_strogatz_graph(n, k, p)

@interact(n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
          generator={'lobster': random_lobster,
                     'power law': powerlaw_cluster,
                     'Newman-Watts-Strogatz': newman_watts_strogatz,
                     u'Erdős-Rényi': erdos_renyi,
                     })
def plot_random_graph(n, m, k, p, generator):
    g = generator(n, m, k, p)
    nx.draw(g)
    plt.title(generator.__name__)
    plt.show()
````

Deprecation messages:

````
matplotlib\__init__.py:914: UserWarning: axes.hold is deprecated. Please remove it from your matplotlibrc and/or style files.
````

````
networkx\drawing\nx_pylab.py:126: MatplotlibDeprecationWarning: Future behavior will be consistent with the long-time default: plot commands add elements without first clearing the Axes and/or Figure.
  b = plt.ishold()
````
Hi, I am having a similar issue using it in ipython:

```
In [1]: import networkx as nx

In [2]: import matplotlib.pyplot as plt

In [3]: G = nx.Graph()

In [4]: G.add_nodes_from(range(1, 6))

In [7]: G.add_edge(1, 3)

In [8]: G.add_edge(3, 5)

In [9]: G.add_edge(5, 1)

In [10]: G.add_edge(2, 1)

In [11]: G.nodes()
Out[11]: [1, 2, 3, 4, 5]

In [12]: G.edges()
Out[12]: [(1, 3), (1, 5), (1, 2), (3, 5)]

In [13]: nx.draw(G)
```

And here is the output:

```
/usr/local/lib/python3.6/site-packages/networkx/drawing/nx_pylab.py:126: MatplotlibDeprecationWarning: pyplot.hold is deprecated. Future behavior will be consistent with the long-time default: plot commands add elements without first clearing the Axes and/or Figure.
  b = plt.ishold()
/usr/local/lib/python3.6/site-packages/networkx/drawing/nx_pylab.py:138: MatplotlibDeprecationWarning: pyplot.hold is deprecated. Future behavior will be consistent with the long-time default: plot commands add elements without first clearing the Axes and/or Figure.
  plt.hold(b)
/usr/local/lib/python3.6/site-packages/matplotlib/__init__.py:917: UserWarning: axes.hold is deprecated. Please remove it from your matplotlibrc and/or style files.
  warnings.warn(self.msg_depr_set % key)
/usr/local/lib/python3.6/site-packages/matplotlib/rcsetup.py:152: UserWarning: axes.hold is deprecated, will be removed in 3.0
  warnings.warn("axes.hold is deprecated, will be removed in 3.0")
```

PyPlot did create a plot, but it looks like some things were deprecated in Matplotlib 2.0.0.
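One way to confirm the fix, sketched under the assumption that the installed networkx contains this patch (turning all warnings into errors is a blunt check and may trip on unrelated warnings):

```python
import warnings
import matplotlib
matplotlib.use('Agg')  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
import networkx as nx

with warnings.catch_warnings():
    warnings.simplefilter('error')  # any deprecation warning now raises
    nx.draw(nx.path_graph(5))       # should no longer touch pyplot's hold state
plt.close('all')
```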
2017-03-21T10:31:32
networkx/networkx
2,416
networkx__networkx-2416
[ "2415" ]
6eb64b56844f3990de61b15d372b4fd266177a34
diff --git a/networkx/algorithms/centrality/subgraph_alg.py b/networkx/algorithms/centrality/subgraph_alg.py
--- a/networkx/algorithms/centrality/subgraph_alg.py
+++ b/networkx/algorithms/centrality/subgraph_alg.py
@@ -67,10 +67,10 @@ def subgraph_centrality_exp(G):
 
     Examples
     --------
-    (from [1]_)
+    (Example from [1]_)
    >>> G = nx.Graph([(1,2),(1,5),(1,8),(2,3),(2,8),(3,4),(3,6),(4,5),(4,7),(5,6),(6,7),(7,8)])
    >>> sc = nx.subgraph_centrality_exp(G)
-    >>> print(['%s %0.2f'%(node,sc[node]) for node in sc])
+    >>> print(['%s %0.2f'%(node,sc[node]) for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
 
     """
     # alternative implementation that calculates the matrix exponential
@@ -130,9 +130,10 @@ def subgraph_centrality(G):
 
     Examples
     --------
+    (Example from [1]_)
    >>> G = nx.Graph([(1,2),(1,5),(1,8),(2,3),(2,8),(3,4),(3,6),(4,5),(4,7),(5,6),(6,7),(7,8)])
    >>> sc = nx.subgraph_centrality(G)
-    >>> print(['%s %0.2f'%(node,sc[node]) for node in sc])
+    >>> print(['%s %0.2f'%(node,sc[node]) for node in sorted(sc)])
    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
 
     References
Travis with Python3.6 miniconda fails

Fails with a gdal conflict error. Can a conda expert help us figure out how to fix this?

```
UnsatisfiableError: The following specifications were found to be in conflict:
  - gdal -> libnetcdf 4.3.2 -> curl 7.38.0 -> openssl 1.0.1*
  - gdal -> numpy 1.9* -> python 2.7*
  - python 3.6*
Use "conda info <package>" to see the dependencies for each package.

CondaEnvironmentNotFoundError: Could not find environment: test-environment .
You can list all discoverable environments with `conda info --envs`.
```
2017-04-02T19:45:38
networkx/networkx
2,455
networkx__networkx-2455
[ "2454" ]
6363668959028df3b02b992f6354584ff23a3adc
diff --git a/networkx/algorithms/operators/product.py b/networkx/algorithms/operators/product.py
--- a/networkx/algorithms/operators/product.py
+++ b/networkx/algorithms/operators/product.py
@@ -413,6 +413,7 @@ def power(G, k):
     if k <= 0:
         raise ValueError('k must be a positive integer')
     H = nx.Graph()
+    H.add_nodes_from(G)
     # update BFS code to ignore self loops.
     for n in G:
         seen = {}  # level (number of hops) when seen in BFS
nx.power does not preserve zero-degree nodes

```shell
>>> G = nx.Graph()
>>> G.add_nodes_from([0, 1, 2])
>>> G.add_edge(0, 1)
>>> G2 = nx.power(G, 2)
>>> G.nodes()
[0, 1, 2]
>>> G2.nodes()
[0, 1]
```

Here `nx.power` fails to preserve the zero-degree node 2 in `G2`. This happens for every graph that contains zero-degree nodes. It is easy to see the problem: [the implementation](https://github.com/networkx/networkx/blob/master/networkx/algorithms/operators/product.py#L415-L434) adds nodes only as a byproduct of adding edges, and never actually initializes the result to contain all original nodes.

A simple fix would be to change [this line](https://github.com/networkx/networkx/blob/master/networkx/algorithms/operators/product.py#L415) to `H = G.copy()` or `H = nx.Graph(); H.add_nodes_from(G.nodes())`. I can make a quick PR if this fix makes sense.
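A minimal check of the patched behavior (assumes a networkx build that includes the `H.add_nodes_from(G)` fix above):

```python
import networkx as nx

G = nx.Graph()
G.add_nodes_from([0, 1, 2])
G.add_edge(0, 1)
G2 = nx.power(G, 2)
assert sorted(G2.nodes()) == [0, 1, 2]  # node 2 is preserved after the fix
```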
2017-05-16T21:28:04
networkx/networkx
2,471
networkx__networkx-2471
[ "1195" ]
8750d392cd6fe7f6b36ce86e77dca937c94ff146
diff --git a/networkx/algorithms/traversal/breadth_first_search.py b/networkx/algorithms/traversal/breadth_first_search.py --- a/networkx/algorithms/traversal/breadth_first_search.py +++ b/networkx/algorithms/traversal/breadth_first_search.py @@ -21,7 +21,7 @@ def generic_bfs_edges(G, source, neighbors=None): - """Iterates over edges in a breadth-first search. + """Iterate over edges in a breadth-first search. The breadth-first search begins at `source` and enqueues the neighbors of newly visited nodes specified by the `neighbors` @@ -78,7 +78,7 @@ def generic_bfs_edges(G, source, neighbors=None): def bfs_edges(G, source, reverse=False): - """Produce edges in a breadth-first-search starting at source. + """Iterate over edges in a breadth-first-search starting at source. Parameters ---------- diff --git a/networkx/algorithms/traversal/depth_first_search.py b/networkx/algorithms/traversal/depth_first_search.py --- a/networkx/algorithms/traversal/depth_first_search.py +++ b/networkx/algorithms/traversal/depth_first_search.py @@ -20,11 +20,12 @@ __all__ = ['dfs_edges', 'dfs_tree', 'dfs_predecessors', 'dfs_successors', - 'dfs_preorder_nodes','dfs_postorder_nodes', + 'dfs_preorder_nodes', 'dfs_postorder_nodes', 'dfs_labeled_edges'] + def dfs_edges(G, source=None): - """Produce edges in a depth-first-search (DFS). + """Iterate over edges in a depth-first-search (DFS). Parameters ---------- @@ -42,7 +43,7 @@ def dfs_edges(G, source=None): Examples -------- >>> G = nx.path_graph(3) - >>> print(list(nx.dfs_edges(G,0))) + >>> print(list(nx.dfs_edges(G, 0))) [(0, 1), (1, 2)] Notes @@ -52,30 +53,37 @@ def dfs_edges(G, source=None): If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges """ if source is None: - # produce edges for all components + # edges for all components nodes = G else: - # produce edges for components with source + # edges for components with source nodes = [source] - visited=set() + visited = set() for start in nodes: if start in visited: continue visited.add(start) - stack = [(start,iter(G[start]))] + stack = [(start, iter(G[start]))] while stack: - parent,children = stack[-1] + parent, children = stack[-1] try: child = next(children) if child not in visited: - yield parent,child + yield parent, child visited.add(child) - stack.append((child,iter(G[child]))) + stack.append((child, iter(G[child]))) except StopIteration: stack.pop() + def dfs_tree(G, source=None): """Return oriented tree constructed from a depth-first-search from source. @@ -94,7 +102,7 @@ def dfs_tree(G, source=None): Examples -------- >>> G = nx.path_graph(3) - >>> T = nx.dfs_tree(G,0) + >>> T = nx.dfs_tree(G, 0) >>> print(list(T.edges())) [(0, 1), (1, 2)] """ @@ -103,9 +111,10 @@ def dfs_tree(G, source=None): T.add_nodes_from(G) else: T.add_node(source) - T.add_edges_from(dfs_edges(G,source)) + T.add_edges_from(dfs_edges(G, source)) return T + def dfs_predecessors(G, source=None): """Return dictionary of predecessors in depth-first-search from source. @@ -125,7 +134,7 @@ def dfs_predecessors(G, source=None): Examples -------- >>> G = nx.path_graph(3) - >>> print(nx.dfs_predecessors(G,0)) + >>> print(nx.dfs_predecessors(G, 0)) {1: 0, 2: 1} Notes @@ -136,7 +145,7 @@ def dfs_predecessors(G, source=None): If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. 
""" - return dict((t,s) for s,t in dfs_edges(G,source=source)) + return dict((t, s) for s, t in dfs_edges(G, source=source)) def dfs_successors(G, source=None): @@ -158,7 +167,7 @@ def dfs_successors(G, source=None): Examples -------- >>> G = nx.path_graph(3) - >>> print(nx.dfs_successors(G,0)) + >>> print(nx.dfs_successors(G, 0)) {0: [1], 1: [2]} Notes @@ -170,13 +179,13 @@ def dfs_successors(G, source=None): repeatedly until all components in the graph are searched. """ d = defaultdict(list) - for s,t in dfs_edges(G,source=source): + for s, t in dfs_edges(G, source=source): d[s].append(t) return dict(d) -def dfs_postorder_nodes(G,source=None): - """Produce nodes in a depth-first-search post-ordering starting +def dfs_postorder_nodes(G, source=None): + """Iterate over nodes in a depth-first-search post-ordering starting from source. Parameters @@ -195,7 +204,7 @@ def dfs_postorder_nodes(G,source=None): Examples -------- >>> G = nx.path_graph(3) - >>> print(list(nx.dfs_postorder_nodes(G,0))) + >>> print(list(nx.dfs_postorder_nodes(G, 0))) [2, 1, 0] Notes @@ -205,16 +214,21 @@ def dfs_postorder_nodes(G,source=None): If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_labeled_edges """ post = (v for u, v, d in nx.dfs_labeled_edges(G, source=source) if d == 'reverse') # potential modification: chain source to end of post-ordering - # return chain(post,[source]) + # return chain(post, [source]) return post def dfs_preorder_nodes(G, source=None): - """Produce nodes in a depth-first-search pre-ordering starting + """Iterate over nodes in a depth-first-search pre-ordering starting from source. Parameters @@ -233,7 +247,7 @@ def dfs_preorder_nodes(G, source=None): Examples -------- >>> G = nx.path_graph(3) - >>> print(list(nx.dfs_preorder_nodes(G,0))) + >>> print(list(nx.dfs_preorder_nodes(G, 0))) [0, 1, 2] Notes @@ -243,16 +257,22 @@ def dfs_preorder_nodes(G, source=None): If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + + See Also + -------- + dfs_edges + dfs_postorder_nodes + dfs_labeled_edges """ pre = (v for u, v, d in nx.dfs_labeled_edges(G, source=source) if d == 'forward') # potential modification: chain source to beginning of pre-ordering - # return chain([source],pre) + # return chain([source], pre) return pre def dfs_labeled_edges(G, source=None): - """Produce edges in a depth-first-search (DFS) labeled by type. + """Iterate over edges in a depth-first-search (DFS) labeled by type. Parameters ---------- @@ -300,14 +320,19 @@ def dfs_labeled_edges(G, source=None): If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_postorder_nodes """ # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py # by D. Eppstein, July 2004. 
if source is None: - # produce edges for all components + # edges for all components nodes = G else: - # produce edges for components with source + # edges for components with source nodes = [source] visited = set() for start in nodes: @@ -315,9 +340,9 @@ def dfs_labeled_edges(G, source=None): continue yield start, start, 'forward' visited.add(start) - stack = [(start,iter(G[start]))] + stack = [(start, iter(G[start]))] while stack: - parent,children = stack[-1] + parent, children = stack[-1] try: child = next(children) if child in visited: @@ -325,7 +350,7 @@ def dfs_labeled_edges(G, source=None): else: yield parent, child, 'forward' visited.add(child) - stack.append((child,iter(G[child]))) + stack.append((child, iter(G[child]))) except StopIteration: stack.pop() if stack:
Documentation of networkx.traversal module The functions in `networkx.traversal` in 1.8 have some counterintuitive behavior, e.g. no return value of `dfs_edges`, and inconsistent use of terms, e.g. "return" and "produce" (what does "produce" mean? Is the return value backed by the graph's node/edge list?). Consistent terms could be used to distinguish between functions with return values and those without; at the least there shouldn't be a mix.
Some of those functions, like `dfs_edges`, return Python generators. This is often the most efficient way to iterate over the edges. If you want to expand the result to a list you can call `list(dfs_edges(G))`. The type of object returned is documented in the "Returns" section of the function. "Produce" is maybe not the best choice of verb for what this does. We could alternatively simply use "generate" since the function returns a "generator". OK... from this the following issues arise: - `dfs_edges` returns edge tuples (source x target) -> Do I have to construct a DiGraph to get a node list for the path? -> please add some use cases, explanations, and examples (it is again not intuitive that the return value is a sequence of tuples; in a DFS/BFS one expects a node list -> that doesn't hurt, but I strongly recommend documenting it) - `dfs_tree` returns a graph -> How is this graph related to doing a DFS (Do I get the path from `DiGraph.node_iter()`, `DiGraph.nodes()`, `networkx.magic_function(G)`?) Thanks for your hints. You don't have to answer the questions here (they're rather rhetorical and should serve as orientation to improve the docs) :) This is about [`algorithms/traversal/depth_first_search.py`](https://github.com/networkx/networkx/blob/master/networkx/algorithms/traversal/depth_first_search.py). It has changed a little since Jun 2014, but not the docs. I think it's easy to expand on `dfs_edges` by showing how a graph could be rebuilt using the edges or by showing how one could get nodes from the result. However I don't understand the proposal for `dfs_tree`: why should the function work on a list of nodes (as is the case for `DiGraph.nodes()` or `DiGraph.node_iter()`)? The docs look ok to me. And there are examples. Do they make sense to you? If so perhaps we can close this. The documentation has been approved apparently. I didn't check for completeness of docs.
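A minimal sketch of the generator behavior discussed above, using the real NetworkX traversal API on an arbitrary example graph:

```python
import networkx as nx

G = nx.path_graph(4)  # edges: 0-1, 1-2, 2-3

# dfs_edges returns a generator; expand it with list() to see the edges
print(list(nx.dfs_edges(G, 0)))           # [(0, 1), (1, 2), (2, 3)]

# A node ordering is available directly, no DiGraph construction needed
print(list(nx.dfs_preorder_nodes(G, 0)))  # [0, 1, 2, 3]

# dfs_tree packages the same traversal as an oriented tree
T = nx.dfs_tree(G, 0)
print(list(T.edges()))                    # [(0, 1), (1, 2), (2, 3)]
```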
2017-06-13T20:53:31
networkx/networkx
2,472
networkx__networkx-2472
[ "2271" ]
583170d9740c3d6742ff116207da6d58d2682221
diff --git a/networkx/classes/graph.py b/networkx/classes/graph.py --- a/networkx/classes/graph.py +++ b/networkx/classes/graph.py @@ -141,6 +141,10 @@ class Graph(object): >>> G[1][2]['weight'] = 4.7 >>> G.edge[1][2]['weight'] = 4 + Warning: assigning to `G.edge[u]` or `G.edge[u][v]` will almost certainly + corrupt the graph data structure. Use 3 sets of brackets as shown above. + (4 for multigraphs: `MG.edge[u][v][key][name] = value`) + **Shortcuts:** Many common graph features allow python syntax to speed reporting.
adding attr dict to non-existent edge causes graph inconsistency G.edge[node1][node2] = {"test": "test"} where G is of type networkx.classes.graph.Graph will add an edge from node1 to node2, but node1 will not be in node2's neighbor list (G.neighbors(node2)). This can lead to an inconsistent graph with an edge leading to a non-existent node. (tested on networkx 1.11)
The method for adding an edge to a graph is `G.add_edge(u, v)`. Modifying the underlying data-structure dictionaries directly will certainly cause inconsistencies, but there's not much we can do about that (that's a feature of Python). So I guess all we can say in this situation is "don't do that". Is there some change to the code or documentation that you are requesting here? I'll be honest, I didn't realize `G.edge[node1][node2] = {"test": "test"}` wasn't a supported way of doing it. I suppose some way of warning users not to use it would be best. Perhaps a note in the [Edge Attributes](https://networkx.readthedocs.io/en/latest/tutorial/tutorial.html#edge-attributes) section of the tutorial and in the class-level documentation for the `Graph` class, advising users to only read from `Graph.edge`, never write to it?
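A minimal sketch contrasting the supported and unsupported ways to attach edge attributes, assuming the 1.x API where `G.edge` is exposed:

```python
import networkx as nx

G = nx.Graph()

# Supported: let add_edge create the edge and its attribute dict
G.add_edge(1, 2, test="test")

# Supported: mutate the innermost dict of an *existing* edge
G[1][2]["test"] = "other"

# Unsupported: assigning at the second bracket level writes only one
# direction of the adjacency, so G.neighbors(3) would not contain 1
# G.edge[1][3] = {"test": "test"}   # corrupts the graph -- don't do this

print(G[1][2])  # {'test': 'other'}
```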
2017-06-13T21:33:23
networkx/networkx
2,473
networkx__networkx-2473
[ "2323" ]
90c1645ef6dd4f80a8a09fb4ec597d99ab57f1de
diff --git a/networkx/algorithms/cycles.py b/networkx/algorithms/cycles.py --- a/networkx/algorithms/cycles.py +++ b/networkx/algorithms/cycles.py @@ -1,14 +1,18 @@ -""" -======================== -Cycle finding algorithms -======================== -""" # Copyright (C) 2010-2012 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. +# +# Authors: Jon Olav Vik <[email protected]>, +# Dan Schult <[email protected]> +# Aric Hagberg <[email protected]> +""" +======================== +Cycle finding algorithms +======================== +""" from collections import defaultdict @@ -17,16 +21,13 @@ from networkx.algorithms.traversal.edgedfs import helper_funcs, edge_dfs __all__ = [ - 'cycle_basis','simple_cycles','recursive_simple_cycles', 'find_cycle' + 'cycle_basis', 'simple_cycles', 'recursive_simple_cycles', 'find_cycle' ] -__author__ = "\n".join(['Jon Olav Vik <[email protected]>', - 'Dan Schult <[email protected]>', - 'Aric Hagberg <[email protected]>']) @not_implemented_for('directed') @not_implemented_for('multigraph') -def cycle_basis(G,root=None): +def cycle_basis(G, root=None): """ Returns a list of cycles which form a basis for cycles of G. A basis for cycles of a network is a minimal collection of @@ -49,10 +50,10 @@ def cycle_basis(G,root=None): Examples -------- - >>> G=nx.Graph() + >>> G = nx.Graph() >>> nx.add_cycle(G, [0, 1, 2, 3]) >>> nx.add_cycle(G, [0, 3, 4, 5]) - >>> print(nx.cycle_basis(G,0)) + >>> print(nx.cycle_basis(G, 0)) [[3, 4, 5, 0], [1, 2, 3, 0]] Notes @@ -68,36 +69,36 @@ def cycle_basis(G,root=None): -------- simple_cycles """ - gnodes=set(G.nodes()) - cycles=[] + gnodes = set(G.nodes()) + cycles = [] while gnodes: # loop over connected components if root is None: - root=gnodes.pop() - stack=[root] - pred={root:root} - used={root:set()} + root = gnodes.pop() + stack = [root] + pred = {root: root} + used = {root: set()} while stack: # walk the spanning tree finding cycles - z=stack.pop() # use last-in so cycles easier to find - zused=used[z] + z = stack.pop() # use last-in so cycles easier to find + zused = used[z] for nbr in G[z]: if nbr not in used: # new node - pred[nbr]=z + pred[nbr] = z stack.append(nbr) - used[nbr]=set([z]) - elif nbr == z: # self loops + used[nbr] = set([z]) + elif nbr == z: # self loops cycles.append([z]) - elif nbr not in zused:# found a cycle - pn=used[nbr] - cycle=[nbr,z] - p=pred[z] + elif nbr not in zused: # found a cycle + pn = used[nbr] + cycle = [nbr, z] + p = pred[z] while p not in pn: cycle.append(p) - p=pred[p] + p = pred[p] cycle.append(p) cycles.append(cycle) used[nbr].add(z) - gnodes-=set(pred) - root=None + gnodes -= set(pred) + root = None return cycles @@ -125,7 +126,8 @@ def simple_cycles(G): Examples -------- - >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) >>> len(list(nx.simple_cycles(G))) 5 @@ -161,10 +163,10 @@ def simple_cycles(G): -------- cycle_basis """ - def _unblock(thisnode,blocked,B): - stack=set([thisnode]) + def _unblock(thisnode, blocked, B): + stack = set([thisnode]) while stack: - node=stack.pop() + node = stack.pop() if node in blocked: blocked.remove(node) stack.update(B[node]) @@ -173,51 +175,49 @@ def _unblock(thisnode,blocked,B): # Johnson's algorithm requires some ordering of the nodes. 
# We assign the arbitrary ordering given by the strongly connected comps # There is no need to track the ordering as each node removed as processed. - subG = type(G)(G.edges()) # save the actual graph so we can mutate it here - # We only take the edges because we do not want to - # copy edge and node attributes here. + # Also we save the actual graph so we can mutate it. We only take the + # edges because we do not want to copy edge and node attributes here. + subG = type(G)(G.edges()) sccs = list(nx.strongly_connected_components(subG)) while sccs: - scc=sccs.pop() + scc = sccs.pop() # order of scc determines ordering of nodes startnode = scc.pop() # Processing node runs "circuit" routine from recursive version - path=[startnode] - blocked = set() # vertex: blocked from search? - closed = set() # nodes involved in a cycle + path = [startnode] + blocked = set() # vertex: blocked from search? + closed = set() # nodes involved in a cycle blocked.add(startnode) - B=defaultdict(set) # graph portions that yield no elementary circuit - stack=[ (startnode,list(subG[startnode])) ] # subG gives component nbrs + B = defaultdict(set) # graph portions that yield no elementary circuit + stack = [(startnode, list(subG[startnode]))] # subG gives comp nbrs while stack: - thisnode,nbrs = stack[-1] + thisnode, nbrs = stack[-1] if nbrs: nextnode = nbrs.pop() -# print thisnode,nbrs,":",nextnode,blocked,B,path,stack,startnode -# f=raw_input("pause") if nextnode == startnode: yield path[:] closed.update(path) -# print "Found a cycle",path,closed +# print "Found a cycle", path, closed elif nextnode not in blocked: path.append(nextnode) - stack.append( (nextnode,list(subG[nextnode])) ) + stack.append((nextnode, list(subG[nextnode]))) closed.discard(nextnode) blocked.add(nextnode) continue # done with nextnode... look for more neighbors if not nbrs: # no more nbrs if thisnode in closed: - _unblock(thisnode,blocked,B) + _unblock(thisnode, blocked, B) else: for nbr in subG[thisnode]: if thisnode not in B[nbr]: B[nbr].add(thisnode) stack.pop() -# assert path[-1]==thisnode +# assert path[-1] == thisnode path.pop() # done processing this node subG.remove_node(startnode) - H=subG.subgraph(scc) # make smaller to avoid work in SCC routine + H = subG.subgraph(scc) # make smaller to avoid work in SCC routine sccs.extend(list(nx.strongly_connected_components(H))) @@ -245,7 +245,8 @@ def recursive_simple_cycles(G): Example: - >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) >>> nx.recursive_simple_cycles(G) [[0], [0, 1, 2], [0, 2], [1, 2], [2]] @@ -279,10 +280,10 @@ def _unblock(thisnode): _unblock(B[thisnode].pop()) def circuit(thisnode, startnode, component): - closed = False # set to True if elementary path is closed + closed = False # set to True if elementary path is closed path.append(thisnode) blocked[thisnode] = True - for nextnode in component[thisnode]: # direct successors of thisnode + for nextnode in component[thisnode]: # direct successors of thisnode if nextnode == startnode: result.append(path[:]) closed = True @@ -293,18 +294,18 @@ def circuit(thisnode, startnode, component): _unblock(thisnode) else: for nextnode in component[thisnode]: - if thisnode not in B[nextnode]: # TODO: use set for speedup? + if thisnode not in B[nextnode]: # TODO: use set for speedup? 
B[nextnode].append(thisnode) - path.pop() # remove thisnode from path + path.pop() # remove thisnode from path return closed - path = [] # stack of nodes in current path - blocked = defaultdict(bool) # vertex: blocked from search? - B = defaultdict(list) # graph portions that yield no elementary circuit - result = [] # list to accumulate the circuits found + path = [] # stack of nodes in current path + blocked = defaultdict(bool) # vertex: blocked from search? + B = defaultdict(list) # graph portions that yield no elementary circuit + result = [] # list to accumulate the circuits found # Johnson's algorithm requires some ordering of the nodes. # They might not be sortable so we assign an arbitrary ordering. - ordering=dict(zip(G,range(len(G)))) + ordering = dict(zip(G, range(len(G)))) for s in ordering: # Build the subgraph induced by s and following nodes in the ordering subgraph = G.subgraph(node for node in G @@ -312,18 +313,18 @@ def circuit(thisnode, startnode, component): # Find the strongly connected component in the subgraph # that contains the least node according to the ordering strongcomp = nx.strongly_connected_components(subgraph) - mincomp=min(strongcomp, - key=lambda nodes: min(ordering[n] for n in nodes)) + mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns)) component = G.subgraph(mincomp) if component: # smallest node in the component according to the ordering - startnode = min(component,key=ordering.__getitem__) + startnode = min(component, key=ordering.__getitem__) for node in component: blocked[node] = False B[node][:] = [] - dummy=circuit(startnode, startnode, component) + dummy = circuit(startnode, startnode, component) return result + def find_cycle(G, source=None, orientation='original'): """ Returns the edges of a cycle found via a directed, depth-first traversal. @@ -379,7 +380,7 @@ def find_cycle(G, source=None, orientation='original'): is also known as a polytree). >>> import networkx as nx - >>> G = nx.DiGraph([(0,1), (0,2), (1,2)]) + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) >>> try: ... find_cycle(G, orientation='original') ... except: @@ -391,32 +392,6 @@ def find_cycle(G, source=None, orientation='original'): """ out_edge, key, tailhead = helper_funcs(G, orientation) - def prune(edges, active_nodes): - # This edge results from backtracking. - # Pop until we get a node whose head equals the current tail. - # So for example, we might have: - # [(0,1), (1,2), (2,3)], (1,4) - # which must become: - # [(0,1)], (1,4) - while True: - try: - popped_edge = edges.pop() - except IndexError: - edges = [] - active_nodes = {tail} - break - else: - popped_head = tailhead(popped_edge)[1] - active_nodes.remove(popped_head) - - if edges: - previous_head = tailhead(edges[-1])[1] - if tail == previous_head: - break - else: - previous_head = None - return edges, active_nodes, previous_head - explored = set() cycle = [] final_node = None @@ -435,8 +410,31 @@ def prune(edges, active_nodes): for edge in edge_dfs(G, start_node, orientation): # Determine if this edge is a continuation of the active path. tail, head = tailhead(edge) + if head in explored: + # Then we've already explored it. No loop is possible. + continue if previous_head is not None and tail != previous_head: - edges, active_nodes, previous_head = prune(edges, active_nodes) + # This edge results from backtracking. + # Pop until we get a node whose head equals the current tail. 
+ # So for example, we might have: + # (0, 1), (1, 2), (2, 3), (1, 4) + # which must become: + # (0, 1), (1, 4) + while True: + try: + popped_edge = edges.pop() + except IndexError: + edges = [] + active_nodes = {tail} + break + else: + popped_head = tailhead(popped_edge)[1] + active_nodes.remove(popped_head) + + if edges: + last_head = tailhead(edges[-1])[1] + if tail == last_head: + break edges.append(edge) if head in active_nodes: @@ -445,11 +443,9 @@ def prune(edges, active_nodes): final_node = head break else: - previous_head = head seen.add(head) active_nodes.add(head) - if head in explored: - edges, active_nodes, previous_head = prune(edges, active_nodes) + previous_head = head if cycle: break @@ -469,4 +465,3 @@ def prune(edges, active_nodes): break return cycle[i:] -
diff --git a/networkx/algorithms/tests/test_cycles.py b/networkx/algorithms/tests/test_cycles.py --- a/networkx/algorithms/tests/test_cycles.py +++ b/networkx/algorithms/tests/test_cycles.py @@ -7,55 +7,58 @@ FORWARD = nx.algorithms.edgedfs.FORWARD REVERSE = nx.algorithms.edgedfs.REVERSE + class TestCycles: def setUp(self): - G=networkx.Graph() - nx.add_cycle(G, [0,1,2,3]) - nx.add_cycle(G, [0,3,4,5]) - nx.add_cycle(G, [0,1,6,7,8]) - G.add_edge(8,9) - self.G=G - - def is_cyclic_permutation(self,a,b): - n=len(a) - if len(b)!=n: + G = networkx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 3, 4, 5]) + nx.add_cycle(G, [0, 1, 6, 7, 8]) + G.add_edge(8, 9) + self.G = G + + def is_cyclic_permutation(self, a, b): + n = len(a) + if len(b) != n: return False - l=a+a - return any(l[i:i+n]==b for i in range(2*n-n+1)) + l = a + a + return any(l[i:i+n] == b for i in range(2 * n - n + 1)) def test_cycle_basis(self): - G=self.G - cy=networkx.cycle_basis(G,0) - sort_cy= sorted( sorted(c) for c in cy ) - assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5]]) - cy=networkx.cycle_basis(G,1) - sort_cy= sorted( sorted(c) for c in cy ) - assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5]]) - cy=networkx.cycle_basis(G,9) - sort_cy= sorted( sorted(c) for c in cy ) - assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5]]) + G = self.G + cy = networkx.cycle_basis(G, 0) + sort_cy = sorted(sorted(c) for c in cy) + assert_equal(sort_cy, [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]]) + cy = networkx.cycle_basis(G, 1) + sort_cy = sorted(sorted(c) for c in cy) + assert_equal(sort_cy, [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]]) + cy = networkx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy) + assert_equal(sort_cy, [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]]) # test disconnected graphs nx.add_cycle(G, "ABC") - cy=networkx.cycle_basis(G,9) - sort_cy= sorted(sorted(c) for c in cy[:-1]) + [sorted(cy[-1])] - assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5],['A','B','C']]) + cy = networkx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy[:-1]) + [sorted(cy[-1])] + assert_equal(sort_cy, [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5], + ['A', 'B', 'C']]) @raises(nx.NetworkXNotImplemented) def test_cycle_basis(self): - G=nx.DiGraph() - cy=networkx.cycle_basis(G,0) + G = nx.DiGraph() + cy = networkx.cycle_basis(G, 0) @raises(nx.NetworkXNotImplemented) def test_cycle_basis(self): - G=nx.MultiGraph() - cy=networkx.cycle_basis(G,0) + G = nx.MultiGraph() + cy = networkx.cycle_basis(G, 0) def test_simple_cycles(self): - G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]) - cc=sorted(nx.simple_cycles(G)) - ca=[[0], [0, 1, 2], [0, 2], [1, 2], [2]] + edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + G = nx.DiGraph(edges) + cc = sorted(nx.simple_cycles(G)) + ca = [[0], [0, 1, 2], [0, 2], [1, 2], [2]] for c in cc: - assert_true(any(self.is_cyclic_permutation(c,rc) for rc in ca)) + assert_true(any(self.is_cyclic_permutation(c, rc) for rc in ca)) @raises(nx.NetworkXNotImplemented) def test_simple_cycles_graph(self): @@ -64,88 +67,89 @@ def test_simple_cycles_graph(self): def test_unsortable(self): # TODO What does this test do? 
das 6/2013 - G=nx.DiGraph() - nx.add_cycle(G, ['a',1]) - c=list(nx.simple_cycles(G)) + G = nx.DiGraph() + nx.add_cycle(G, ['a', 1]) + c = list(nx.simple_cycles(G)) def test_simple_cycles_small(self): G = nx.DiGraph() - nx.add_cycle(G, [1,2,3]) - c=sorted(nx.simple_cycles(G)) - assert_equal(len(c),1) - assert_true(self.is_cyclic_permutation(c[0],[1,2,3])) - nx.add_cycle(G, [10,20,30]) - cc=sorted(nx.simple_cycles(G)) - ca=[[1,2,3],[10,20,30]] + nx.add_cycle(G, [1, 2, 3]) + c = sorted(nx.simple_cycles(G)) + assert_equal(len(c), 1) + assert_true(self.is_cyclic_permutation(c[0], [1, 2, 3])) + nx.add_cycle(G, [10, 20, 30]) + cc = sorted(nx.simple_cycles(G)) + ca = [[1, 2, 3], [10, 20, 30]] for c in cc: - assert_true(any(self.is_cyclic_permutation(c,rc) for rc in ca)) + assert_true(any(self.is_cyclic_permutation(c, rc) for rc in ca)) def test_simple_cycles_empty(self): G = nx.DiGraph() - assert_equal(list(nx.simple_cycles(G)),[]) + assert_equal(list(nx.simple_cycles(G)), []) def test_complete_directed_graph(self): # see table 2 in Johnson's paper - ncircuits=[1,5,20,84,409,2365,16064] - for n,c in zip(range(2,9),ncircuits): - G=nx.DiGraph(nx.complete_graph(n)) - assert_equal(len(list(nx.simple_cycles(G))),c) + ncircuits = [1, 5, 20, 84, 409, 2365, 16064] + for n, c in zip(range(2, 9), ncircuits): + G = nx.DiGraph(nx.complete_graph(n)) + assert_equal(len(list(nx.simple_cycles(G))), c) - def worst_case_graph(self,k): + def worst_case_graph(self, k): # see figure 1 in Johnson's paper # this graph has excactly 3k simple cycles - G=nx.DiGraph() - for n in range(2,k+2): - G.add_edge(1,n) - G.add_edge(n,k+2) - G.add_edge(2*k+1,1) - for n in range(k+2,2*k+2): - G.add_edge(n,2*k+2) - G.add_edge(n,n+1) - G.add_edge(2*k+3,k+2) - for n in range(2*k+3,3*k+3): - G.add_edge(2*k+2,n) - G.add_edge(n,3*k+3) - G.add_edge(3*k+3,2*k+2) + G = nx.DiGraph() + for n in range(2, k+2): + G.add_edge(1, n) + G.add_edge(n, k+2) + G.add_edge(2*k+1, 1) + for n in range(k+2, 2*k+2): + G.add_edge(n, 2*k+2) + G.add_edge(n, n+1) + G.add_edge(2*k+3, k+2) + for n in range(2*k+3, 3*k+3): + G.add_edge(2*k+2, n) + G.add_edge(n, 3*k+3) + G.add_edge(3*k+3, 2*k+2) return G def test_worst_case_graph(self): # see figure 1 in Johnson's paper - for k in range(3,10): - G=self.worst_case_graph(k) - l=len(list(nx.simple_cycles(G))) - assert_equal(l,3*k) + for k in range(3, 10): + G = self.worst_case_graph(k) + l = len(list(nx.simple_cycles(G))) + assert_equal(l, 3*k) def test_recursive_simple_and_not(self): - for k in range(2,10): - G=self.worst_case_graph(k) - cc=sorted(nx.simple_cycles(G)) - rcc=sorted(nx.recursive_simple_cycles(G)) - assert_equal(len(cc),len(rcc)) + for k in range(2, 10): + G = self.worst_case_graph(k) + cc = sorted(nx.simple_cycles(G)) + rcc = sorted(nx.recursive_simple_cycles(G)) + assert_equal(len(cc), len(rcc)) for c in cc: - assert_true(any(self.is_cyclic_permutation(c,rc) for rc in rcc)) + assert_true(any(self.is_cyclic_permutation(c, r) for r in rcc)) for rc in rcc: - assert_true(any(self.is_cyclic_permutation(rc,c) for c in cc)) + assert_true(any(self.is_cyclic_permutation(rc, c) for c in cc)) def test_simple_graph_with_reported_bug(self): - G=nx.DiGraph() - edges = [(0, 2), (0, 3), (1, 0), (1, 3), (2, 1), (2, 4), \ - (3, 2), (3, 4), (4, 0), (4, 1), (4, 5), (5, 0), \ - (5, 1), (5, 2), (5, 3)] + G = nx.DiGraph() + edges = [(0, 2), (0, 3), (1, 0), (1, 3), (2, 1), (2, 4), + (3, 2), (3, 4), (4, 0), (4, 1), (4, 5), (5, 0), + (5, 1), (5, 2), (5, 3)] G.add_edges_from(edges) - cc=sorted(nx.simple_cycles(G)) - 
assert_equal(len(cc),26) - rcc=sorted(nx.recursive_simple_cycles(G)) - assert_equal(len(cc),len(rcc)) + cc = sorted(nx.simple_cycles(G)) + assert_equal(len(cc), 26) + rcc = sorted(nx.recursive_simple_cycles(G)) + assert_equal(len(cc), len(rcc)) for c in cc: - assert_true(any(self.is_cyclic_permutation(c,rc) for rc in rcc)) + assert_true(any(self.is_cyclic_permutation(c, rc) for rc in rcc)) for rc in rcc: - assert_true(any(self.is_cyclic_permutation(rc,c) for c in cc)) + assert_true(any(self.is_cyclic_permutation(rc, c) for c in cc)) # These tests might fail with hash randomization since they depend on # edge_dfs. For more information, see the comments in: # networkx/algorithms/traversal/tests/test_edgedfs.py + class TestFindCycle(object): def setUp(self): self.nodes = [0, 1, 2, 3] @@ -158,13 +162,13 @@ def test_graph(self): def test_digraph(self): G = nx.DiGraph(self.edges) x = list(find_cycle(G, self.nodes)) - x_= [(0, 1), (1, 0)] + x_ = [(0, 1), (1, 0)] assert_equal(x, x_) def test_multigraph(self): G = nx.MultiGraph(self.edges) x = list(find_cycle(G, self.nodes)) - x_ = [(0, 1, 0), (1, 0, 1)] # or (1, 0, 2) + x_ = [(0, 1, 0), (1, 0, 1)] # or (1, 0, 2) # Hash randomization...could be any edge. assert_equal(x[0], x_[0]) assert_equal(x[1][:2], x_[1][:2]) @@ -172,7 +176,7 @@ def test_multigraph(self): def test_multidigraph(self): G = nx.MultiDiGraph(self.edges) x = list(find_cycle(G, self.nodes)) - x_ = [(0, 1, 0), (1, 0, 0)] # (1, 0, 1) + x_ = [(0, 1, 0), (1, 0, 0)] # (1, 0, 1) assert_equal(x[0], x_[0]) assert_equal(x[1][:2], x_[1][:2]) @@ -185,16 +189,16 @@ def test_digraph_ignore(self): def test_multidigraph_ignore(self): G = nx.MultiDiGraph(self.edges) x = list(find_cycle(G, self.nodes, orientation='ignore')) - x_ = [(0, 1, 0, FORWARD), (1, 0, 0, FORWARD)] # or (1, 0, 1, 1) + x_ = [(0, 1, 0, FORWARD), (1, 0, 0, FORWARD)] # or (1, 0, 1, 1) assert_equal(x[0], x_[0]) assert_equal(x[1][:2], x_[1][:2]) assert_equal(x[1][3], x_[1][3]) def test_multidigraph_ignore2(self): # Loop traversed an edge while ignoring its orientation. - G = nx.MultiDiGraph([(0,1), (1,2), (1,2)]) - x = list(find_cycle(G, [0,1,2], orientation='ignore')) - x_ = [(1,2,0,FORWARD), (1,2,1,REVERSE)] + G = nx.MultiDiGraph([(0, 1), (1, 2), (1, 2)]) + x = list(find_cycle(G, [0, 1, 2], orientation='ignore')) + x_ = [(1, 2, 0, FORWARD), (1, 2, 1, REVERSE)] assert_equal(x, x_) def test_multidigraph_ignore2(self): @@ -202,24 +206,23 @@ def test_multidigraph_ignore2(self): # The goal here is to cover the case when 2 to be researched from 4, # when 4 is visited from the first time (so we must make sure that 4 # is not visited from 2, and hence, we respect the edge orientation). 
- G = nx.MultiDiGraph([(0,1), (1,2), (2,3), (4,2)]) + G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 3), (4, 2)]) assert_raises(nx.exception.NetworkXNoCycle, - find_cycle, G, [0,1,2,3,4], orientation='original') + find_cycle, G, [0, 1, 2, 3, 4], orientation='original') def test_dag(self): - G = nx.DiGraph([(0,1), (0,2), (1,2)]) + G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) assert_raises(nx.exception.NetworkXNoCycle, find_cycle, G, orientation='original') x = list(find_cycle(G, orientation='ignore')) - assert_equal(x, [(0,1,FORWARD), (1,2,FORWARD), (0,2,REVERSE)]) + assert_equal(x, [(0, 1, FORWARD), (1, 2, FORWARD), (0, 2, REVERSE)]) def test_prev_explored(self): # https://github.com/networkx/networkx/issues/2323 G = nx.DiGraph() - G.add_edges_from([(1,0), (2,0), (1,2), (2,1)]) - assert_raises(nx.exception.NetworkXNoCycle, - find_cycle, G, source=0) + G.add_edges_from([(1, 0), (2, 0), (1, 2), (2, 1)]) + assert_raises(nx.NetworkXNoCycle, find_cycle, G, source=0) x = list(nx.find_cycle(G, 1)) x_ = [(1, 2), (2, 1)] assert_equal(x, x_) @@ -227,3 +230,15 @@ def test_prev_explored(self): x = list(nx.find_cycle(G, 2)) x_ = [(2, 1), (1, 2)] assert_equal(x, x_) + + x = list(nx.find_cycle(G)) + x_ = [(1, 2), (2, 1)] + assert_equal(x, x_) + + def test_no_cycle(self): + # https://github.com/networkx/networkx/issues/2439 + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 0), (3, 1), (3, 2)]) + assert_raises(nx.NetworkXNoCycle, find_cycle, G, source=0) + assert_raises(nx.NetworkXNoCycle, find_cycle, G)
find_cycle() doesn't find cycle in cyclic digraph ```python print("version: {}".format(nx.__version__)) dg = nx.DiGraph() dg.add_edge(1, 0) dg.add_edge(2, 0) dg.add_edge(1, 2) dg.add_edge(2, 1) find_cycle(dg) ``` ``` version: 1.11 Traceback (most recent call last): File "<ipython-input-127-ea87764febbd>", line 7, in <module> find_cycle(dg) File "xxxx\AppData\Local\Continuum\Anaconda\lib\site-packages\networkx\algorithms\cycles.py", line 453, in find_cycle raise nx.exception.NetworkXNoCycle('No cycle found.') ``` I believe the problem is that when `head` is a previously explored node, all paths from the current `start_node` are ignored instead of just pruning from `head`: ```python elif head in explored: # Then we've already explored it. No loop is possible. break ``` Removing this condition fixes the bug as far as I can tell, given the graphs I am working with. However, a solution that does proper pruning would be more efficient, especially for larger graphs.
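A sketch of the behavior the fix establishes, mirroring the tests added in the patch (node 0 has no outgoing edges, so a search rooted there legitimately finds nothing):

```python
import networkx as nx

G = nx.DiGraph([(1, 0), (2, 0), (1, 2), (2, 1)])

try:
    nx.find_cycle(G, source=0)           # 0 has no out-edges
except nx.NetworkXNoCycle:
    print("no cycle reachable from 0")

# Rooted on the cycle -- or with no source at all -- the 1 <-> 2 loop
# is reported instead of being masked by the already-explored node 0
print(nx.find_cycle(G, 1))  # [(1, 2), (2, 1)]
print(nx.find_cycle(G))     # [(1, 2), (2, 1)] after the fix
```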
Thanks for the report @joebockhorst!
2017-06-14T00:09:45
networkx/networkx
2,474
networkx__networkx-2474
[ "2444" ]
2e29c4e6bffad2d58d8260240b2ced15b57aa6c9
diff --git a/networkx/algorithms/bipartite/projection.py b/networkx/algorithms/bipartite/projection.py --- a/networkx/algorithms/bipartite/projection.py +++ b/networkx/algorithms/bipartite/projection.py @@ -1,15 +1,17 @@ # -*- coding: utf-8 -*- -"""One-mode (unipartite) projections of bipartite graphs. -""" -import networkx as nx -# Copyright (C) 2011 by +# Copyright (C) 2017 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. -__author__ = """\n""".join(['Aric Hagberg <[email protected]>', - 'Jordi Torrents <[email protected]>']) +# +# Authors: Aric Hagberg <[email protected]> +# Jordi Torrents <[email protected]> +"""One-mode (unipartite) projections of bipartite graphs.""" +import networkx as nx +from networkx.utils import not_implemented_for + __all__ = ['project', 'projected_graph', 'weighted_projected_graph', @@ -17,17 +19,18 @@ 'overlap_weighted_projected_graph', 'generic_weighted_projected_graph'] + def projected_graph(B, nodes, multigraph=False): r"""Returns the projection of B onto one of its node sets. - Returns the graph G that is the projection of the bipartite graph B + Returns the graph G that is the projection of the bipartite graph B onto the specified nodes. They retain their attributes and are connected in G if they have a common neighbor in B. Parameters ---------- - B : NetworkX graph - The input graph should be bipartite. + B : NetworkX graph + The input graph should be bipartite. nodes : list or iterable Nodes to project onto (the "bottom" nodes). @@ -46,20 +49,20 @@ def projected_graph(B, nodes, multigraph=False): -------- >>> from networkx.algorithms import bipartite >>> B = nx.path_graph(4) - >>> G = bipartite.projected_graph(B, [1,3]) + >>> G = bipartite.projected_graph(B, [1, 3]) >>> list(G) [1, 3] >>> list(G.edges()) [(1, 3)] - + If nodes `a`, and `b` are connected through both nodes 1 and 2 then - building a multigraph results in two edges in the projection onto - [`a`,`b`]: + building a multigraph results in two edges in the projection onto + [`a`, `b`]: >>> B = nx.Graph() >>> B.add_edges_from([('a', 1), ('b', 1), ('a', 2), ('b', 2)]) >>> G = bipartite.projected_graph(B, ['a', 'b'], multigraph=True) - >>> print([sorted((u,v)) for u,v in G.edges()]) + >>> print([sorted((u, v)) for u, v in G.edges()]) [['a', 'b'], ['a', 'b']] Notes @@ -80,9 +83,9 @@ def projected_graph(B, nodes, multigraph=False): See Also -------- - is_bipartite, - is_bipartite_node_set, - sets, + is_bipartite, + is_bipartite_node_set, + sets, weighted_projected_graph, collaboration_weighted_projected_graph, overlap_weighted_projected_graph, @@ -91,75 +94,77 @@ def projected_graph(B, nodes, multigraph=False): if B.is_multigraph(): raise nx.NetworkXError("not defined for multigraphs") if B.is_directed(): - directed=True + directed = True if multigraph: - G=nx.MultiDiGraph() + G = nx.MultiDiGraph() else: - G=nx.DiGraph() + G = nx.DiGraph() else: - directed=False + directed = False if multigraph: - G=nx.MultiGraph() + G = nx.MultiGraph() else: - G=nx.Graph() + G = nx.Graph() G.graph.update(B.graph) - G.add_nodes_from((n,B.node[n]) for n in nodes) + G.add_nodes_from((n, B.node[n]) for n in nodes) for u in nodes: - nbrs2=set((v for nbr in B[u] for v in B[nbr])) -set([u]) + nbrs2 = set(v for nbr in B[u] for v in B[nbr] if v != u) if multigraph: for n in nbrs2: if directed: - links=set(B[u]) & set(B.pred[n]) + links = set(B[u]) & set(B.pred[n]) else: - links=set(B[u]) & set(B[n]) + links = set(B[u]) & 
set(B[n]) for l in links: - if not G.has_edge(u,n,l): - G.add_edge(u,n,key=l) + if not G.has_edge(u, n, l): + G.add_edge(u, n, key=l) else: - G.add_edges_from((u,n) for n in nbrs2) + G.add_edges_from((u, n) for n in nbrs2) return G + +@not_implemented_for('multigraph') def weighted_projected_graph(B, nodes, ratio=False): r"""Returns a weighted projection of B onto one of its node sets. The weighted projected graph is the projection of the bipartite network B onto the specified nodes with weights representing the number of shared neighbors or the ratio between actual shared - neighbors and possible shared neighbors if ratio=True [1]_. The - nodes retain their attributes and are connected in the resulting graph - if they have an edge to a common node in the original graph. + neighbors and possible shared neighbors if ``ratio is True`` [1]_. + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original graph. Parameters ---------- - B : NetworkX graph - The input graph should be bipartite. + B : NetworkX graph + The input graph should be bipartite. nodes : list or iterable Nodes to project onto (the "bottom" nodes). ratio: Bool (default=False) - If True, edge weight is the ratio between actual shared neighbors - and possible shared neighbors. If False, edges weight is the number + If True, edge weight is the ratio between actual shared neighbors + and possible shared neighbors. If False, edges weight is the number of shared neighbors. Returns ------- - Graph : NetworkX graph + Graph : NetworkX graph A graph that is the projection onto the given nodes. Examples -------- >>> from networkx.algorithms import bipartite >>> B = nx.path_graph(4) - >>> G = bipartite.weighted_projected_graph(B, [1,3]) + >>> G = bipartite.weighted_projected_graph(B, [1, 3]) >>> list(G) [1, 3] >>> list(G.edges(data=True)) [(1, 3, {'weight': 1})] - >>> G = bipartite.weighted_projected_graph(B, [1,3], ratio=True) + >>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True) >>> list(G.edges(data=True)) [(1, 3, {'weight': 0.5})] - + Notes ----- No attempt is made to verify that the input graph B is bipartite. @@ -170,30 +175,28 @@ def weighted_projected_graph(B, nodes, ratio=False): See Also -------- - is_bipartite, - is_bipartite_node_set, - sets, + is_bipartite, + is_bipartite_node_set, + sets, collaboration_weighted_projected_graph, overlap_weighted_projected_graph, generic_weighted_projected_graph - projected_graph + projected_graph References ---------- - .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation - Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook of Social Network Analysis. Sage Publications. 
""" - if B.is_multigraph(): - raise nx.NetworkXError("not defined for multigraphs") if B.is_directed(): - pred=B.pred - G=nx.DiGraph() + pred = B.pred + G = nx.DiGraph() else: - pred=B.adj - G=nx.Graph() + pred = B.adj + G = nx.Graph() G.graph.update(B.graph) - G.add_nodes_from((n,B.node[n]) for n in nodes) + G.add_nodes_from((n, B.node[n]) for n in nodes) n_top = float(len(B) - len(nodes)) for u in nodes: unbrs = set(B[u]) @@ -205,9 +208,11 @@ def weighted_projected_graph(B, nodes, ratio=False): weight = len(common) else: weight = len(common) / n_top - G.add_edge(u,v,weight=weight) + G.add_edge(u, v, weight=weight) return G + +@not_implemented_for('multigraph') def collaboration_weighted_projected_graph(B, nodes): r"""Newman's weighted projection of B onto one of its node sets. @@ -216,47 +221,47 @@ def collaboration_weighted_projected_graph(B, nodes): using Newman's collaboration model [1]_: .. math:: - - w_{v,u} = \sum_k \frac{\delta_{v}^{w} \delta_{w}^{k}}{k_w - 1} - - where `v` and `u` are nodes from the same bipartite node set, - and `w` is a node of the opposite node set. - The value `k_w` is the degree of node `w` in the bipartite - network and `\delta_{v}^{w}` is 1 if node `v` is - linked to node `w` in the original bipartite graph or 0 otherwise. - + + w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1} + + where `u` and `v` are nodes from the bottom bipartite node set, + and `k` is a node of the top node set. + The value `d_k` is the degree of node `k` in the bipartite + network and `\delta_{u}^{k}` is 1 if node `u` is + linked to node `k` in the original bipartite graph or 0 otherwise. + The nodes retain their attributes and are connected in the resulting graph if have an edge to a common node in the original bipartite graph. Parameters ---------- - B : NetworkX graph - The input graph should be bipartite. + B : NetworkX graph + The input graph should be bipartite. nodes : list or iterable Nodes to project onto (the "bottom" nodes). Returns ------- - Graph : NetworkX graph + Graph : NetworkX graph A graph that is the projection onto the given nodes. Examples -------- >>> from networkx.algorithms import bipartite >>> B = nx.path_graph(5) - >>> B.add_edge(1,5) + >>> B.add_edge(1, 5) >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5]) >>> list(G) [0, 2, 4, 5] >>> for edge in G.edges(data=True): print(edge) - ... + ... (0, 2, {'weight': 0.5}) (0, 5, {'weight': 0.5}) (2, 4, {'weight': 1.0}) (2, 5, {'weight': 0.5}) - + Notes ----- No attempt is made to verify that the input graph B is bipartite. @@ -267,67 +272,67 @@ def collaboration_weighted_projected_graph(B, nodes): See Also -------- - is_bipartite, - is_bipartite_node_set, - sets, + is_bipartite, + is_bipartite_node_set, + sets, weighted_projected_graph, overlap_weighted_projected_graph, generic_weighted_projected_graph, - projected_graph + projected_graph References ---------- - .. [1] Scientific collaboration networks: II. - Shortest paths, weighted networks, and centrality, + .. [1] Scientific collaboration networks: II. + Shortest paths, weighted networks, and centrality, M. E. J. Newman, Phys. Rev. E 64, 016132 (2001). 
""" - if B.is_multigraph(): - raise nx.NetworkXError("not defined for multigraphs") if B.is_directed(): - pred=B.pred - G=nx.DiGraph() + pred = B.pred + G = nx.DiGraph() else: - pred=B.adj - G=nx.Graph() + pred = B.adj + G = nx.Graph() G.graph.update(B.graph) - G.add_nodes_from((n,B.node[n]) for n in nodes) + G.add_nodes_from((n, B.node[n]) for n in nodes) for u in nodes: unbrs = set(B[u]) - nbrs2 = set((n for nbr in unbrs for n in B[nbr])) - set([u]) + nbrs2 = set(n for nbr in unbrs for n in B[nbr] if n != u) for v in nbrs2: vnbrs = set(pred[v]) - common = unbrs & vnbrs - weight = sum([1.0/(len(B[n]) - 1) for n in common if len(B[n])>1]) - G.add_edge(u,v,weight=weight) + common_degree = (len(B[n]) for n in unbrs & vnbrs) + weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1) + G.add_edge(u, v, weight=weight) return G + +@not_implemented_for('multigraph') def overlap_weighted_projected_graph(B, nodes, jaccard=True): r"""Overlap weighted projection of B onto one of its node sets. - The overlap weighted projection is the projection of the bipartite - network B onto the specified nodes with weights representing + The overlap weighted projection is the projection of the bipartite + network B onto the specified nodes with weights representing the Jaccard index between the neighborhoods of the two nodes in the - original bipartite network [1]_: + original bipartite network [1]_: .. math:: - - w_{v,u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|} - or if the parameter 'jaccard' is False, the fraction of common - neighbors by minimum of both nodes degree in the original + w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|} + + or if the parameter 'jaccard' is False, the fraction of common + neighbors by minimum of both nodes degree in the original bipartite graph [1]_: - + .. math:: - w_{v,u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|,|N(v)|)} - + w_{v, u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|, |N(v)|)} + The nodes retain their attributes and are connected in the resulting graph if have an edge to a common node in the original bipartite graph. Parameters ---------- - B : NetworkX graph - The input graph should be bipartite. + B : NetworkX graph + The input graph should be bipartite. nodes : list or iterable Nodes to project onto (the "bottom" nodes). @@ -336,22 +341,23 @@ def overlap_weighted_projected_graph(B, nodes, jaccard=True): Returns ------- - Graph : NetworkX graph + Graph : NetworkX graph A graph that is the projection onto the given nodes. Examples -------- >>> from networkx.algorithms import bipartite >>> B = nx.path_graph(5) - >>> G = bipartite.overlap_weighted_projected_graph(B, [0, 2, 4]) + >>> nodes = [0, 2, 4] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes) >>> list(G) [0, 2, 4] >>> list(G.edges(data=True)) [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})] - >>> G = bipartite.overlap_weighted_projected_graph(B, [0, 2, 4], jaccard=False) + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False) >>> list(G.edges(data=True)) [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})] - + Notes ----- No attempt is made to verify that the input graph B is bipartite. @@ -362,43 +368,43 @@ def overlap_weighted_projected_graph(B, nodes, jaccard=True): See Also -------- - is_bipartite, - is_bipartite_node_set, - sets, + is_bipartite, + is_bipartite_node_set, + sets, weighted_projected_graph, collaboration_weighted_projected_graph, generic_weighted_projected_graph, - projected_graph + projected_graph References ---------- - .. [1] Borgatti, S.P. and Halgin, D. 
In press. Analyzing Affiliation - Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook + .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation + Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook of Social Network Analysis. Sage Publications. - + """ - if B.is_multigraph(): - raise nx.NetworkXError("not defined for multigraphs") if B.is_directed(): - pred=B.pred - G=nx.DiGraph() + pred = B.pred + G = nx.DiGraph() else: - pred=B.adj - G=nx.Graph() + pred = B.adj + G = nx.Graph() G.graph.update(B.graph) - G.add_nodes_from((n,B.node[n]) for n in nodes) + G.add_nodes_from((n, B.node[n]) for n in nodes) for u in nodes: unbrs = set(B[u]) nbrs2 = set((n for nbr in unbrs for n in B[nbr])) - set([u]) for v in nbrs2: vnbrs = set(pred[v]) if jaccard: - weight = float(len(unbrs & vnbrs)) / len(unbrs | vnbrs) + wt = float(len(unbrs & vnbrs)) / len(unbrs | vnbrs) else: - weight = float(len(unbrs & vnbrs)) / min(len(unbrs),len(vnbrs)) - G.add_edge(u,v,weight=weight) + wt = float(len(unbrs & vnbrs)) / min(len(unbrs), len(vnbrs)) + G.add_edge(u, v, weight=wt) return G + +@not_implemented_for('multigraph') def generic_weighted_projected_graph(B, nodes, weight_function=None): r"""Weighted projection of B with a user-specified weight function. @@ -407,25 +413,25 @@ def generic_weighted_projected_graph(B, nodes, weight_function=None): must accept as a parameter the neighborhood sets of two nodes and return an integer or a float. - The nodes retain their attributes and are connected in the resulting graph + The nodes retain their attributes and are connected in the resulting graph if they have an edge to a common node in the original graph. Parameters ---------- - B : NetworkX graph - The input graph should be bipartite. + B : NetworkX graph + The input graph should be bipartite. nodes : list or iterable Nodes to project onto (the "bottom" nodes). - weight_function: function - This function must accept as parameters the same input graph + weight_function : function + This function must accept as parameters the same input graph that this function, and two nodes; and return an integer or a float. The default function computes the number of shared neighbors. Returns ------- - Graph : NetworkX graph + Graph : NetworkX graph A graph that is the projection onto the given nodes. Examples @@ -436,27 +442,27 @@ def generic_weighted_projected_graph(B, nodes, weight_function=None): ... unbrs = set(G[u]) ... vnbrs = set(G[v]) ... return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs) - ... + ... >>> def my_weight(G, u, v, weight='weight'): ... w = 0 ... for nbr in set(G[u]) & set(G[v]): - ... w += G.edge[u][nbr].get(weight, 1) + G.edge[v][nbr].get(weight, 1) + ... w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1) ... return w - ... + ... >>> # A complete bipartite graph with 4 nodes and 4 edges - >>> B = nx.complete_bipartite_graph(2,2) + >>> B = nx.complete_bipartite_graph(2, 2) >>> # Add some arbitrary weight to the edges - >>> for i,(u,v) in enumerate(B.edges()): + >>> for i, (u, v) in enumerate(B.edges()): ... B.edge[u][v]['weight'] = i + 1 - ... + ... >>> for edge in B.edges(data=True): ... print(edge) - ... + ... 
(0, 2, {'weight': 1}) (0, 3, {'weight': 2}) (1, 2, {'weight': 3}) (1, 3, {'weight': 4}) - >>> # Without specifying a function, the weight is equal to # shared partners + >>> # By default, the weight is the number of shared neighbors >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1]) >>> print(list(G.edges(data=True))) [(0, 1, {'weight': 2})] @@ -467,7 +473,7 @@ def generic_weighted_projected_graph(B, nodes, weight_function=None): >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1], weight_function=my_weight) >>> print(list(G.edges(data=True))) [(0, 1, {'weight': 10})] - + Notes ----- No attempt is made to verify that the input graph B is bipartite. @@ -478,34 +484,32 @@ def generic_weighted_projected_graph(B, nodes, weight_function=None): See Also -------- - is_bipartite, - is_bipartite_node_set, - sets, + is_bipartite, + is_bipartite_node_set, + sets, weighted_projected_graph, collaboration_weighted_projected_graph, overlap_weighted_projected_graph, - projected_graph + projected_graph """ - if B.is_multigraph(): - raise nx.NetworkXError("not defined for multigraphs") if B.is_directed(): - pred=B.pred - G=nx.DiGraph() + pred = B.pred + G = nx.DiGraph() else: - pred=B.adj - G=nx.Graph() + pred = B.adj + G = nx.Graph() if weight_function is None: def weight_function(G, u, v): # Notice that we use set(pred[v]) for handling the directed case. return len(set(G[u]) & set(pred[v])) G.graph.update(B.graph) - G.add_nodes_from((n,B.node[n]) for n in nodes) + G.add_nodes_from((n, B.node[n]) for n in nodes) for u in nodes: nbrs2 = set((n for nbr in set(B[u]) for n in B[nbr])) - set([u]) for v in nbrs2: weight = weight_function(B, u, v) - G.add_edge(u,v,weight=weight) + G.add_edge(u, v, weight=weight) return G def project(B, nodes, create_using=None):
Incorrect formula for collaboration_weighted_projected_graph The formula given in `collaboration_weighted_projected_graph` makes little sense. For reference, the one in the paper by Newman is: `w_{i, j} = \sum_k \frac{\delta_{i}^{k} \delta_{j}^{k}}{n_k - 1}` where `i` and `j` are nodes from the "bottom" node set (the set of nodes to project onto), and `k` is a node of the "top" node set. The value `n_k` is the degree of node `k` in the bipartite network and `delta_{i}^{k}` is 1 if node `i` is linked to node `k` in the original bipartite graph or 0 otherwise. It is probably also worth mentioning explicitly that "top" nodes with degree <= 1 are excluded.
For reference: http://networkx.readthedocs.io/en/stable/reference/generated/networkx.algorithms.bipartite.projection.collaboration_weighted_projected_graph.html?highlight=collaboration_weighted_projected_graph with `i = v`, `j = w`. It seems that the first delta should have superscript `k` and the denominator should be `k` (or `n^k`), but not `k_w`. To me the code looks correct, however. Can someone confirm? When fixing this, someone should apply PEP8 and add the decorator. I bet we can also gain some speed by computing a degree dictionary once rather than twice in each iteration.
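A quick numerical check of Newman's formula on the example from the function's docstring (top node 1 has degree 3 in B, so each pair of its bottom neighbors gets weight 1/(3-1)):

```python
import networkx as nx
from networkx.algorithms import bipartite

B = nx.path_graph(5)   # path 0-1-2-3-4
B.add_edge(1, 5)       # top node 1 now has bottom neighbors 0, 2, 5

G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5])
print(G[0][2]["weight"])  # 0.5 == 1 / (deg(1) - 1)
print(G[2][4]["weight"])  # 1.0, via top node 3 with degree 2
```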
2017-06-14T02:07:09
networkx/networkx
2,480
networkx__networkx-2480
[ "2223", "2223" ]
7d45f5210f84ba506714103f236a7d331e6e45d9
diff --git a/networkx/algorithms/shortest_paths/weighted.py b/networkx/algorithms/shortest_paths/weighted.py --- a/networkx/algorithms/shortest_paths/weighted.py +++ b/networkx/algorithms/shortest_paths/weighted.py @@ -141,12 +141,13 @@ def dijkstra_path(G, source, target, weight='weight'): will find the shortest red path. The weight function can be used to include node weights. - ``` - def func(u, v, d): - return G.node[u].get('node_weight', 1)/2 + \ - G.node[v].get('node_weight', 1)/2 + \ - d.get('weight', 1) - ``` + + >>> def func(u, v, d): + ... node_u_wt = G.node[u].get('node_weight', 1) + ... node_v_wt = G.node[v].get('node_weight', 1) + ... edge_wt = d.get('weight', 1) + ... return node_u_wt/2 + node_v_wt/2 + edge_wt + In this example we take the average of start and end node weights of an edge and add it to the weight of the edge. @@ -963,9 +964,9 @@ def bellman_ford(G, source, weight='weight'): """ _warnings.warn("Function bellman_ford() is deprecated, use function bellman_ford_predecessor_and_distance() instead.", DeprecationWarning) - - return bellman_ford_predecessor_and_distance(G, source, weight=weight) - + + return bellman_ford_predecessor_and_distance(G, source, weight=weight) + def bellman_ford_predecessor_and_distance(G, source, target=None, cutoff=None, weight='weight'): """Compute shortest path lengths and predecessors on shortest paths in weighted graphs. @@ -1052,7 +1053,7 @@ def bellman_ford_predecessor_and_distance(G, source, target=None, cutoff=None, w return pred, dist weight = _weight_function(G, weight) - + return (pred, _bellman_ford(G, [source], weight,pred=pred, dist=dist, cutoff=cutoff, target=target)) @@ -1063,7 +1064,7 @@ def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, Parameters ---------- G : NetworkX graph - + source: list List of source nodes @@ -1088,7 +1089,7 @@ def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, cutoff: integer or float, optional Depth to stop the search. Only paths of length <= cutoff are returned - + target: node label, optional Ending node for path. Path lengths to other destinations may (and probably will) be incorrect. @@ -1109,7 +1110,7 @@ def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, if pred is None: pred = {v: [None] for v in source} - + if dist is None: dist = {v: 0 for v in source} @@ -1133,11 +1134,11 @@ def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, if cutoff is not None: if dist_v > cutoff: continue - + if target is not None: if dist_v > dist.get(target, inf): continue - + if dist_v < dist.get(v, inf): if v not in in_q: q.append(v) @@ -1149,24 +1150,24 @@ def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, count[v] = count_v dist[v] = dist_v pred[v] = [u] - + elif dist.get(v) is not None and dist_v == dist.get(v): pred[v].append(u) if paths is not None: dsts = [target] if target is not None else pred for dst in dsts: - + path = [dst] cur = dst - + while pred[cur][0] is not None: cur = pred[cur][0] path.append(cur) - + path.reverse() paths[dst] = path - + return dist @@ -1217,7 +1218,7 @@ def bellman_ford_path(G, source, target, weight='weight'): except KeyError: raise nx.NetworkXNoPath( "Node %s not reachable from %s" % (source, target)) - + def bellman_ford_path_length(G, source, target, weight='weight'): """Returns the shortest path length from source to target in a weighted graph. 
@@ -1264,9 +1265,9 @@ def bellman_ford_path_length(G, source, target, weight='weight'): return 0 weight = _weight_function(G, weight) - + length = _bellman_ford(G, [source], weight, target=target) - + try: return length[target] except KeyError: diff --git a/networkx/generators/atlas.py b/networkx/generators/atlas.py --- a/networkx/generators/atlas.py +++ b/networkx/generators/atlas.py @@ -126,9 +126,9 @@ def graph_atlas(i): References ---------- - .. [atlas] Ronald C. Read and Robin J. Wilson, - *An Atlas of Graphs*. - Oxford University Press, 1998. + .. [graph_atlas] Ronald C. Read and Robin J. Wilson, + *An Atlas of Graphs*. + Oxford University Press, 1998. """ if not (0 <= i < NUM_GRAPHS):
Intersphinx links to Networkx graph classes don't work Suppose I set up a Sphinx build (outside of NetworkX) with `conf.py` contents ``` python extensions = [ 'sphinx.ext.intersphinx', ] intersphinx_mapping = { 'networkx': ('https://networkx.readthedocs.io/en/latest', None), } ``` and try to write an `index.rst` file with ``` rst :class:`networkx.Graph` ``` Attempting to render this documentation with Sphinx yields the warning ``` sh index.rst:14: WARNING: py:class reference target not found: networkx.Graph ``` This is because NetworkX generates the documentation for `networkx.Graph` as a _function_ instead of a _class_: ``` rst .. autofunction:: Graph ``` This can be fixed in NetworkX by changing `autofunction` to `autoclass`. In the meantime, as a workaround, the referring documentation can use `:func:`networkx.Graph`` instead of `:class:`networkx.Graph``. I remember bringing this up at some point but there was some reason why it was `autofunction`...
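A minimal sketch of the fix on the NetworkX side, switching the autodoc directive so Sphinx registers `Graph` as a class; the `:members:` option is an assumption for illustration, not quoted from the repo:

```rst
.. currentmodule:: networkx

.. autoclass:: Graph
   :members:
```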
2017-06-19T21:32:43
networkx/networkx
2,485
networkx__networkx-2485
[ "2340" ]
3daa2e27051999c354b162c0379f4ea20bdc5142
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -59,7 +59,6 @@ def cd(newpath): # If your extensions are in another directory, add it here. # These locations are relative to conf.py -sys.path.append(os.path.abspath('../sphinxext')) # General configuration # --------------------- @@ -76,9 +75,6 @@ def cd(newpath): 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode', - #'sphinxcontrib.bibtex', - #'IPython.sphinxext.ipython_console_highlighting', - #'IPython.sphinxext.ipython_directive', ] @@ -99,7 +95,7 @@ def cd(newpath): # General substitutions. project = 'NetworkX' -copyright = '2015, NetworkX Developers' +copyright = '2004-2017, NetworkX Developers' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. @@ -178,7 +174,7 @@ def cd(newpath): html_index = 'contents.html' # Custom sidebar templates, maps page names to templates. -#html_sidebars = {'index': 'indexsidebar.html'} +#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # templates. @@ -193,8 +189,6 @@ def cd(newpath): # Output file base name for HTML help builder. htmlhelp_basename = 'NetworkX' -pngmath_use_preview = True - # Options for LaTeX output # ------------------------ @@ -222,10 +216,8 @@ def cd(newpath): 'http://docs.scipy.org/doc/numpy/': None, } -# For trac custom roles - +# The reST default role (used for this markup: `text`) to use for all +# documents. default_role = 'obj' -trac_url = 'https://networkx.lanl.gov/trac/' -mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML' numpydoc_show_class_members = False diff --git a/doc/sphinxext/customroles.py b/doc/sphinxext/customroles.py deleted file mode 100644 --- a/doc/sphinxext/customroles.py +++ /dev/null @@ -1,138 +0,0 @@ -""" - Custom Roles -""" - -from docutils import nodes, utils -from docutils.parsers.rst import roles - -from sphinx import addnodes -from sphinx.util import ws_re, caption_ref_re - -# http://www.doughellmann.com/articles/how-tos/sphinx-custom-roles/index.html -def sample_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - """Custom role. - - Parameters - ---------- - name : str - The name of the role, as used in the document. - rawtext : str - The markup, including the role declaration. - text : str - The text to be marked up by the role. - lineno : int - The line number where `rawtext` appears. - inliner : Inliner - The instance that called the role. - options : dict - Directive options for customizatoin. - content : list - The directive content for customization. - - Returns - ------- - nodes : list - The list of nodes to insert into the document. - msgs : list - The list of system messages, perhaps an error message. 
- - """ - pass - - -################## - - -prefixed_roles = { - # name: (prefix, baseuri) - 'arxiv': ('arXiv:', 'http://arxiv.org/abs/'), - 'doi': ('doi:', 'http://dx.doi.org/'), -} - -no_text_roles = [ - 'url', - 'pdf', -] - -def prefixed_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - prefix, baseuri = prefixed_roles[name] - uri = baseuri + text - display = utils.unescape(text) - node = nodes.literal(prefix, prefix) - ref = nodes.reference(rawtext, display, refuri=uri, **options) - node += ref # keep it in the 'literal' background - return [node], [] - -def url_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - uri = text - display = 'url' - node = nodes.literal('', '') - node += nodes.reference(rawtext, name, refuri=uri, **options) - return [node], [] - -def trac_ticket_role(name, rawtext, text, lineno, inliner, - options={}, content=[]): - app = inliner.document.settings.env.app - try: - base = app.config.trac_url - if not base: - raise AttributeError - except AttributeError as err: - msg = 'trac_url configuration value is not set (%s)' - raise ValueError(msg % str(err)) - - slash = '/' if base[-1] != '/' else '' - prefix = 'ticket ' - node = nodes.literal(prefix, prefix) - display = utils.unescape(text) - uri = base + slash + 'ticket/' + text - node += nodes.reference(rawtext, display, refuri=uri, **options) - return [node], [] - -def trac_changeset_role(name, rawtext, text, lineno, inliner, - options={}, content=[]): - app = inliner.document.settings.env.app - try: - base = app.config.trac_url - if not base: - raise AttributeError - except AttributeError as err: - msg = 'trac_url configuration value is not set (%s)' - raise ValueError(msg % str(err)) - - slash = '/' if base[-1] != '/' else '' - unescaped = utils.unescape(text) - prefix = 'changeset ' - node = nodes.literal(prefix, prefix) - - # Hard-coded for NetworkX - if unescaped.endswith('networkx-svn-archive'): - # Use the integer - display = unescaped.split('/')[0] - else: - # hg: use the first 12 hash characters - display = unescaped[:12] - - uri = base + slash + 'changeset/' + text - node += nodes.reference(rawtext, display, refuri=uri, **options) - return [node], [] - -active_roles = { - 'arxiv': prefixed_role, - 'doi': prefixed_role, - 'pdf': url_role, - 'url': url_role, - 'ticket': trac_ticket_role, - 'changeset': trac_changeset_role, -} - -# Add a generic docstring. -for role in active_roles.values(): - role.__doc__ = sample_role.__doc__ - -def setup(app): - for role, func in active_roles.items(): - roles.register_local_role(role, func) - app.add_config_value('trac_url', None, 'env') - -
pngmath is deprecated, causes build error
Building on Fedora rawhide results in:

```
Running Sphinx v1.4.9
making output directory...
WARNING: sphinx.ext.pngmath has been deprecated. Please use sphinx.ext.imgmath instead.

Extension error:
sphinx.ext.pngmath: other math package is already loaded
```

Simply removing it from the extensions allows it to build:

```
diff -up networkx-networkx-1.11/doc/source/conf.py.sphinx networkx-networkx-1.11/doc/source/conf.py
--- networkx-networkx-1.11/doc/source/conf.py.sphinx	2016-01-30 10:25:44.000000000 -0700
+++ networkx-networkx-1.11/doc/source/conf.py	2016-12-23 09:54:55.280925006 -0700
@@ -74,7 +74,6 @@ extensions = [
     'sphinx.ext.intersphinx',
     'sphinx.ext.mathjax',
     'sphinx.ext.napoleon',
-    'sphinx.ext.pngmath',
     'sphinx.ext.todo',
     'sphinx.ext.viewcode',
     #'sphinxcontrib.bibtex',
```

There is also this line:

```
pngmath_use_preview = True
```

which perhaps also should be removed/changed.

Upstream notes that pngmath has been deprecated: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/pngmath.py#L255
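For reference, a minimal sketch of the `conf.py` extension list after the fix (assuming Sphinx >= 1.4; `sphinx.ext.pngmath` is simply dropped, since `sphinx.ext.mathjax` already provides math rendering):

```python
# doc/source/conf.py (excerpt, sketch) -- pngmath removed, mathjax kept.
extensions = [
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
```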
This issue was fixed in commit ffa3e5deb837 and should be closed.

@hagberg Just noticed a small remaining item. I will submit a pull request to close this momentarily. Sorry for the noise.
2017-06-23T20:15:55
networkx/networkx
2,488
networkx__networkx-2488
[ "2482" ]
22a6ebaf0c235a825195e48558f39b65c26d5a1c
diff --git a/networkx/relabel.py b/networkx/relabel.py --- a/networkx/relabel.py +++ b/networkx/relabel.py @@ -6,10 +6,11 @@ # BSD license. import networkx as nx __author__ = """\n""".join(['Aric Hagberg <[email protected]>', - 'Pieter Swart ([email protected])', - 'Dan Schult ([email protected])']) + 'Pieter Swart ([email protected])', + 'Dan Schult ([email protected])']) __all__ = ['convert_node_labels_to_integers', 'relabel_nodes'] + def relabel_nodes(G, mapping, copy=True): """Relabel the nodes of the graph G. @@ -76,21 +77,21 @@ def relabel_nodes(G, mapping, copy=True): Only the nodes specified in the mapping will be relabeled. The keyword setting copy=False modifies the graph in place. - Relabel_nodes avoids naming collisions by building a + Relabel_nodes avoids naming collisions by building a directed graph from ``mapping`` which specifies the order of relabelings. Naming collisions, such as a->b, b->c, are ordered such that "b" gets renamed to "c" before "a" gets renamed "b". - In cases of circular mappings (e.g. a->b, b->a), modifying the + In cases of circular mappings (e.g. a->b, b->a), modifying the graph is not possible in-place and an exception is raised. In that case, use copy=True. - + See Also -------- convert_node_labels_to_integers """ # you can pass a function f(old_label)->new_label # but we'll just make a dictionary here regardless - if not hasattr(mapping,"__getitem__"): + if not hasattr(mapping, "__getitem__"): m = dict((n, mapping(n)) for n in G) else: m = mapping @@ -131,38 +132,39 @@ def _relabel_inplace(G, mapping): try: G.add_node(new, **G.node[old]) except KeyError: - raise KeyError("Node %s is not in the graph"%old) + raise KeyError("Node %s is not in the graph" % old) if multigraph: new_edges = [(new, new if old == target else target, key, data) - for (_,target,key,data) + for (_, target, key, data) in G.edges(old, data=True, keys=True)] if directed: new_edges += [(new if old == source else source, new, key, data) - for (source, _, key,data) + for (source, _, key, data) in G.in_edges(old, data=True, keys=True)] else: new_edges = [(new, new if old == target else target, data) - for (_,target,data) in G.edges(old, data=True)] + for (_, target, data) in G.edges(old, data=True)] if directed: - new_edges += [(new if old == source else source,new,data) - for (source,_,data) in G.in_edges(old, data=True)] + new_edges += [(new if old == source else source, new, data) + for (source, _, data) in G.in_edges(old, data=True)] G.remove_node(old) G.add_edges_from(new_edges) return G + def _relabel_copy(G, mapping): H = G.__class__() + H.add_nodes_from(mapping.get(n, n) for n in G) + H._node.update(dict((mapping.get(n, n), d.copy()) for n, d in G.node.items())) if G.name: H.name = "(%s)" % G.name if G.is_multigraph(): - H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),k,d.copy()) - for (n1,n2,k,d) in G.edges(keys=True, data=True)) + H.add_edges_from((mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy()) + for (n1, n2, k, d) in G.edges(keys=True, data=True)) else: - H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),d.copy()) - for (n1, n2, d) in G.edges(data=True)) + H.add_edges_from((mapping.get(n1, n1), mapping.get(n2, n2), d.copy()) + for (n1, n2, d) in G.edges(data=True)) - H.add_nodes_from(mapping.get(n, n) for n in G) - H._node.update(dict((mapping.get(n, n), d.copy()) for n,d in G.node.items())) H.graph.update(G.graph.copy()) return H @@ -200,27 +202,27 @@ def convert_node_labels_to_integers(G, first_label=0, ordering="default", -------- 
relabel_nodes """ - N = G.number_of_nodes()+first_label + N = G.number_of_nodes() + first_label if ordering == "default": mapping = dict(zip(G.nodes(), range(first_label, N))) elif ordering == "sorted": nlist = sorted(G.nodes()) mapping = dict(zip(nlist, range(first_label, N))) elif ordering == "increasing degree": - dv_pairs = [(d,n) for (n,d) in G.degree()] - dv_pairs.sort() # in-place sort from lowest to highest degree - mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N))) + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) elif ordering == "decreasing degree": - dv_pairs = [(d,n) for (n,d) in G.degree()] - dv_pairs.sort() # in-place sort from lowest to highest degree + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree dv_pairs.reverse() - mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N))) + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) else: - raise nx.NetworkXError('Unknown node ordering: %s'%ordering) + raise nx.NetworkXError('Unknown node ordering: %s' % ordering) H = relabel_nodes(G, mapping) - H.name = "("+G.name+")_with_int_labels" + H.name = "(" + G.name + ")_with_int_labels" # create node attribute with the old label if label_attribute is not None: nx.set_node_attributes(H, label_attribute, - dict((v,k) for k,v in mapping.items())) + dict((v, k) for k, v in mapping.items())) return H
diff --git a/networkx/tests/test_relabel.py b/networkx/tests/test_relabel.py --- a/networkx/tests/test_relabel.py +++ b/networkx/tests/test_relabel.py @@ -3,153 +3,161 @@ from networkx import * from networkx.convert import * from networkx.algorithms.operators import * -from networkx.generators.classic import barbell_graph,cycle_graph +from networkx.generators.classic import barbell_graph, cycle_graph from networkx.testing import * + class TestRelabel(): def test_convert_node_labels_to_integers(self): # test that empty graph converts fine for all options - G=empty_graph() - H=convert_node_labels_to_integers(G,100) + G = empty_graph() + H = convert_node_labels_to_integers(G, 100) assert_equal(H.name, '(empty_graph(0))_with_int_labels') assert_equal(list(H.nodes()), []) assert_equal(list(H.edges()), []) - for opt in ["default", "sorted", "increasing degree", - "decreasing degree"]: - G=empty_graph() - H=convert_node_labels_to_integers(G,100, ordering=opt) + for opt in ["default", "sorted", "increasing degree", "decreasing degree"]: + G = empty_graph() + H = convert_node_labels_to_integers(G, 100, ordering=opt) assert_equal(H.name, '(empty_graph(0))_with_int_labels') assert_equal(list(H.nodes()), []) assert_equal(list(H.edges()), []) - G=empty_graph() - G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')]) - G.name="paw" - H=convert_node_labels_to_integers(G) + G = empty_graph() + G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')]) + G.name = "paw" + H = convert_node_labels_to_integers(G) degH = (d for n, d in H.degree()) degG = (d for n, d in G.degree()) assert_equal(sorted(degH), sorted(degG)) - H=convert_node_labels_to_integers(G,1000) + H = convert_node_labels_to_integers(G, 1000) degH = (d for n, d in H.degree()) degG = (d for n, d in G.degree()) assert_equal(sorted(degH), sorted(degG)) assert_nodes_equal(H.nodes(), [1000, 1001, 1002, 1003]) - H=convert_node_labels_to_integers(G,ordering="increasing degree") + H = convert_node_labels_to_integers(G, ordering="increasing degree") degH = (d for n, d in H.degree()) degG = (d for n, d in G.degree()) assert_equal(sorted(degH), sorted(degG)) - assert_equal(degree(H,0), 1) - assert_equal(degree(H,1), 2) - assert_equal(degree(H,2), 2) - assert_equal(degree(H,3), 3) + assert_equal(degree(H, 0), 1) + assert_equal(degree(H, 1), 2) + assert_equal(degree(H, 2), 2) + assert_equal(degree(H, 3), 3) - H=convert_node_labels_to_integers(G,ordering="decreasing degree") + H = convert_node_labels_to_integers(G, ordering="decreasing degree") degH = (d for n, d in H.degree()) degG = (d for n, d in G.degree()) assert_equal(sorted(degH), sorted(degG)) - assert_equal(degree(H,0), 3) - assert_equal(degree(H,1), 2) - assert_equal(degree(H,2), 2) - assert_equal(degree(H,3), 1) + assert_equal(degree(H, 0), 3) + assert_equal(degree(H, 1), 2) + assert_equal(degree(H, 2), 2) + assert_equal(degree(H, 3), 1) - H=convert_node_labels_to_integers(G,ordering="increasing degree", - label_attribute='label') + H = convert_node_labels_to_integers(G, ordering="increasing degree", + label_attribute='label') degH = (d for n, d in H.degree()) degG = (d for n, d in G.degree()) assert_equal(sorted(degH), sorted(degG)) - assert_equal(degree(H,0), 1) - assert_equal(degree(H,1), 2) - assert_equal(degree(H,2), 2) - assert_equal(degree(H,3), 3) + assert_equal(degree(H, 0), 1) + assert_equal(degree(H, 1), 2) + assert_equal(degree(H, 2), 2) + assert_equal(degree(H, 3), 3) # check mapping - assert_equal(H.node[3]['label'],'C') - assert_equal(H.node[0]['label'],'D') - 
assert_true(H.node[1]['label']=='A' or H.node[2]['label']=='A') - assert_true(H.node[1]['label']=='B' or H.node[2]['label']=='B') + assert_equal(H.node[3]['label'], 'C') + assert_equal(H.node[0]['label'], 'D') + assert_true(H.node[1]['label'] == 'A' or H.node[2]['label'] == 'A') + assert_true(H.node[1]['label'] == 'B' or H.node[2]['label'] == 'B') def test_convert_to_integers2(self): - G=empty_graph() - G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')]) - G.name="paw" - H=convert_node_labels_to_integers(G,ordering="sorted") + G = empty_graph() + G.add_edges_from([('C', 'D'), ('A', 'B'), ('A', 'C'), ('B', 'C')]) + G.name = "paw" + H = convert_node_labels_to_integers(G, ordering="sorted") degH = (d for n, d in H.degree()) degG = (d for n, d in G.degree()) assert_equal(sorted(degH), sorted(degG)) - H=convert_node_labels_to_integers(G,ordering="sorted", - label_attribute='label') - assert_equal(H.node[0]['label'],'A') - assert_equal(H.node[1]['label'],'B') - assert_equal(H.node[2]['label'],'C') - assert_equal(H.node[3]['label'],'D') + H = convert_node_labels_to_integers(G, ordering="sorted", + label_attribute='label') + assert_equal(H.node[0]['label'], 'A') + assert_equal(H.node[1]['label'], 'B') + assert_equal(H.node[2]['label'], 'C') + assert_equal(H.node[3]['label'], 'D') @raises(nx.NetworkXError) def test_convert_to_integers_raise(self): G = nx.Graph() - H=convert_node_labels_to_integers(G,ordering="increasing age") - + H = convert_node_labels_to_integers(G, ordering="increasing age") def test_relabel_nodes_copy(self): - G=empty_graph() - G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')]) - mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'} - H=relabel_nodes(G,mapping) + G = empty_graph() + G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')]) + mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'} + H = relabel_nodes(G, mapping) assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog']) def test_relabel_nodes_function(self): - G=empty_graph() - G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')]) + G = empty_graph() + G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')]) # function mapping no longer encouraged but works + def mapping(n): return ord(n) - H=relabel_nodes(G,mapping) + H = relabel_nodes(G, mapping) assert_nodes_equal(H.nodes(), [65, 66, 67, 68]) def test_relabel_nodes_graph(self): - G=Graph([('A','B'),('A','C'),('B','C'),('C','D')]) - mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'} - H=relabel_nodes(G,mapping) + G = Graph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')]) + mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'} + H = relabel_nodes(G, mapping) assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog']) + def test_relabel_nodes_orderedgraph(self): + G = OrderedGraph() + G.add_nodes_from([1, 2, 3]) + G.add_edges_from([(1, 3), (2, 3)]) + mapping = {1: 'a', 2: 'b', 3: 'c'} + H = relabel_nodes(G, mapping) + assert list(H.nodes) == ['a', 'b', 'c'] + def test_relabel_nodes_digraph(self): - G=DiGraph([('A','B'),('A','C'),('B','C'),('C','D')]) - mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'} - H=relabel_nodes(G,mapping,copy=False) + G = DiGraph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')]) + mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'} + H = relabel_nodes(G, mapping, copy=False) assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog']) def test_relabel_nodes_multigraph(self): - G=MultiGraph([('a','b'),('a','b')]) - 
mapping={'a':'aardvark','b':'bear'} - G=relabel_nodes(G,mapping,copy=False) - assert_nodes_equal(G.nodes(),['aardvark', 'bear']) - assert_edges_equal(G.edges(),[('aardvark', 'bear'), ('aardvark', 'bear')]) + G = MultiGraph([('a', 'b'), ('a', 'b')]) + mapping = {'a': 'aardvark', 'b': 'bear'} + G = relabel_nodes(G, mapping, copy=False) + assert_nodes_equal(G.nodes(), ['aardvark', 'bear']) + assert_edges_equal(G.edges(), [('aardvark', 'bear'), ('aardvark', 'bear')]) def test_relabel_nodes_multidigraph(self): - G=MultiDiGraph([('a','b'),('a','b')]) - mapping={'a':'aardvark','b':'bear'} - G=relabel_nodes(G,mapping,copy=False) - assert_nodes_equal(G.nodes(),['aardvark', 'bear']) - assert_edges_equal(G.edges(),[('aardvark', 'bear'), ('aardvark', 'bear')]) + G = MultiDiGraph([('a', 'b'), ('a', 'b')]) + mapping = {'a': 'aardvark', 'b': 'bear'} + G = relabel_nodes(G, mapping, copy=False) + assert_nodes_equal(G.nodes(), ['aardvark', 'bear']) + assert_edges_equal(G.edges(), [('aardvark', 'bear'), ('aardvark', 'bear')]) def test_relabel_isolated_nodes_to_same(self): - G=Graph() + G = Graph() G.add_nodes_from(range(4)) - mapping={1:1} - H=relabel_nodes(G, mapping, copy=False) + mapping = {1: 1} + H = relabel_nodes(G, mapping, copy=False) assert_nodes_equal(H.nodes(), list(range(4))) @raises(KeyError) def test_relabel_nodes_missing(self): - G=Graph([('A','B'),('A','C'),('B','C'),('C','D')]) - mapping={0:'aardvark'} - G=relabel_nodes(G,mapping,copy=False) + G = Graph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')]) + mapping = {0: 'aardvark'} + G = relabel_nodes(G, mapping, copy=False) def test_relabel_copy_name(self): - G=Graph() + G = Graph() H = relabel_nodes(G, {}, copy=True) assert_equal(H.graph, G.graph) H = relabel_nodes(G, {}, copy=False) @@ -161,22 +169,21 @@ def test_relabel_copy_name(self): assert_equal(H.graph, G.graph) def test_relabel_toposort(self): - K5=nx.complete_graph(4) - G=nx.complete_graph(4) - G=nx.relabel_nodes(G,dict( [(i,i+1) for i in range(4)]),copy=False) - nx.is_isomorphic(K5,G) - G=nx.complete_graph(4) - G=nx.relabel_nodes(G,dict( [(i,i-1) for i in range(4)]),copy=False) - nx.is_isomorphic(K5,G) - + K5 = nx.complete_graph(4) + G = nx.complete_graph(4) + G = nx.relabel_nodes(G, dict([(i, i + 1) for i in range(4)]), copy=False) + nx.is_isomorphic(K5, G) + G = nx.complete_graph(4) + G = nx.relabel_nodes(G, dict([(i, i - 1) for i in range(4)]), copy=False) + nx.is_isomorphic(K5, G) def test_relabel_selfloop(self): G = nx.DiGraph([(1, 1), (1, 2), (2, 3)]) G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False) - assert_nodes_equal(G.nodes(),['One','Three','Two']) + assert_nodes_equal(G.nodes(), ['One', 'Three', 'Two']) G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)]) G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False) - assert_nodes_equal(G.nodes(),['One','Three','Two']) + assert_nodes_equal(G.nodes(), ['One', 'Three', 'Two']) G = nx.MultiDiGraph([(1, 1)]) G = nx.relabel_nodes(G, {1: 0}, copy=False) assert_nodes_equal(G.nodes(), [0])
_relabel_copy and OrderedGraph
The current behavior of `_relabel_copy()` is somewhat unintuitive for OrderedGraphs, where the nodes in the newly created graph are ordered according to the edge iterator of the original graph, not the node iterator. I think this would be fixed by placing `H.add_nodes_from(mapping.get(n, n) for n in G)` at the beginning of the function.
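A minimal sketch of the behavior in question, mirroring the regression test added in this PR (with nodes added first, relabeling an `OrderedGraph` preserves node-insertion order):

```python
import networkx as nx

G = nx.OrderedGraph()
G.add_nodes_from([1, 2, 3])
G.add_edges_from([(1, 3), (2, 3)])

H = nx.relabel_nodes(G, {1: 'a', 2: 'b', 3: 'c'})
# Previously the copy's node order followed the edge iterator; after the
# fix it follows G's node iterator:
print(list(H.nodes))  # ['a', 'b', 'c']
```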
I agree. This would bring that function into line with other graph copy/subgraph code. I suggest the order should be:

1. update the graph attribute dict.
2. add nodes and update node attributes.
3. add edges and edge attributes.

+1
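A simplified, non-multigraph sketch of `_relabel_copy` with that ordering, following what the patch above does (nodes and node attributes before edges):

```python
def _relabel_copy(G, mapping):
    H = G.__class__()
    # add nodes first, in G's node-iterator order, then copy node attributes
    H.add_nodes_from(mapping.get(n, n) for n in G)
    H._node.update((mapping.get(n, n), d.copy()) for n, d in G.node.items())
    # then add edges with copies of their attribute dicts
    H.add_edges_from((mapping.get(u, u), mapping.get(v, v), d.copy())
                     for u, v, d in G.edges(data=True))
    # finally copy the graph-level attribute dict
    H.graph.update(G.graph.copy())
    return H
```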
2017-06-26T23:27:26
networkx/networkx
2,489
networkx__networkx-2489
[ "2442" ]
22a6ebaf0c235a825195e48558f39b65c26d5a1c
diff --git a/networkx/classes/digraph.py b/networkx/classes/digraph.py --- a/networkx/classes/digraph.py +++ b/networkx/classes/digraph.py @@ -1124,7 +1124,8 @@ def reverse(self, copy=True): H.add_edges_from((v, u, deepcopy(d)) for u, v, d in self.edges(data=True)) H.graph = deepcopy(self.graph) - H._node = deepcopy(self._node) + for n in self._node: + H._node[n] = deepcopy(self._node[n]) else: self._pred, self._succ = self._succ, self._pred self._adj = self._succ
diff --git a/networkx/classes/tests/test_digraph.py b/networkx/classes/tests/test_digraph.py --- a/networkx/classes/tests/test_digraph.py +++ b/networkx/classes/tests/test_digraph.py @@ -1,173 +1,192 @@ #!/usr/bin/env python -from nose.tools import * -import networkx + +from nose.tools import assert_equal +from nose.tools import assert_false +from nose.tools import assert_true +from nose.tools import assert_raises + + +import networkx as nx +from networkx.testing import assert_nodes_equal from test_graph import BaseGraphTester, BaseAttrGraphTester, TestGraph from test_graph import TestEdgeSubgraph as TestGraphEdgeSubgraph + class BaseDiGraphTester(BaseGraphTester): def test_has_successor(self): - G=self.K3 - assert_equal(G.has_successor(0,1),True) - assert_equal(G.has_successor(0,-1),False) + G = self.K3 + assert_equal(G.has_successor(0, 1), True) + assert_equal(G.has_successor(0, -1), False) def test_successors(self): - G=self.K3 - assert_equal(sorted(G.successors(0)),[1,2]) - assert_raises((KeyError,networkx.NetworkXError), G.successors,-1) + G = self.K3 + assert_equal(sorted(G.successors(0)), [1, 2]) + assert_raises((KeyError, nx.NetworkXError), G.successors, -1) def test_has_predecessor(self): - G=self.K3 - assert_equal(G.has_predecessor(0,1),True) - assert_equal(G.has_predecessor(0,-1),False) + G = self.K3 + assert_equal(G.has_predecessor(0, 1), True) + assert_equal(G.has_predecessor(0, -1), False) def test_predecessors(self): - G=self.K3 - assert_equal(sorted(G.predecessors(0)),[1,2]) - assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1) + G = self.K3 + assert_equal(sorted(G.predecessors(0)), [1, 2]) + assert_raises((KeyError, nx.NetworkXError), G.predecessors, -1) def test_edges(self): - G=self.K3 - assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)]) - assert_equal(sorted(G.edges(0)),[(0,1),(0,2)]) + G = self.K3 + assert_equal(sorted(G.edges()), [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]) + assert_equal(sorted(G.edges(0)), [(0, 1), (0, 2)]) assert_equal(sorted(G.edges([0, 1])), [(0, 1), (0, 2), (1, 0), (1, 2)]) - assert_raises((KeyError,networkx.NetworkXError), G.edges,-1) + assert_raises((KeyError, nx.NetworkXError), G.edges, -1) def test_edges_data(self): - G=self.K3 - all_edges = [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})] + G = self.K3 + all_edges = [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})] assert_equal(sorted(G.edges(data=True)), all_edges) assert_equal(sorted(G.edges(0, data=True)), all_edges[:2]) assert_equal(sorted(G.edges([0, 1], data=True)), all_edges[:4]) - assert_raises((KeyError,networkx.NetworkXError), G.edges, -1, True) + assert_raises((KeyError, nx.NetworkXError), G.edges, -1, True) def test_out_edges(self): - G=self.K3 - assert_equal(sorted(G.out_edges()), - [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)]) - assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)]) - assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1) + G = self.K3 + assert_equal(sorted(G.out_edges()), [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]) + assert_equal(sorted(G.out_edges(0)), [(0, 1), (0, 2)]) + assert_raises((KeyError, nx.NetworkXError), G.out_edges, -1) def test_out_edges_dir(self): - G=self.P3 - assert_equal(sorted(G.out_edges()),[(0, 1), (1, 2)]) - assert_equal(sorted(G.out_edges(0)),[(0, 1)]) - assert_equal(sorted(G.out_edges(2)),[]) + G = self.P3 + assert_equal(sorted(G.out_edges()), [(0, 1), (1, 2)]) + assert_equal(sorted(G.out_edges(0)), [(0, 1)]) + assert_equal(sorted(G.out_edges(2)), []) def 
test_out_edges_data(self): - G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})]) - assert_equal(sorted(G.out_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})]) - assert_equal(sorted(G.out_edges(0, data=True)), [(0, 1, {'data' : 0})]) + G = nx.DiGraph([(0, 1, {'data': 0}), (1, 0, {})]) + assert_equal(sorted(G.out_edges(data=True)), [(0, 1, {'data': 0}), (1, 0, {})]) + assert_equal(sorted(G.out_edges(0, data=True)), [(0, 1, {'data': 0})]) assert_equal(sorted(G.out_edges(data='data')), [(0, 1, 0), (1, 0, None)]) assert_equal(sorted(G.out_edges(0, data='data')), [(0, 1, 0)]) def test_in_edges_dir(self): - G=self.P3 + G = self.P3 assert_equal(sorted(G.in_edges()), [(0, 1), (1, 2)]) assert_equal(sorted(G.in_edges(0)), []) - assert_equal(sorted(G.in_edges(2)), [(1,2)]) + assert_equal(sorted(G.in_edges(2)), [(1, 2)]) def test_in_edges_data(self): - G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})]) - assert_equal(sorted(G.in_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})]) - assert_equal(sorted(G.in_edges(1, data=True)), [(0, 1, {'data' : 0})]) + G = nx.DiGraph([(0, 1, {'data': 0}), (1, 0, {})]) + assert_equal(sorted(G.in_edges(data=True)), [(0, 1, {'data': 0}), (1, 0, {})]) + assert_equal(sorted(G.in_edges(1, data=True)), [(0, 1, {'data': 0})]) assert_equal(sorted(G.in_edges(data='data')), [(0, 1, 0), (1, 0, None)]) assert_equal(sorted(G.in_edges(1, data='data')), [(0, 1, 0)]) def test_degree(self): - G=self.K3 - assert_equal(sorted(G.degree()),[(0,4),(1,4),(2,4)]) - assert_equal(dict(G.degree()),{0:4,1:4,2:4}) + G = self.K3 + assert_equal(sorted(G.degree()), [(0, 4), (1, 4), (2, 4)]) + assert_equal(dict(G.degree()), {0: 4, 1: 4, 2: 4}) assert_equal(G.degree(0), 4) - assert_equal(list(G.degree(iter([0]))), [(0, 4)]) #run through iterator + assert_equal(list(G.degree(iter([0]))), [ + (0, 4)]) # run through iterator def test_in_degree(self): - G=self.K3 - assert_equal(sorted(G.in_degree()),[(0,2),(1,2),(2,2)]) - assert_equal(dict(G.in_degree()),{0:2,1:2,2:2}) + G = self.K3 + assert_equal(sorted(G.in_degree()), [(0, 2), (1, 2), (2, 2)]) + assert_equal(dict(G.in_degree()), {0: 2, 1: 2, 2: 2}) assert_equal(G.in_degree(0), 2) - assert_equal(list(G.in_degree(iter([0]))), [(0, 2)]) #run through iterator + assert_equal(list(G.in_degree(iter([0]))), [(0, 2)]) # run through iterator def test_in_degree_weighted(self): - G=self.K3 - G.add_edge(0,1,weight=0.3,other=1.2) - assert_equal(sorted(G.in_degree(weight='weight')),[(0,2),(1,1.3),(2,2)]) - assert_equal(dict(G.in_degree(weight='weight')),{0:2,1:1.3,2:2}) - assert_equal(G.in_degree(1,weight='weight'), 1.3) - assert_equal(sorted(G.in_degree(weight='other')),[(0,2),(1,2.2),(2,2)]) - assert_equal(dict(G.in_degree(weight='other')),{0:2,1:2.2,2:2}) - assert_equal(G.in_degree(1,weight='other'), 2.2) - assert_equal(list(G.in_degree(iter([1]),weight='other')), [(1, 2.2)]) + G = self.K3 + G.add_edge(0, 1, weight=0.3, other=1.2) + assert_equal(sorted(G.in_degree(weight='weight')), [(0, 2), (1, 1.3), (2, 2)]) + assert_equal(dict(G.in_degree(weight='weight')), {0: 2, 1: 1.3, 2: 2}) + assert_equal(G.in_degree(1, weight='weight'), 1.3) + assert_equal(sorted(G.in_degree(weight='other')), [(0, 2), (1, 2.2), (2, 2)]) + assert_equal(dict(G.in_degree(weight='other')), {0: 2, 1: 2.2, 2: 2}) + assert_equal(G.in_degree(1, weight='other'), 2.2) + assert_equal(list(G.in_degree(iter([1]), weight='other')), [(1, 2.2)]) def test_out_degree_weighted(self): - G=self.K3 - G.add_edge(0,1,weight=0.3,other=1.2) - 
assert_equal(sorted(G.out_degree(weight='weight')),[(0,1.3),(1,2),(2,2)]) - assert_equal(dict(G.out_degree(weight='weight')),{0:1.3,1:2,2:2}) - assert_equal(G.out_degree(0,weight='weight'), 1.3) - assert_equal(sorted(G.out_degree(weight='other')),[(0,2.2),(1,2),(2,2)]) - assert_equal(dict(G.out_degree(weight='other')),{0:2.2,1:2,2:2}) - assert_equal(G.out_degree(0,weight='other'), 2.2) + G = self.K3 + G.add_edge(0, 1, weight=0.3, other=1.2) + assert_equal(sorted(G.out_degree(weight='weight')), [(0, 1.3), (1, 2), (2, 2)]) + assert_equal(dict(G.out_degree(weight='weight')), {0: 1.3, 1: 2, 2: 2}) + assert_equal(G.out_degree(0, weight='weight'), 1.3) + assert_equal(sorted(G.out_degree(weight='other')), [(0, 2.2), (1, 2), (2, 2)]) + assert_equal(dict(G.out_degree(weight='other')), {0: 2.2, 1: 2, 2: 2}) + assert_equal(G.out_degree(0, weight='other'), 2.2) assert_equal(list(G.out_degree(iter([0]), weight='other')), [(0, 2.2)]) def test_out_degree(self): - G=self.K3 - assert_equal(sorted(G.out_degree()),[(0,2),(1,2),(2,2)]) - assert_equal(dict(G.out_degree()),{0:2,1:2,2:2}) + G = self.K3 + assert_equal(sorted(G.out_degree()), [(0, 2), (1, 2), (2, 2)]) + assert_equal(dict(G.out_degree()), {0: 2, 1: 2, 2: 2}) assert_equal(G.out_degree(0), 2) assert_equal(list(G.out_degree(iter([0]))), [(0, 2)]) def test_size(self): - G=self.K3 - assert_equal(G.size(),6) - assert_equal(G.number_of_edges(),6) + G = self.K3 + assert_equal(G.size(), 6) + assert_equal(G.number_of_edges(), 6) def test_to_undirected_reciprocal(self): - G=self.Graph() - G.add_edge(1,2) - assert_true(G.to_undirected().has_edge(1,2)) - assert_false(G.to_undirected(reciprocal=True).has_edge(1,2)) - G.add_edge(2,1) - assert_true(G.to_undirected(reciprocal=True).has_edge(1,2)) + G = self.Graph() + G.add_edge(1, 2) + assert_true(G.to_undirected().has_edge(1, 2)) + assert_false(G.to_undirected(reciprocal=True).has_edge(1, 2)) + G.add_edge(2, 1) + assert_true(G.to_undirected(reciprocal=True).has_edge(1, 2)) def test_reverse_copy(self): - G=networkx.DiGraph([(0,1),(1,2)]) - R=G.reverse() - assert_equal(sorted(R.edges()),[(1,0),(2,1)]) - R.remove_edge(1,0) - assert_equal(sorted(R.edges()),[(2,1)]) - assert_equal(sorted(G.edges()),[(0,1),(1,2)]) + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse() + assert_equal(sorted(R.edges()), [(1, 0), (2, 1)]) + R.remove_edge(1, 0) + assert_equal(sorted(R.edges()), [(2, 1)]) + assert_equal(sorted(G.edges()), [(0, 1), (1, 2)]) def test_reverse_nocopy(self): - G=networkx.DiGraph([(0,1),(1,2)]) - R=G.reverse(copy=False) - assert_equal(sorted(R.edges()),[(1,0),(2,1)]) - R.remove_edge(1,0) - assert_equal(sorted(R.edges()),[(2,1)]) - assert_equal(sorted(G.edges()),[(2,1)]) - - -class BaseAttrDiGraphTester(BaseDiGraphTester,BaseAttrGraphTester): + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse(copy=False) + assert_equal(sorted(R.edges()), [(1, 0), (2, 1)]) + R.remove_edge(1, 0) + assert_equal(sorted(R.edges()), [(2, 1)]) + assert_equal(sorted(G.edges()), [(2, 1)]) + + def test_reverse_hashable(self): + class Foo(object): + pass + x = Foo() + y = Foo() + G = nx.DiGraph() + G.add_edge(x, y) + assert_nodes_equal(G.nodes(), G.reverse().nodes()) + assert_equal([(y, x)], list(G.reverse().edges())) + + +class BaseAttrDiGraphTester(BaseDiGraphTester, BaseAttrGraphTester): pass -class TestDiGraph(BaseAttrDiGraphTester,TestGraph): +class TestDiGraph(BaseAttrDiGraphTester, TestGraph): """Tests specific to dict-of-dict-of-dict digraph data structure""" + def setUp(self): - self.Graph=networkx.DiGraph + self.Graph = nx.DiGraph # 
build dict-of-dict-of-dict K3 - ed1,ed2,ed3,ed4,ed5,ed6 = ({},{},{},{},{},{}) - self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1:ed6}} + ed1, ed2, ed3, ed4, ed5, ed6 = ({}, {}, {}, {}, {}, {}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}} self.k3edges = [(0, 1), (0, 2), (1, 2)] self.k3nodes = [0, 1, 2] self.K3 = self.Graph() self.K3._adj = self.K3._succ = self.k3adj - self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1:ed4}} + self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}} self.K3._node = {} self.K3._node[0] = {} self.K3._node[1] = {} self.K3._node[2] = {} - ed1,ed2 = ({},{}) + ed1, ed2 = ({}, {}) self.P3 = self.Graph() self.P3._adj = {0: {1: ed1}, 1: {2: ed2}, 2: {}} self.P3._succ = self.P3._adj @@ -178,48 +197,48 @@ def setUp(self): self.P3._node[2] = {} def test_data_input(self): - G=self.Graph(data={1:[2],2:[1]}, name="test") - assert_equal(G.name,"test") - assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})]) - assert_equal(sorted(G.succ.items()),[(1, {2: {}}), (2, {1: {}})]) - assert_equal(sorted(G.pred.items()),[(1, {2: {}}), (2, {1: {}})]) + G = self.Graph(data={1: [2], 2: [1]}, name="test") + assert_equal(G.name, "test") + assert_equal(sorted(G.adj.items()), [(1, {2: {}}), (2, {1: {}})]) + assert_equal(sorted(G.succ.items()), [(1, {2: {}}), (2, {1: {}})]) + assert_equal(sorted(G.pred.items()), [(1, {2: {}}), (2, {1: {}})]) def test_add_edge(self): - G=self.Graph() - G.add_edge(0,1) - assert_equal(G.adj,{0: {1: {}}, 1: {}}) - assert_equal(G.succ,{0: {1: {}}, 1: {}}) - assert_equal(G.pred,{0: {}, 1: {0:{}}}) - G=self.Graph() - G.add_edge(*(0,1)) - assert_equal(G.adj,{0: {1: {}}, 1: {}}) - assert_equal(G.succ,{0: {1: {}}, 1: {}}) - assert_equal(G.pred,{0: {}, 1: {0:{}}}) + G = self.Graph() + G.add_edge(0, 1) + assert_equal(G.adj, {0: {1: {}}, 1: {}}) + assert_equal(G.succ, {0: {1: {}}, 1: {}}) + assert_equal(G.pred, {0: {}, 1: {0: {}}}) + G = self.Graph() + G.add_edge(*(0, 1)) + assert_equal(G.adj, {0: {1: {}}, 1: {}}) + assert_equal(G.succ, {0: {1: {}}, 1: {}}) + assert_equal(G.pred, {0: {}, 1: {0: {}}}) def test_add_edges_from(self): - G=self.Graph() - G.add_edges_from([(0,1),(0,2,{'data':3})],data=2) - assert_equal(G.adj,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}}) - assert_equal(G.succ,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}}) - assert_equal(G.pred,{0: {}, 1: {0: {'data':2}}, 2: {0: {'data':3}}}) + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {'data': 3})], data=2) + assert_equal(G.adj, {0: {1: {'data': 2}, 2: {'data': 3}}, 1: {}, 2: {}}) + assert_equal(G.succ, {0: {1: {'data': 2}, 2: {'data': 3}}, 1: {}, 2: {}}) + assert_equal(G.pred, {0: {}, 1: {0: {'data': 2}}, 2: {0: {'data': 3}}}) - assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple - assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3)]) # too many in tuple - assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple + assert_raises(nx.NetworkXError, G.add_edges_from, [(0,)]) # too few in tuple + assert_raises(nx.NetworkXError, G.add_edges_from, [(0, 1, 2, 3)]) # too many in tuple + assert_raises(TypeError, G.add_edges_from, [0]) # not a tuple def test_remove_edge(self): - G=self.K3 - G.remove_edge(0,1) - assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}}) - assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1:{}}}) - assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0) + G = self.K3 + G.remove_edge(0, 1) + 
assert_equal(G.succ, {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}}) + assert_equal(G.pred, {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}}) + assert_raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0) def test_remove_edges_from(self): - G=self.K3 - G.remove_edges_from([(0,1)]) - assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}}) - assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1: {}}}) - G.remove_edges_from([(0,0)]) # silent fail + G = self.K3 + G.remove_edges_from([(0, 1)]) + assert_equal(G.succ, {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}}) + assert_equal(G.pred, {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}}) + G.remove_edges_from([(0, 0)]) # silent fail class TestEdgeSubgraph(TestGraphEdgeSubgraph): @@ -227,7 +246,7 @@ class TestEdgeSubgraph(TestGraphEdgeSubgraph): def setup(self): # Create a doubly-linked path graph on five nodes. - G = networkx.DiGraph(networkx.path_graph(5)) + G = nx.DiGraph(nx.path_graph(5)) # Add some node, edge, and graph attributes. for i in range(5): G.node[i]['name'] = 'node{}'.format(i) @@ -244,7 +263,7 @@ def test_pred_succ(self): For more information, see GitHub issue #2370. """ - G = networkx.DiGraph() + G = nx.DiGraph() G.add_edge(0, 1) H = G.edge_subgraph([(0, 1)]) assert_equal(list(H.predecessors(0)), [])
Reverse breaks DiGraphs with default-hashed objects
The NetworkX documentation [states that graph nodes can be any hashable object](https://networkx.github.io/documentation/networkx-1.10/reference/introduction.html#networkx-basics), and the glossary [defines hashable](https://networkx.github.io/documentation/networkx-1.10/reference/glossary.html#term-hashable) per the Python definition (that is, implements `__hash__` and a comparison method). It goes out of its way to state that instances of user-defined classes are hashable (and unique) by default:

> All of Python’s immutable built-in objects are hashable, while no mutable containers (such as lists or dictionaries) are. **Objects which are instances of user-defined classes are hashable by default; they all compare unequal, and their hash value is their id().**

That would seem to suggest that NetworkX supports graphs made of default-hashed objects (and nodes are considered the same if they're reference-equal). However, I found that the `reverse` method doesn't respect this property. Example:

```python
import networkx as nx

class Unique(object):
    pass

u = Unique()

g = nx.DiGraph()
g.add_edge("A", u)

assert "A" in g
assert u in g  # passes

g.nodes()  # ['A', <__main__.Unique object at 0x102e0fe10>]
g.edges()  # [('A', <__main__.Unique object at 0x102e0fe10>)]

rev = g.reverse()

assert "A" in rev
assert u in rev  # fails ಠ_ಠ

rev.nodes()  # ['A', <__main__.Unique object at 0x102e0fd10>]
rev.edges()  # [(<__main__.Unique object at 0x102e0fe10>, 'A')]
# Unique address is different in nodes() and edges(): `fd10` vs `fe10` ಠ_ಠ
```

What seems to happen is that `reverse` "deep copies" the graph (this is where it calls `Unique.__new__`, at least), and then seemingly doesn't keep track of objects that it has already copied.
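The mechanism is worth spelling out with a minimal, plain-Python sketch: `deepcopy` on a dict also deep-copies the *keys*, and a default-hashed object hashes by `id()`, so the copied key no longer compares equal to the original node:

```python
from copy import deepcopy

class Unique(object):
    pass

u = Unique()
d = {u: {'attr': 1}}

d2 = deepcopy(d)
print(u in d2)  # False -- the key was replaced by a fresh Unique instance

# Deep-copying only the values keeps node identity intact:
d3 = {n: deepcopy(d[n]) for n in d}
print(u in d3)  # True
```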
See #2282.

@hagberg I think this is a slightly different issue, one with DiGraph.reverse. The test case here should be something like

```python
def test_reverse_hashable(self):
    class Foo(object):
        pass
    x = Foo()
    y = Foo()
    G = nx.DiGraph()
    G.add_edge(x, y)
    self.assertCountEqual(G.nodes(), G.reverse().nodes())
    self.assertCountEqual(map(reversed, G.edges()), G.reverse().edges())
```

Is the contract for `DiGraph.reverse` supposed to be that the nodes are the exact same objects?

@hagberg #2282 looks different to me. The core issue was that distinct objects with the same hash that compared equal were referenced inconsistently between nodes and edges. The reporter said that this was probably not a bug, and I'd tend to agree with that, since the graph contract is that nodes are distinguished with their hash and equality property. In this case, `reverse` appears to make *two copies of one single object* whose hash and equality are derived from its `id()`, destroying the equality relationship in the process. That result is inconsistent with the promise that NetworkX graphs can take objects that use the default hash and equality.

I see what you are saying. You are right - this is different than that earlier issue and it is a bug. I think the fix should be in digraph.reverse()

```python
# H.node=deepcopy(self.node)
for n in self.node:
    H.node[n]=deepcopy(self.node[n])
```

Thanks for your careful look at this.

This just needs a PR with @hagberg's fix and @jfinkels' test added to the tests.

I appreciate that this is getting attention. Unfortunately, my company makes it absurdly hard to contribute source code to open-source projects. Pointing out issues is about as much as I can do, right now. 😕
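With the fix applied, a minimal sketch mirroring the `test_reverse_hashable` regression test above (only the per-node attribute dicts are deep-copied, so node identity survives):

```python
import networkx as nx

class Foo(object):
    pass

x, y = Foo(), Foo()
G = nx.DiGraph()
G.add_edge(x, y)

R = G.reverse()
assert x in R and y in R            # the original objects are still the nodes
assert list(R.edges()) == [(y, x)]  # only the edge direction is reversed
```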
2017-06-27T06:49:52
networkx/networkx
2,490
networkx__networkx-2490
[ "2111", "2111" ]
144f8172770ff7cda91b2aa12eceb672abd5d680
diff --git a/networkx/convert.py b/networkx/convert.py --- a/networkx/convert.py +++ b/networkx/convert.py @@ -24,13 +24,14 @@ import warnings import networkx as nx __author__ = """\n""".join(['Aric Hagberg <[email protected]>', - 'Pieter Swart ([email protected])', - 'Dan Schult([email protected])']) + 'Pieter Swart ([email protected])', + 'Dan Schult([email protected])']) __all__ = ['to_networkx_graph', 'from_dict_of_dicts', 'to_dict_of_dicts', 'from_dict_of_lists', 'to_dict_of_lists', 'from_edgelist', 'to_edgelist'] + def _prep_create_using(create_using): """Return a graph object ready to be populated. @@ -47,7 +48,8 @@ def _prep_create_using(create_using): raise TypeError("Input graph is not a networkx graph type") return create_using -def to_networkx_graph(data,create_using=None,multigraph_input=False): + +def to_networkx_graph(data, create_using=None, multigraph_input=False): """Make a NetworkX graph from a known data structure. The preferred way to call this is automatically @@ -67,8 +69,9 @@ def to_networkx_graph(data,create_using=None,multigraph_input=False): Current known types are: any NetworkX graph dict-of-dicts - dist-of-lists + dict-of-lists list of edges + Pandas DataFrame (row per edge) numpy matrix numpy ndarray scipy sparse matrix @@ -85,45 +88,45 @@ def to_networkx_graph(data,create_using=None,multigraph_input=False): """ # NX graph - if hasattr(data,"adj"): + if hasattr(data, "adj"): try: - result= from_dict_of_dicts(data.adj,\ - create_using=create_using,\ - multigraph_input=data.is_multigraph()) - if hasattr(data,'graph'): # data.graph should be dict-like + result = from_dict_of_dicts(data.adj, + create_using=create_using, + multigraph_input=data.is_multigraph()) + if hasattr(data, 'graph'): # data.graph should be dict-like result.graph.update(data.graph) - if hasattr(data,'node'): # data.node should be dict-like - result._node.update( (n,dd.copy()) for n,dd in data.node.items() ) + if hasattr(data, 'node'): # data.node should be dict-like + result._node.update((n, dd.copy()) for n, dd in data.node.items()) return result except: raise nx.NetworkXError("Input is not a correct NetworkX graph.") # pygraphviz agraph - if hasattr(data,"is_strict"): + if hasattr(data, "is_strict"): try: - return nx.nx_agraph.from_agraph(data,create_using=create_using) + return nx.nx_agraph.from_agraph(data, create_using=create_using) except: raise nx.NetworkXError("Input is not a correct pygraphviz graph.") # dict of dicts/lists - if isinstance(data,dict): + if isinstance(data, dict): try: - return from_dict_of_dicts(data,create_using=create_using,\ - multigraph_input=multigraph_input) + return from_dict_of_dicts(data, create_using=create_using, + multigraph_input=multigraph_input) except: try: - return from_dict_of_lists(data,create_using=create_using) + return from_dict_of_lists(data, create_using=create_using) except: raise TypeError("Input is not known type.") # list or generator of edges - if (isinstance(data, list) - or isinstance(data, tuple) - or hasattr(data, '_adjdict') - or hasattr(data, 'next') - or hasattr(data, '__next__')): + if (isinstance(data, list) or + isinstance(data, tuple) or + hasattr(data, '_adjdict') or + hasattr(data, 'next') or + hasattr(data, '__next__')): try: - return from_edgelist(data,create_using=create_using) + return from_edgelist(data, create_using=create_using) except: raise nx.NetworkXError("Input is not a valid edge list") @@ -132,7 +135,7 @@ def to_networkx_graph(data,create_using=None,multigraph_input=False): import pandas as pd if isinstance(data, 
pd.DataFrame): try: - return nx.from_pandas_dataframe(data, create_using=create_using) + return nx.from_pandas_dataframe(data, edge_attr=True, create_using=create_using) except: msg = "Input is not a correct Pandas DataFrame." raise nx.NetworkXError(msg) @@ -143,13 +146,12 @@ def to_networkx_graph(data,create_using=None,multigraph_input=False): # numpy matrix or ndarray try: import numpy - if isinstance(data,numpy.matrix) or \ - isinstance(data,numpy.ndarray): + if isinstance(data, numpy.matrix) or isinstance(data, numpy.ndarray): try: - return nx.from_numpy_matrix(data,create_using=create_using) + return nx.from_numpy_matrix(data, create_using=create_using) except: - raise nx.NetworkXError(\ - "Input is not a correct numpy matrix or array.") + raise nx.NetworkXError( + "Input is not a correct numpy matrix or array.") except ImportError: warnings.warn('numpy not found, skipping conversion test.', ImportWarning) @@ -157,24 +159,23 @@ def to_networkx_graph(data,create_using=None,multigraph_input=False): # scipy sparse matrix - any format try: import scipy - if hasattr(data,"format"): + if hasattr(data, "format"): try: - return nx.from_scipy_sparse_matrix(data,create_using=create_using) + return nx.from_scipy_sparse_matrix(data, create_using=create_using) except: - raise nx.NetworkXError(\ - "Input is not a correct scipy sparse matrix type.") + raise nx.NetworkXError( + "Input is not a correct scipy sparse matrix type.") except ImportError: warnings.warn('scipy not found, skipping conversion test.', ImportWarning) - - raise nx.NetworkXError(\ - "Input is not a known data type for conversion.") + raise nx.NetworkXError( + "Input is not a known data type for conversion.") return -def to_dict_of_lists(G,nodelist=None): +def to_dict_of_lists(G, nodelist=None): """Return adjacency representation of graph as a dictionary of lists. Parameters @@ -191,14 +192,15 @@ def to_dict_of_lists(G,nodelist=None): """ if nodelist is None: - nodelist=G + nodelist = G d = {} for n in nodelist: - d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist] + d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist] return d -def from_dict_of_lists(d,create_using=None): + +def from_dict_of_lists(d, create_using=None): """Return a graph from a dictionary of lists. Parameters @@ -218,25 +220,25 @@ def from_dict_of_lists(d,create_using=None): >>> G=nx.Graph(dol) # use Graph constructor """ - G=_prep_create_using(create_using) + G = _prep_create_using(create_using) G.add_nodes_from(d) if G.is_multigraph() and not G.is_directed(): # a dict_of_lists can't show multiedges. BUT for undirected graphs, # each edge shows up twice in the dict_of_lists. # So we need to treat this case separately. - seen={} - for node,nbrlist in d.items(): + seen = {} + for node, nbrlist in d.items(): for nbr in nbrlist: if nbr not in seen: - G.add_edge(node,nbr) - seen[node]=1 # don't allow reverse edge to show up + G.add_edge(node, nbr) + seen[node] = 1 # don't allow reverse edge to show up else: - G.add_edges_from( ((node,nbr) for node,nbrlist in d.items() - for nbr in nbrlist) ) + G.add_edges_from(((node, nbr) for node, nbrlist in d.items() + for nbr in nbrlist)) return G -def to_dict_of_dicts(G,nodelist=None,edge_data=None): +def to_dict_of_dicts(G, nodelist=None, edge_data=None): """Return adjacency representation of graph as a dictionary of dictionaries. Parameters @@ -254,28 +256,29 @@ def to_dict_of_dicts(G,nodelist=None,edge_data=None): If edgedata is None, the edgedata in G is used to fill the values. 
If G is a multigraph, the edgedata is a dict for each pair (u,v). """ - dod={} + dod = {} if nodelist is None: if edge_data is None: - for u,nbrdict in G.adjacency(): - dod[u]=nbrdict.copy() - else: # edge_data is not None - for u,nbrdict in G.adjacency(): - dod[u]=dod.fromkeys(nbrdict, edge_data) - else: # nodelist is not None + for u, nbrdict in G.adjacency(): + dod[u] = nbrdict.copy() + else: # edge_data is not None + for u, nbrdict in G.adjacency(): + dod[u] = dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None if edge_data is None: for u in nodelist: - dod[u]={} - for v,data in ((v,data) for v,data in G[u].items() if v in nodelist): - dod[u][v]=data - else: # nodelist and edge_data are not None + dod[u] = {} + for v, data in ((v, data) for v, data in G[u].items() if v in nodelist): + dod[u][v] = data + else: # nodelist and edge_data are not None for u in nodelist: - dod[u]={} - for v in ( v for v in G[u] if v in nodelist): - dod[u][v]=edge_data + dod[u] = {} + for v in (v for v in G[u] if v in nodelist): + dod[u][v] = edge_data return dod -def from_dict_of_dicts(d,create_using=None,multigraph_input=False): + +def from_dict_of_dicts(d, create_using=None, multigraph_input=False): """Return a graph from a dictionary of dictionaries. Parameters @@ -300,62 +303,63 @@ def from_dict_of_dicts(d,create_using=None,multigraph_input=False): >>> G=nx.Graph(dod) # use Graph constructor """ - G=_prep_create_using(create_using) + G = _prep_create_using(create_using) G.add_nodes_from(d) # is dict a MultiGraph or MultiDiGraph? if multigraph_input: # make a copy of the list of edge data (but not the edge data) if G.is_directed(): if G.is_multigraph(): - G.add_edges_from( (u,v,key,data) - for u,nbrs in d.items() - for v,datadict in nbrs.items() - for key,data in datadict.items() - ) + G.add_edges_from((u, v, key, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) else: - G.add_edges_from( (u,v,data) - for u,nbrs in d.items() - for v,datadict in nbrs.items() - for key,data in datadict.items() - ) - else: # Undirected + G.add_edges_from((u, v, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: # Undirected if G.is_multigraph(): - seen=set() # don't add both directions of undirected graph - for u,nbrs in d.items(): - for v,datadict in nbrs.items(): - if (u,v) not in seen: - G.add_edges_from( (u,v,key,data) - for key,data in datadict.items() - ) - seen.add((v,u)) + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from((u, v, key, data) + for key, data in datadict.items() + ) + seen.add((v, u)) else: - seen=set() # don't add both directions of undirected graph - for u,nbrs in d.items(): - for v,datadict in nbrs.items(): - if (u,v) not in seen: - G.add_edges_from( (u,v,data) - for key,data in datadict.items() ) - seen.add((v,u)) - - else: # not a multigraph to multigraph transfer + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from((u, v, data) + for key, data in datadict.items()) + seen.add((v, u)) + + else: # not a multigraph to multigraph transfer if G.is_multigraph() and not G.is_directed(): # d can have both representations u-v, v-u in dict. Only add one. 
# We don't need this check for digraphs since we add both directions, # or for Graph() since it is done implicitly (parallel edges not allowed) - seen=set() - for u,nbrs in d.items(): - for v,data in nbrs.items(): - if (u,v) not in seen: - G.add_edge(u,v,key=0) + seen = set() + for u, nbrs in d.items(): + for v, data in nbrs.items(): + if (u, v) not in seen: + G.add_edge(u, v, key=0) G[u][v][0].update(data) - seen.add((v,u)) + seen.add((v, u)) else: - G.add_edges_from( ( (u,v,data) - for u,nbrs in d.items() - for v,data in nbrs.items()) ) + G.add_edges_from(((u, v, data) + for u, nbrs in d.items() + for v, data in nbrs.items())) return G -def to_edgelist(G,nodelist=None): + +def to_edgelist(G, nodelist=None): """Return a list of edges in the graph. Parameters @@ -370,9 +374,10 @@ def to_edgelist(G,nodelist=None): if nodelist is None: return G.edges(data=True) else: - return G.edges(nodelist,data=True) + return G.edges(nodelist, data=True) + -def from_edgelist(edgelist,create_using=None): +def from_edgelist(edgelist, create_using=None): """Return a graph from a list of edges. Parameters @@ -392,6 +397,6 @@ def from_edgelist(edgelist,create_using=None): >>> G=nx.Graph(edgelist) # use Graph constructor """ - G=_prep_create_using(create_using) + G = _prep_create_using(create_using) G.add_edges_from(edgelist) return G diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py --- a/networkx/convert_matrix.py +++ b/networkx/convert_matrix.py @@ -32,13 +32,14 @@ from networkx.convert import _prep_create_using from networkx.utils import not_implemented_for __author__ = """\n""".join(['Aric Hagberg <[email protected]>', - 'Pieter Swart ([email protected])', - 'Dan Schult([email protected])']) + 'Pieter Swart ([email protected])', + 'Dan Schult([email protected])']) __all__ = ['from_numpy_matrix', 'to_numpy_matrix', 'from_pandas_dataframe', 'to_pandas_dataframe', 'to_numpy_recarray', 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix'] + def to_pandas_dataframe(G, nodelist=None, dtype=None, order=None, multigraph_weight=sum, weight='weight', nonedge=0.0): """Return the graph adjacency matrix as a Pandas DataFrame. @@ -127,9 +128,9 @@ def to_pandas_dataframe(G, nodelist=None, dtype=None, order=None, return pd.DataFrame(data=M, index=nodelist, columns=nodelist) -def from_pandas_dataframe(df, source, target, edge_attr=None, - create_using=None): - """Return a graph from Pandas DataFrame. +def from_pandas_dataframe(df, source='source', target='target', edge_attr=None, + create_using=None): + """Return a graph from Pandas DataFrame containing an edge list. The Pandas DataFrame should contain at least two columns of node names and zero or more columns of node attributes. Each row will be processed as one @@ -189,6 +190,13 @@ def from_pandas_dataframe(df, source, target, edge_attr=None, 10 >>> G['E']['C']['cost'] 9 + >>> edges = pd.DataFrame({'source': [0, 1, 2], + ... 'target': [2, 2, 3], + ... 'weight': [3, 4, 5], + ... 
'color': ['red', 'blue', 'blue']}) + >>> G = nx.from_pandas_dataframe(edges, edge_attr=True) + >>> G[0][2]['color'] + 'red' """ g = _prep_create_using(create_using) @@ -210,7 +218,7 @@ def from_pandas_dataframe(df, source, target, edge_attr=None, edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr] # If a string or int is passed else: - edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),] + edge_i = [(edge_attr, df.columns.get_loc(edge_attr)), ] # Iteration on values returns the rows as Numpy arrays for row in df.values: @@ -230,6 +238,7 @@ def from_pandas_dataframe(df, source, target, edge_attr=None, return g + def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, multigraph_weight=sum, weight='weight', nonedge=0.0): """Return the graph adjacency matrix as a NumPy matrix. @@ -328,9 +337,9 @@ def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, msg = "Ambiguous ordering: `nodelist` contained duplicates." raise nx.NetworkXError(msg) - nlen=len(nodelist) + nlen = len(nodelist) undirected = not G.is_directed() - index=dict(zip(nodelist,range(nlen))) + index = dict(zip(nodelist, range(nlen))) # Initially, we start with an array of nans. Then we populate the matrix # using data from the graph. Afterwards, any leftover nans will be @@ -368,26 +377,26 @@ def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, # Handle MultiGraphs and MultiDiGraphs M = np.full((nlen, nlen), np.nan, order=order) # use numpy nan-aware operations - operator={sum:np.nansum, min:np.nanmin, max:np.nanmax} + operator = {sum: np.nansum, min: np.nanmin, max: np.nanmax} try: - op=operator[multigraph_weight] + op = operator[multigraph_weight] except: raise ValueError('multigraph_weight must be sum, min, or max') - for u,v,attrs in G.edges(data=True): + for u, v, attrs in G.edges(data=True): if (u in nodeset) and (v in nodeset): i, j = index[u], index[v] e_weight = attrs.get(weight, 1) - M[i,j] = op([e_weight, M[i,j]]) + M[i, j] = op([e_weight, M[i, j]]) if undirected: - M[j,i] = M[i,j] + M[j, i] = M[i, j] else: # Graph or DiGraph, this is much faster than above M = np.full((nlen, nlen), np.nan, order=order) - for u,nbrdict in G.adjacency(): - for v,d in nbrdict.items(): + for u, nbrdict in G.adjacency(): + for v, d in nbrdict.items(): try: - M[index[u],index[v]] = d.get(weight,1) + M[index[u], index[v]] = d.get(weight, 1) except KeyError: # This occurs when there are fewer desired nodes than # there are nodes in the graph: len(nodelist) < len(G) @@ -485,28 +494,28 @@ def from_numpy_matrix(A, parallel_edges=False, create_using=None): """ # This should never fail if you have created a numpy matrix with numpy... 
import numpy as np - kind_to_python_type={'f':float, - 'i':int, - 'u':int, - 'b':bool, - 'c':complex, - 'S':str, - 'V':'void'} - try: # Python 3.x - blurb = chr(1245) # just to trigger the exception - kind_to_python_type['U']=str - except ValueError: # Python 2.6+ - kind_to_python_type['U']=unicode - G=_prep_create_using(create_using) - n,m=A.shape - if n!=m: + kind_to_python_type = {'f': float, + 'i': int, + 'u': int, + 'b': bool, + 'c': complex, + 'S': str, + 'V': 'void'} + try: # Python 3.x + blurb = chr(1245) # just to trigger the exception + kind_to_python_type['U'] = str + except ValueError: # Python 2.6+ + kind_to_python_type['U'] = unicode + G = _prep_create_using(create_using) + n, m = A.shape + if n != m: raise nx.NetworkXError("Adjacency matrix is not square.", - "nx,ny=%s"%(A.shape,)) - dt=A.dtype + "nx,ny=%s" % (A.shape,)) + dt = A.dtype try: - python_type=kind_to_python_type[dt.kind] + python_type = kind_to_python_type[dt.kind] except: - raise TypeError("Unknown numpy data type: %s"%dt) + raise TypeError("Unknown numpy data type: %s" % dt) # Make sure we get even the isolated nodes of the graph. G.add_nodes_from(range(n)) @@ -607,19 +616,19 @@ def to_numpy_recarray(G, nodelist=None, dtype=None, order=None): if len(nodelist) != len(nodeset): msg = "Ambiguous ordering: `nodelist` contained duplicates." raise nx.NetworkXError(msg) - nlen=len(nodelist) + nlen = len(nodelist) undirected = not G.is_directed() - index=dict(zip(nodelist,range(nlen))) - M = np.zeros((nlen,nlen), dtype=dtype, order=order) + index = dict(zip(nodelist, range(nlen))) + M = np.zeros((nlen, nlen), dtype=dtype, order=order) - names=M.dtype.names - for u,v,attrs in G.edges(data=True): + names = M.dtype.names + for u, v, attrs in G.edges(data=True): if (u in nodeset) and (v in nodeset): - i,j = index[u],index[v] - values=tuple([attrs[n] for n in names]) - M[i,j] = values + i, j = index[u], index[v] + values = tuple([attrs[n] for n in names]) + M[i, j] = values if undirected: - M[j,i] = M[i,j] + M[j, i] = M[i, j] return M.view(np.recarray) @@ -717,19 +726,19 @@ def to_scipy_sparse_matrix(G, nodelist=None, dtype=None, msg = "Ambiguous ordering: `nodelist` contained duplicates." 
raise nx.NetworkXError(msg) - index = dict(zip(nodelist,range(nlen))) - coefficients = zip(*((index[u],index[v],d.get(weight,1)) - for u,v,d in G.edges(nodelist, data=True) + index = dict(zip(nodelist, range(nlen))) + coefficients = zip(*((index[u], index[v], d.get(weight, 1)) + for u, v, d in G.edges(nodelist, data=True) if u in index and v in index)) try: - row,col,data = coefficients + row, col, data = coefficients except ValueError: # there is no edge in the subgraph - row,col,data = [],[],[] + row, col, data = [], [], [] if G.is_directed(): - M = sparse.coo_matrix((data,(row,col)), - shape=(nlen,nlen), dtype=dtype) + M = sparse.coo_matrix((data, (row, col)), + shape=(nlen, nlen), dtype=dtype) else: # symmetrize matrix d = data + data @@ -739,17 +748,17 @@ def to_scipy_sparse_matrix(G, nodelist=None, dtype=None, # so we subtract the data on the diagonal selfloops = list(G.selfloop_edges(data=True)) if selfloops: - diag_index,diag_data = zip(*((index[u],-d.get(weight,1)) - for u,v,d in selfloops - if u in index and v in index)) + diag_index, diag_data = zip(*((index[u], -d.get(weight, 1)) + for u, v, d in selfloops + if u in index and v in index)) d += diag_data r += diag_index c += diag_index - M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype) + M = sparse.coo_matrix((d, (r, c)), shape=(nlen, nlen), dtype=dtype) try: return M.asformat(format) except AttributeError: - raise nx.NetworkXError("Unknown sparse matrix format: %s"%format) + raise nx.NetworkXError("Unknown sparse matrix format: %s" % format) def _csr_gen_triples(A): @@ -760,7 +769,7 @@ def _csr_gen_triples(A): nrows = A.shape[0] data, indices, indptr = A.data, A.indices, A.indptr for i in range(nrows): - for j in range(indptr[i], indptr[i+1]): + for j in range(indptr[i], indptr[i + 1]): yield i, indices[j], data[j] @@ -772,7 +781,7 @@ def _csc_gen_triples(A): ncols = A.shape[1] data, indices, indptr = A.data, A.indices, A.indptr for i in range(ncols): - for j in range(indptr[i], indptr[i+1]): + for j in range(indptr[i], indptr[i + 1]): yield indices[j], i, data[j] @@ -877,10 +886,10 @@ def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None, """ G = _prep_create_using(create_using) - n,m = A.shape + n, m = A.shape if n != m: - raise nx.NetworkXError(\ - "Adjacency matrix is not square. nx,ny=%s"%(A.shape,)) + raise nx.NetworkXError( + "Adjacency matrix is not square. nx,ny=%s" % (A.shape,)) # Make sure we get even the isolated nodes of the graph. G.add_nodes_from(range(n)) # Create an iterable over (u, v, w) triples and for each triple, add an
diff --git a/networkx/tests/test_convert.py b/networkx/tests/test_convert.py --- a/networkx/tests/test_convert.py +++ b/networkx/tests/test_convert.py @@ -1,198 +1,202 @@ #!/usr/bin/env python -from nose.tools import * -from networkx.testing import * -from networkx import * -from networkx.convert import * -from networkx.algorithms.operators import * -from networkx.generators.classic import barbell_graph,cycle_graph +from nose.tools import assert_equal, assert_not_equal, assert_true, assert_false + +import networkx as nx +from networkx.testing import assert_nodes_equal, assert_edges_equal, assert_graphs_equal +from networkx.convert import (to_networkx_graph, + to_dict_of_dicts, + from_dict_of_dicts, + to_dict_of_lists, + from_dict_of_lists) +from networkx.generators.classic import barbell_graph, cycle_graph + class TestConvert(): - def edgelists_equal(self,e1,e2): - return sorted(sorted(e) for e in e1)==sorted(sorted(e) for e in e2) + def edgelists_equal(self, e1, e2): + return sorted(sorted(e) for e in e1) == sorted(sorted(e) for e in e2) def test_simple_graphs(self): for dest, source in [(to_dict_of_dicts, from_dict_of_dicts), (to_dict_of_lists, from_dict_of_lists)]: - G=barbell_graph(10,3) - G.graph={} - dod=dest(G) + G = barbell_graph(10, 3) + G.graph = {} + dod = dest(G) # Dict of [dicts, lists] - GG=source(dod) - assert_graphs_equal(G,GG) - GW=to_networkx_graph(dod) - assert_graphs_equal(G,GW) - GI=Graph(dod) - assert_graphs_equal(G,GI) + GG = source(dod) + assert_graphs_equal(G, GG) + GW = to_networkx_graph(dod) + assert_graphs_equal(G, GW) + GI = nx.Graph(dod) + assert_graphs_equal(G, GI) # With nodelist keyword - P4=path_graph(4) - P3=path_graph(3) - P4.graph={} - P3.graph={} - dod=dest(P4,nodelist=[0,1,2]) - Gdod=Graph(dod) - assert_graphs_equal(Gdod,P3) + P4 = nx.path_graph(4) + P3 = nx.path_graph(3) + P4.graph = {} + P3.graph = {} + dod = dest(P4, nodelist=[0, 1, 2]) + Gdod = nx.Graph(dod) + assert_graphs_equal(Gdod, P3) def test_digraphs(self): for dest, source in [(to_dict_of_dicts, from_dict_of_dicts), (to_dict_of_lists, from_dict_of_lists)]: - G=cycle_graph(10) + G = cycle_graph(10) # Dict of [dicts, lists] - dod=dest(G) - GG=source(dod) + dod = dest(G) + GG = source(dod) assert_nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GG.edges())) - GW=to_networkx_graph(dod) + GW = to_networkx_graph(dod) assert_nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GW.edges())) - GI=Graph(dod) + GI = nx.Graph(dod) assert_nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GI.edges())) - G=cycle_graph(10,create_using=DiGraph()) - dod=dest(G) - GG=source(dod, create_using=DiGraph()) + G = cycle_graph(10, create_using=nx.DiGraph()) + dod = dest(G) + GG = source(dod, create_using=nx.DiGraph()) assert_equal(sorted(G.nodes()), sorted(GG.nodes())) assert_equal(sorted(G.edges()), sorted(GG.edges())) - GW=to_networkx_graph(dod, create_using=DiGraph()) + GW = to_networkx_graph(dod, create_using=nx.DiGraph()) assert_equal(sorted(G.nodes()), sorted(GW.nodes())) assert_equal(sorted(G.edges()), sorted(GW.edges())) - GI=DiGraph(dod) + GI = nx.DiGraph(dod) assert_equal(sorted(G.nodes()), sorted(GI.nodes())) assert_equal(sorted(G.edges()), sorted(GI.edges())) def test_graph(self): - g = cycle_graph(10) - G = Graph() + g = nx.cycle_graph(10) + G = nx.Graph() G.add_nodes_from(g) - G.add_weighted_edges_from((u, v, u) for u,v in g.edges()) + G.add_weighted_edges_from((u, v, 
u) for u, v in g.edges()) # Dict of dicts - dod=to_dict_of_dicts(G) - GG=from_dict_of_dicts(dod,create_using=Graph()) + dod = to_dict_of_dicts(G) + GG = from_dict_of_dicts(dod, create_using=nx.Graph()) assert_nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GG.edges())) - GW=to_networkx_graph(dod,create_using=Graph()) + GW = to_networkx_graph(dod, create_using=nx.Graph()) assert_nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GW.edges())) - GI=Graph(dod) + GI = nx.Graph(dod) assert_equal(sorted(G.nodes()), sorted(GI.nodes())) assert_equal(sorted(G.edges()), sorted(GI.edges())) # Dict of lists - dol=to_dict_of_lists(G) - GG=from_dict_of_lists(dol,create_using=Graph()) + dol = to_dict_of_lists(G) + GG = from_dict_of_lists(dol, create_using=nx.Graph()) # dict of lists throws away edge data so set it to none - enone=[(u,v,{}) for (u,v,d) in G.edges(data=True)] + enone = [(u, v, {}) for (u, v, d) in G.edges(data=True)] assert_nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) assert_edges_equal(enone, sorted(GG.edges(data=True))) - GW=to_networkx_graph(dol,create_using=Graph()) + GW = to_networkx_graph(dol, create_using=nx.Graph()) assert_nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) assert_edges_equal(enone, sorted(GW.edges(data=True))) - GI=Graph(dol) + GI = nx.Graph(dol) assert_nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) assert_edges_equal(enone, sorted(GI.edges(data=True))) - def test_with_multiedges_self_loops(self): G = cycle_graph(10) - XG = Graph() + XG = nx.Graph() XG.add_nodes_from(G) - XG.add_weighted_edges_from((u, v, u) for u,v in G.edges()) - XGM=MultiGraph() + XG.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGM = nx.MultiGraph() XGM.add_nodes_from(G) - XGM.add_weighted_edges_from((u, v, u) for u,v in G.edges()) - XGM.add_edge(0,1,weight=2) # multiedge - XGS = Graph() + XGM.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGM.add_edge(0, 1, weight=2) # multiedge + XGS = nx.Graph() XGS.add_nodes_from(G) - XGS.add_weighted_edges_from((u, v, u) for u,v in G.edges()) - XGS.add_edge(0,0,weight=100) # self loop + XGS.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGS.add_edge(0, 0, weight=100) # self loop # Dict of dicts # with self loops, OK - dod=to_dict_of_dicts(XGS) - GG=from_dict_of_dicts(dod,create_using=Graph()) + dod = to_dict_of_dicts(XGS) + GG = from_dict_of_dicts(dod, create_using=nx.Graph()) assert_nodes_equal(XGS.nodes(), GG.nodes()) assert_edges_equal(XGS.edges(), GG.edges()) - GW=to_networkx_graph(dod,create_using=Graph()) + GW = to_networkx_graph(dod, create_using=nx.Graph()) assert_nodes_equal(XGS.nodes(), GW.nodes()) assert_edges_equal(XGS.edges(), GW.edges()) - GI=Graph(dod) + GI = nx.Graph(dod) assert_nodes_equal(XGS.nodes(), GI.nodes()) assert_edges_equal(XGS.edges(), GI.edges()) # Dict of lists # with self loops, OK - dol=to_dict_of_lists(XGS) - GG=from_dict_of_lists(dol,create_using=Graph()) + dol = to_dict_of_lists(XGS) + GG = from_dict_of_lists(dol, create_using=nx.Graph()) # dict of lists throws away edge data so set it to none - enone=[(u,v,{}) for (u,v,d) in XGS.edges(data=True)] + enone = [(u, v, {}) for (u, v, d) in XGS.edges(data=True)] assert_nodes_equal(sorted(XGS.nodes()), sorted(GG.nodes())) assert_edges_equal(enone, sorted(GG.edges(data=True))) - GW=to_networkx_graph(dol,create_using=Graph()) + GW = to_networkx_graph(dol, create_using=nx.Graph()) assert_nodes_equal(sorted(XGS.nodes()), sorted(GW.nodes())) 
assert_edges_equal(enone, sorted(GW.edges(data=True))) - GI=Graph(dol) + GI = nx.Graph(dol) assert_nodes_equal(sorted(XGS.nodes()), sorted(GI.nodes())) assert_edges_equal(enone, sorted(GI.edges(data=True))) # Dict of dicts # with multiedges, OK - dod=to_dict_of_dicts(XGM) - GG=from_dict_of_dicts(dod,create_using=MultiGraph(), - multigraph_input=True) + dod = to_dict_of_dicts(XGM) + GG = from_dict_of_dicts(dod, create_using=nx.MultiGraph(), + multigraph_input=True) assert_nodes_equal(sorted(XGM.nodes()), sorted(GG.nodes())) assert_edges_equal(sorted(XGM.edges()), sorted(GG.edges())) - GW=to_networkx_graph(dod,create_using=MultiGraph(),multigraph_input=True) + GW = to_networkx_graph(dod, create_using=nx.MultiGraph(), multigraph_input=True) assert_nodes_equal(sorted(XGM.nodes()), sorted(GW.nodes())) assert_edges_equal(sorted(XGM.edges()), sorted(GW.edges())) - GI=MultiGraph(dod) # convert can't tell whether to duplicate edges! + GI = nx.MultiGraph(dod) # convert can't tell whether to duplicate edges! assert_nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes())) #assert_not_equal(sorted(XGM.edges()), sorted(GI.edges())) assert_false(sorted(XGM.edges()) == sorted(GI.edges())) - GE=from_dict_of_dicts(dod,create_using=MultiGraph(), - multigraph_input=False) + GE = from_dict_of_dicts(dod, create_using=nx.MultiGraph(), + multigraph_input=False) assert_nodes_equal(sorted(XGM.nodes()), sorted(GE.nodes())) assert_not_equal(sorted(XGM.edges()), sorted(GE.edges())) - GI=MultiGraph(XGM) + GI = nx.MultiGraph(XGM) assert_nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes())) assert_edges_equal(sorted(XGM.edges()), sorted(GI.edges())) - GM=MultiGraph(G) + GM = nx.MultiGraph(G) assert_nodes_equal(sorted(GM.nodes()), sorted(G.nodes())) assert_edges_equal(sorted(GM.edges()), sorted(G.edges())) # Dict of lists # with multiedges, OK, but better write as DiGraph else you'll # get double edges - dol=to_dict_of_lists(G) - GG=from_dict_of_lists(dol,create_using=MultiGraph()) + dol = to_dict_of_lists(G) + GG = from_dict_of_lists(dol, create_using=nx.MultiGraph()) assert_nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GG.edges())) - GW=to_networkx_graph(dol,create_using=MultiGraph()) + GW = to_networkx_graph(dol, create_using=nx.MultiGraph()) assert_nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GW.edges())) - GI=MultiGraph(dol) + GI = nx.MultiGraph(dol) assert_nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) assert_edges_equal(sorted(G.edges()), sorted(GI.edges())) def test_edgelists(self): - P=path_graph(4) - e=[(0,1),(1,2),(2,3)] - G=Graph(e) + P = nx.path_graph(4) + e = [(0, 1), (1, 2), (2, 3)] + G = nx.Graph(e) assert_nodes_equal(sorted(G.nodes()), sorted(P.nodes())) assert_edges_equal(sorted(G.edges()), sorted(P.edges())) assert_edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) - e=[(0,1,{}),(1,2,{}),(2,3,{})] - G=Graph(e) + e = [(0, 1, {}), (1, 2, {}), (2, 3, {})] + G = nx.Graph(e) assert_nodes_equal(sorted(G.nodes()), sorted(P.nodes())) assert_edges_equal(sorted(G.edges()), sorted(P.edges())) assert_edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) - e=((n,n+1) for n in range(3)) - G=Graph(e) + e = ((n, n + 1) for n in range(3)) + G = nx.Graph(e) assert_nodes_equal(sorted(G.nodes()), sorted(P.nodes())) assert_edges_equal(sorted(G.edges()), sorted(P.edges())) assert_edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) @@ -200,24 +204,24 @@ def test_edgelists(self): 
def test_directed_to_undirected(self): edges1 = [(0, 1), (1, 2), (2, 0)] edges2 = [(0, 1), (1, 2), (0, 2)] - assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges1)).edges(),edges1)) - assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges2)).edges(),edges1)) - assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges1)).edges(),edges1)) - assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges2)).edges(),edges1)) + assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges1)).edges(), edges1)) + assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges2)).edges(), edges1)) + assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges1)).edges(), edges1)) + assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges2)).edges(), edges1)) assert_true(self.edgelists_equal(nx.MultiGraph(nx.MultiDiGraph(edges1)).edges(), edges1)) assert_true(self.edgelists_equal(nx.MultiGraph(nx.MultiDiGraph(edges2)).edges(), edges1)) - assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(),edges1)) - assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(),edges1)) + assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(), edges1)) + assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(), edges1)) def test_attribute_dict_integrity(self): - # we must not replace dict-like graph data structures with dicts - G=OrderedGraph() + # we must not replace dict-like graph data structures with dicts + G = nx.OrderedGraph() G.add_nodes_from("abc") - H=to_networkx_graph(G, create_using=OrderedGraph()) - assert_equal(list(H.node),list(G.node)) - H=OrderedDiGraph(G) - assert_equal(list(H.node),list(G.node)) + H = to_networkx_graph(G, create_using=nx.OrderedGraph()) + assert_equal(list(H.node), list(G.node)) + H = nx.OrderedDiGraph(G) + assert_equal(list(H.node), list(G.node)) diff --git a/networkx/tests/test_convert_pandas.py b/networkx/tests/test_convert_pandas.py --- a/networkx/tests/test_convert_pandas.py +++ b/networkx/tests/test_convert_pandas.py @@ -2,9 +2,12 @@ from nose.tools import assert_true import networkx as nx +from networkx.testing import assert_nodes_equal, assert_edges_equal + class TestConvertPandas(object): - numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test + numpy = 1 # nosetests attribute, use nosetests -a 'not numpy' to skip test + @classmethod def setupClass(cls): try: @@ -17,49 +20,70 @@ def __init__(self, ): import pandas as pd self.r = pd.np.random.RandomState(seed=5) - ints = self.r.random_integers(1, 10, size=(3,2)) + ints = self.r.random_integers(1, 10, size=(3, 2)) a = ['A', 'B', 'C'] b = ['D', 'A', 'E'] df = pd.DataFrame(ints, columns=['weight', 'cost']) - df[0] = a # Column label 0 (int) - df['b'] = b # Column label 'b' (str) + df[0] = a # Column label 0 (int) + df['b'] = b # Column label 'b' (str) self.df = df mdf = pd.DataFrame([[4, 16, 'A', 'D']], - columns=['weight', 'cost', 0, 'b']) + columns=['weight', 'cost', 0, 'b']) self.mdf = df.append(mdf) def assert_equal(self, G1, G2): - assert_true( nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y )) + assert_true(nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y)) def test_from_dataframe_all_attr(self, ): Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}), - ('B', 'A', {'cost': 1, 'weight': 7}), - ('A', 'D', {'cost': 7, 'weight': 4})]) - G=nx.from_pandas_dataframe(self.df, 0, 'b', True) + ('B', 'A', {'cost': 1, 'weight': 7}), + ('A', 'D', {'cost': 7, 'weight': 4})]) + G = 
nx.from_pandas_dataframe(self.df, 0, 'b', True) self.assert_equal(G, Gtrue) # MultiGraph MGtrue = nx.MultiGraph(Gtrue) MGtrue.add_edge('A', 'D', cost=16, weight=4) - MG=nx.from_pandas_dataframe(self.mdf, 0, 'b', True, nx.MultiGraph()) + MG = nx.from_pandas_dataframe(self.mdf, 0, 'b', True, nx.MultiGraph()) self.assert_equal(MG, MGtrue) def test_from_dataframe_multi_attr(self, ): Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}), - ('B', 'A', {'cost': 1, 'weight': 7}), - ('A', 'D', {'cost': 7, 'weight': 4})]) - G=nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost']) + ('B', 'A', {'cost': 1, 'weight': 7}), + ('A', 'D', {'cost': 7, 'weight': 4})]) + G = nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost']) self.assert_equal(G, Gtrue) def test_from_dataframe_one_attr(self, ): Gtrue = nx.Graph([('E', 'C', {'weight': 10}), - ('B', 'A', {'weight': 7}), - ('A', 'D', {'weight': 4})]) - G=nx.from_pandas_dataframe(self.df, 0, 'b', 'weight') + ('B', 'A', {'weight': 7}), + ('A', 'D', {'weight': 4})]) + G = nx.from_pandas_dataframe(self.df, 0, 'b', 'weight') self.assert_equal(G, Gtrue) def test_from_dataframe_no_attr(self, ): Gtrue = nx.Graph([('E', 'C', {}), - ('B', 'A', {}), - ('A', 'D', {})]) - G=nx.from_pandas_dataframe(self.df, 0, 'b',) + ('B', 'A', {}), + ('A', 'D', {})]) + G = nx.from_pandas_dataframe(self.df, 0, 'b',) self.assert_equal(G, Gtrue) + + def test_from_datafram(self, ): + # Pandas DataFrame + g = nx.cycle_graph(10) + G = nx.Graph() + G.add_nodes_from(g) + G.add_weighted_edges_from((u, v, u) for u, v in g.edges()) + edgelist = nx.to_edgelist(G) + source = [s for s, t, d in edgelist] + target = [t for s, t, d in edgelist] + weight = [d['weight'] for s, t, d in edgelist] + import pandas as pd + edges = pd.DataFrame({'source': source, + 'target': target, + 'weight': weight}) + GG = nx.from_pandas_dataframe(edges, edge_attr='weight') + assert_nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert_edges_equal(sorted(G.edges()), sorted(GG.edges())) + GW = nx.to_networkx_graph(edges, create_using=nx.Graph()) + assert_nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert_edges_equal(sorted(G.edges()), sorted(GW.edges()))
Invalid call to from_pandas_dataframe function?
While looking for a problem in my branch, I noticed what looks like an invalid chunk of code (direct link to the line [here](https://github.com/networkx/networkx/blob/d7d906e1d16ef331da0bc1d149953e7532155acc/networkx/convert.py#L133-L134)):
```
try:
    import pandas as pd
    if isinstance(data, pd.DataFrame):
        try:
            return nx.from_pandas_dataframe(data, create_using=create_using)
        except:
            msg = "Input is not a correct Pandas DataFrame."
            raise nx.NetworkXError(msg)
```
`from_pandas_dataframe` requires, at the very least, _three_ arguments -- with `create_using` being an optional keyword argument. Because the `try: except:` block is untyped, this will always report a `NetworkXError` no matter what.
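For context, a minimal sketch of the call pattern this patch enables — it assumes pandas is installed and reuses the toy edge list from the new doctest; with the new `source='source'`/`target='target'` defaults, the no-extra-arguments call made by `to_networkx_graph` becomes valid:

```python
import pandas as pd
import networkx as nx

# Hypothetical edge list whose column names match the new defaults.
edges = pd.DataFrame({'source': [0, 1, 2],
                      'target': [2, 2, 3],
                      'weight': [3, 4, 5]})

# Before the patch this call needed explicit source/target arguments;
# with the defaults it picks up the 'source' and 'target' columns.
G = nx.from_pandas_dataframe(edges, edge_attr='weight')
assert G[0][2]['weight'] == 3
```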
I agree, that isn't going to work. I guess we are not testing this and we should.
2017-06-27T21:25:47
networkx/networkx
2,497
networkx__networkx-2497
[ "2362" ]
26e6efcb88e132ff76130531ca7e88583f4af9e2
diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py
--- a/networkx/readwrite/gml.py
+++ b/networkx/readwrite/gml.py
@@ -565,7 +565,7 @@ def generate_gml(G, stringizer=None):
     Notes
     -----
     Graph attributes named 'directed', 'multigraph', 'node' or
-    'edge',node attributes named 'id' or 'label', edge attributes
+    'edge', node attributes named 'id' or 'label', edge attributes
     named 'source' or 'target' (or 'key' if `G` is a multigraph)
     are ignored because these attribute names are used to encode
     the graph structure.
@@ -581,7 +581,10 @@ def stringize(key, value, ignored_keys, indent, in_list=False):
         key = str(key)
         if key not in ignored_keys:
             if isinstance(value, (int, long)):
-                yield indent + key + ' ' + str(value)
+                if key == 'label':
+                    yield indent + key + ' "' + str(value) + '"'
+                else:
+                    yield indent + key + ' ' + str(value)
             elif isinstance(value, float):
                 text = repr(value).upper()
                 # GML requires that a real literal contain a decimal point, but
@@ -590,7 +593,10 @@
                 epos = text.rfind('E')
                 if epos != -1 and text.find('.', 0, epos) == -1:
                     text = text[:epos] + '.' + text[epos:]
-                yield indent + key + ' ' + text
+                if key == 'label':
+                    yield indent + key + ' "' + text + '"'
+                else:
+                    yield indent + key + ' ' + text
             elif isinstance(value, dict):
                 yield indent + key + ' ['
                 next_indent = indent + '    '
diff --git a/networkx/readwrite/tests/test_gml.py b/networkx/readwrite/tests/test_gml.py --- a/networkx/readwrite/tests/test_gml.py +++ b/networkx/readwrite/tests/test_gml.py @@ -180,6 +180,19 @@ def test_read_gml(self): os.close(fd) os.unlink(fname) + def test_labels_are_strings(self): + # GML requires labels to be strings (i.e., in quotes) + answer = """graph [ + node [ + id 0 + label "1203" + ] +]""" + G = nx.Graph() + G.add_node(1203) + data = '\n'.join(nx.generate_gml(G, stringizer=literal_stringizer)) + assert_equal(data, answer) + def test_relabel_duplicate(self): data = """ graph @@ -240,12 +253,29 @@ def test_quotes(self): name "path_graph(1)" node [ id 0 - label 0 + label "0" demo "This is &#34;quoted&#34; and this is a copyright: &#169;" ] ]""" assert_equal(data, answer) + def test_unicode_node(self): + node = 'node' + unichr(169) + G = nx.Graph() + G.add_node(node) + fobj = tempfile.NamedTemporaryFile() + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode('ascii') + answer = """graph [ + node [ + id 0 + label "node&#169;" + ] +]""" + assert_equal(data, answer) + def test_name(self): G = nx.parse_gml('graph [ name "x" node [ id 0 label "x" ] ]') assert_equal('x', G.graph['name']) @@ -261,7 +291,7 @@ def test_graph_types(self): gml += ' directed ' + str(int(directed)) if multigraph is not None: gml += ' multigraph ' + str(int(multigraph)) - gml += ' node [ id 0 label 0 ]' + gml += ' node [ id 0 label "0" ]' gml += ' edge [ source 0 target 0 ]' gml += ' ]' G = nx.parse_gml(gml) @@ -274,7 +304,7 @@ def test_graph_types(self): gml += ' multigraph 1\n' gml += """ node [ id 0 - label 0 + label "0" ] edge [ source 0
label in gml output should be quoted
The output of the `write_gml` function currently (1.11) does not quote the label values, but I do think it should according to the [GML spec.](http://www.fim.uni-passau.de/fileadmin/files/lehrstuhl/brandenburg/projekte/gml/gml-technical-report.pdf). Because of this, tools like Cytoscape, jhive and gml2gv fail to read the gml file.
A node from my output:
```
node [
id 0
label 1203
]
```
that should be
```
node [
id 0
label "1203"
]
```
That seems fine to me.
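To make the expected behavior concrete, here is a small sketch mirroring the new `test_labels_are_strings` test (`literal_stringizer` is imported from `networkx.readwrite.gml`):

```python
import networkx as nx
from networkx.readwrite.gml import literal_stringizer

G = nx.Graph()
G.add_node(1203)  # an integer node label
print('\n'.join(nx.generate_gml(G, stringizer=literal_stringizer)))
# With the fix the label is emitted as a quoted GML string:
# graph [
#   node [
#     id 0
#     label "1203"
#   ]
# ]
```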
2017-07-07T02:08:51
networkx/networkx
2,499
networkx__networkx-2499
[ "2383" ]
26e6efcb88e132ff76130531ca7e88583f4af9e2
diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py --- a/networkx/readwrite/gml.py +++ b/networkx/readwrite/gml.py @@ -423,7 +423,9 @@ def pop_attr(dct, category, attr, i): G.add_edge(source, target, **edge) else: raise nx.NetworkXError( - 'edge #%d (%r%s%r) is duplicated' % + """edge #%d (%r%s%r) is duplicated + +Hint: If this is a multigraph, add "multigraph 1" to the header of the file.""" % (i, source, '->' if directed else '--', target)) else: key = edge.pop('key', None)
Reading GML files Reading the `polblogs.gml` file [downloaded here ](http://www-personal.umich.edu/~mejn/netdata/) using NetworkX's `read_gml` function throws `NetworkXError: edge is duplicated`. This was not the case previously. I'm running both `Python 2.7` and `Python 3.6`; the same error is thrown using both kernels. My NetworkX version is 1.11.
It looks like the code has changed significantly since your previous experience. I didn't look into the details yet. A workaround for this error is to add "multigraph 1" to the header of the file to reflect that it has multiple edges ``` graph [ directed 1 multigraph 1 node [ id 1 ... ``` The previous version of read_gml figured out whether the input file was a graph or multigraph by inspecting all of the edges. At minimum this needs some documentation.
2017-07-07T22:28:18
networkx/networkx
2,500
networkx__networkx-2500
[ "2240" ]
26e6efcb88e132ff76130531ca7e88583f4af9e2
diff --git a/networkx/algorithms/dag.py b/networkx/algorithms/dag.py --- a/networkx/algorithms/dag.py +++ b/networkx/algorithms/dag.py @@ -1,5 +1,10 @@ # -*- coding: utf-8 -*- -"""Algorithms for directed acyclic graphs (DAGs).""" +"""Algorithms for directed acyclic graphs (DAGs). + +Note that most of these functions are only guaranteed to work for DAGs. +In general, these functions do not check for acyclic-ness, so it is up +to the user to check for that. +""" # Copyright (C) 2006-2016 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> @@ -31,17 +36,18 @@ def descendants(G, source): - """Return all nodes reachable from `source` in G. + """Return all nodes reachable from `source` in `G`. Parameters ---------- G : NetworkX DiGraph - source : node in G + A directed acyclic graph (DAG) + source : node in `G` Returns ------- - des : set() - The descendants of source in G + set() + The descendants of `source` in `G` """ if not G.has_node(source): raise nx.NetworkXError("The node %s is not in the graph." % source) @@ -50,16 +56,17 @@ def descendants(G, source): def ancestors(G, source): - """Return all nodes having a path to `source` in G. + """Return all nodes having a path to `source` in `G`. Parameters ---------- G : NetworkX DiGraph - source : node in G + A directed acyclic graph (DAG) + source : node in `G` Returns ------- - ancestors : set() + set() The ancestors of source in G """ if not G.has_node(source): @@ -69,18 +76,17 @@ def ancestors(G, source): def is_directed_acyclic_graph(G): - """Return True if the graph G is a directed acyclic graph (DAG) or + """Return True if the graph `G` is a directed acyclic graph (DAG) or False if not. Parameters ---------- G : NetworkX graph - A graph Returns ------- - is_dag : bool - True if G is a DAG, false otherwise + bool + True if `G` is a DAG, False otherwise """ if not G.is_directed(): return False @@ -101,39 +107,39 @@ def topological_sort(G): Parameters ---------- G : NetworkX digraph - A directed graph + A directed acyclic graph (DAG) Returns ------- - topologically_sorted_nodes : iterable + iterable An iterable of node names in topological sorted order. Raises ------ NetworkXError - Topological sort is defined for directed graphs only. If the graph G + Topological sort is defined for directed graphs only. If the graph `G` is undirected, a :exc:`NetworkXError` is raised. NetworkXUnfeasible - If G is not a directed acyclic graph (DAG) no topological sort exists - and a NetworkXUnfeasible exception is raised. This can also be - raised if G is changed while the returned iterator is being processed. + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed. RuntimeError - If G is changed while the returned iterator is being processed. + If `G` is changed while the returned iterator is being processed. Examples -------- - To get the reverse order of the topological sort:: + To get the reverse order of the topological sort: - >>> DG = nx.DiGraph([(1, 2), (2, 3)]) - >>> list(reversed(list(nx.topological_sort(DG)))) - [3, 2, 1] + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> list(reversed(list(nx.topological_sort(DG)))) + [3, 2, 1] Notes ----- This algorithm is based on a description and proof in - Introduction to algorithms - a creative approach [1]_ . + "Introduction to Algorithms: A Creative Approach" [1]_ . 
See also -------- @@ -141,8 +147,8 @@ def topological_sort(G): References ---------- - .. [1] Manber, U. (1989). Introduction to algorithms - a creative approach. Addison-Wesley. - http://www.amazon.com/Introduction-Algorithms-A-Creative-Approach/dp/0201120372 + .. [1] Manber, U. (1989). "Introduction to Algorithms: A Creative Approach." + Addison-Wesley. """ if not G.is_directed(): raise nx.NetworkXError( @@ -183,7 +189,7 @@ def lexicographical_topological_sort(G, key=None): Parameters ---------- G : NetworkX digraph - A directed graph + A directed acyclic graph (DAG) key : function, optional This function maps nodes to keys with which to resolve ambiguities in @@ -191,27 +197,27 @@ def lexicographical_topological_sort(G, key=None): Returns ------- - lexicographically_topologically_sorted_nodes : iterable + iterable An iterable of node names in lexicographical topological sort order. Raises ------ NetworkXError - Topological sort is defined for directed graphs only. If the graph G + Topological sort is defined for directed graphs only. If the graph `G` is undirected, a :exc:`NetworkXError` is raised. NetworkXUnfeasible - If G is not a directed acyclic graph (DAG) no topological sort exists - and a NetworkXUnfeasible exception is raised. This can also be - raised if G is changed while the returned iterator is being processed. + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed. RuntimeError - If G is changed while the returned iterator is being processed. + If `G` is changed while the returned iterator is being processed. Notes ----- This algorithm is based on a description and proof in - Introduction to algorithms - a creative approach [1]_ . + "Introduction to Algorithms: A Creative Approach" [1]_ . See also -------- @@ -219,8 +225,8 @@ def lexicographical_topological_sort(G, key=None): References ---------- - .. [1] Manber, U. (1989). Introduction to algorithms - a creative approach. Addison-Wesley. - http://www.amazon.com/Introduction-Algorithms-A-Creative-Approach/dp/0201120372 + .. [1] Manber, U. (1989). "Introduction to Algorithms: A Creative Approach." + Addison-Wesley. """ if not G.is_directed(): raise nx.NetworkXError( @@ -259,7 +265,7 @@ def create_tuple(node): def is_aperiodic(G): - """Return True if G is aperiodic. + """Return True if `G` is aperiodic. A directed graph is aperiodic if there is no integer k > 1 that divides the length of every cycle in the graph. @@ -267,28 +273,28 @@ def is_aperiodic(G): Parameters ---------- G : NetworkX DiGraph - Graph + A directed graph Returns ------- - aperiodic : boolean + bool True if the graph is aperiodic False otherwise Raises ------ NetworkXError - If G is not directed + If `G` is not directed Notes ----- This uses the method outlined in [1]_, which runs in O(m) time - given m edges in G. Note that a graph is not aperiodic if it is + given m edges in `G`. Note that a graph is not aperiodic if it is acyclic as every integer trivial divides length 0 cycles. References ---------- .. [1] Jarvis, J. P.; Shier, D. R. (1996), - Graph-theoretic analysis of finite Markov chains, + "Graph-theoretic analysis of finite Markov chains," in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling: A Multidisciplinary Approach, CRC Press. 
""" @@ -329,17 +335,17 @@ def transitive_closure(G): Parameters ---------- G : NetworkX DiGraph - Graph + A directed graph Returns ------- - TC : NetworkX DiGraph - Graph + NetworkX DiGraph + The transitive closure of `G` Raises ------ NetworkXNotImplemented - If G is not directed + If `G` is not directed References ---------- @@ -366,40 +372,40 @@ def transitive_reduction(G): Parameters ---------- G : NetworkX DiGraph - Graph + A directed acyclic graph (DAG) Returns ------- - TR : NetworkX DiGraph - Graph + NetworkX DiGraph + The transitive reduction of `G` Raises ------ NetworkXError - If G is not a directed acyclic graph (DAG) transitive reduction is - not uniquely defined and a NetworkXError exception is raised. + If `G` is not a directed acyclic graph (DAG) transitive reduction is + not uniquely defined and a :exc:`NetworkXError` exception is raised. - References + References ---------- https://en.wikipedia.org/wiki/Transitive_reduction - """ + """ if not is_directed_acyclic_graph(G): raise nx.NetworkXError( - "Transitive reduction only uniquely defined on directed acyclic graphs.") + "Transitive reduction only uniquely defined on directed acyclic graphs.") TR = nx.DiGraph() TR.add_nodes_from(G.nodes()) for u in G: u_edges = set(G[u]) for v in G[u]: u_edges -= {y for x, y in nx.dfs_edges(G, v)} - TR.add_edges_from((u,v) for v in u_edges) + TR.add_edges_from((u, v) for v in u_edges) return TR @not_implemented_for('undirected') def antichains(G): - """Generates antichains from a DAG. + """Generates antichains from a directed acyclic graph (DAG). An antichain is a subset of a partially ordered set such that any two elements in the subset are incomparable. @@ -407,19 +413,19 @@ def antichains(G): Parameters ---------- G : NetworkX DiGraph - Graph + A directed acyclic graph (DAG) Returns ------- - antichain : generator object + generator object Raises ------ NetworkXNotImplemented - If G is not directed + If `G` is not directed NetworkXUnfeasible - If G contains a cycle + If `G` contains a cycle Notes ----- @@ -452,38 +458,39 @@ def antichains(G): @not_implemented_for('undirected') def dag_longest_path(G, weight='weight', default_weight=1): - """Returns the longest path in a DAG - If G has edges with 'weight' attribute the edge data are used as weight values. + """Returns the longest path in a directed acyclic graph (DAG). + + If `G` has edges with `weight` attribute the edge data are used as weight values. Parameters ---------- G : NetworkX DiGraph - Graph + A directed acyclic graph (DAG) - weight : string (default 'weight') + weight : str, optional Edge data key to use for weight - default_weight : integer (default 1) + default_weight : int, optional The weight of edges that do not have a weight attribute Returns ------- - path : list + list Longest path Raises ------ NetworkXNotImplemented - If G is not directed + If `G` is not directed See also -------- dag_longest_path_length """ - dist = {} # stores {v : (length, u)} + dist = {} # stores {v : (length, u)} for v in nx.topological_sort(G): us = [(dist[u][0] + data.get(weight, default_weight), u) - for u, data in G.pred[v].items()] + for u, data in G.pred[v].items()] # Use the best predecessor if there is one and its distance is non-negative, otherwise terminate. 
maxu = max(us, key=lambda x: x[0]) if us else (0, v) dist[v] = maxu if maxu[0] >= 0 else (0, v) @@ -505,23 +512,23 @@ def dag_longest_path_length(G, weight='weight', default_weight=1): Parameters ---------- G : NetworkX DiGraph - Graph + A directed acyclic graph (DAG) - weight : string (default 'weight') + weight : string, optional Edge data key to use for weight - default_weight : integer (default 1) + default_weight : int, optional The weight of edges that do not have a weight attribute Returns ------- - path_length : int + int Longest path length Raises ------ NetworkXNotImplemented - If G is not directed + If `G` is not directed See also --------
networkx.ancestors is inconsistent with networkx.has_path
the function **networkx.ancestors(G, source)** is supposed to
> Return all nodes having a path to (source) in G.

but
```
>>> g = networkx.DiGraph()
>>> g.add_edge(1,1)
>>> networkx.has_path(g,1,1)
True
>>> networkx.ancestors(g,1)
set([])
```
where the answer should be `set([1])`, right? Apparently ancestors ignores loops.
BTW: docstring of **networkx.ancestors** contains uninterpreted markdown
First, the definition for a path allows a path of length zero, i.e., from a node to itself, so the self-loop in your example is unnecessary:
``` python
>>> G = nx.DiGraph()
>>> G.add_node(0)
>>> nx.has_path(G, 0, 0)
True
```
Second, the `networkx.dag.ancestors` function is only guaranteed to work for directed acyclic graphs, and your graph has a cycle of length one, or in other words, a self-loop. In general, the functions in `networkx.dag` don't check for acyclic-ness, so it is up to the user to check for that. You can remove self-loops by doing `G.remove_edges_from(G.selfloop_edges())`.

> Second, the networkx.dag.ancestors function is only guaranteed to work for directed acyclic graphs

shouldn't this be in the documentation?

EDIT: ok, it belongs to **Docs » Reference » Reference » Algorithms » Directed Acyclic Graphs**, I just didn't notice it.

The documentation is a bit spread out because the project is so large, unfortunately. But it would be nice for the precondition that graphs are directed and acyclic to be stated somewhere, like at the `networkx.algorithms.dag` package-level docstring.
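A minimal sketch of that advice applied to the graph from the issue — remove the self-loop (a cycle of length one) before calling the `networkx.algorithms.dag` functions, which assume a DAG:

```python
import networkx as nx

g = nx.DiGraph()
g.add_edge(1, 1)                          # a self-loop, i.e. a cycle of length one
g.remove_edges_from(g.selfloop_edges())   # make the graph acyclic first
print(nx.ancestors(g, 1))                 # set(): consistent with the DAG precondition
```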
2017-07-08T01:41:53
networkx/networkx
2,504
networkx__networkx-2504
[ "1454" ]
949a873feb2671c7fae40e7563cb6e88e422f0b1
diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py --- a/networkx/readwrite/gml.py +++ b/networkx/readwrite/gml.py @@ -19,16 +19,25 @@ standard file format in the Graphlet graph editor system. It has been overtaken and adapted by several other systems for drawing graphs." -See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html +GML files are stored using a 7-bit ASCII encoding with any extended +ASCII characters (iso8859-1) appearing as HTML character entities. +You will need to give some thought into how the exported data should +interact with different languages and even different Python versions. +Re-importing from gml is also a concern. -Format ------- -See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html -for format specification. +Without specifying a `stringizer`/`destringizer`, the code is capable of +handling `int`/`float`/`str`/`dict`/`list` data as required by the GML +specification. For other data types, you need to explicitly supply a +`stringizer`/`destringizer`. -Example graphs in GML format -http://www-personal.umich.edu/~mejn/netdata/ +For better interoperability of data generated by Python 2 and Python 3, +we've provided `literal_stringizer` and `literal_destringizer`. +For additional documentation on the GML file format, please see the +`GML website <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_. + +Several example graphs in GML format may be found on Mark Newman's +`Network data page <http://www-personal.umich.edu/~mejn/netdata/>`_. """ try: try: @@ -144,7 +153,7 @@ def literal_destringizer(rep): @open_file(0, mode='rb') def read_gml(path, label='label', destringizer=None): - """Read graph in GML format from path. + """Read graph in GML format from `path`. Parameters ---------- @@ -156,7 +165,7 @@ def read_gml(path, label='label', destringizer=None): attributes indicated by `label`. Default value: 'label'. destringizer : callable, optional - A destringizer that recovers values stored as strings in GML. If it + A `destringizer` that recovers values stored as strings in GML. If it cannot convert a string to a value, a `ValueError` is raised. Default value : None. @@ -172,17 +181,21 @@ def read_gml(path, label='label', destringizer=None): See Also -------- - write_gml, parse_gml + write_gml, parse_gml, literal_destringizer Notes ----- - The GML specification says that files should be ASCII encoded, with any - extended ASCII characters (iso8859-1) appearing as HTML character entities. + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + handling `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For other data types, you need to explicitly supply a + `stringizer`/`destringizer`. - References - ---------- - GML specification: - http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html + For additional documentation on the GML file format, please see the + `GML website <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_. + + See the module docstring :mod:`networkx.readwrite.gml` for additional details. Examples -------- @@ -219,7 +232,7 @@ def parse_gml(lines, label='label', destringizer=None): attributes indicated by `label`. Default value: 'label'. destringizer : callable, optional - A destringizer that recovers values stored as strings in GML. 
If it + A `destringizer` that recovers values stored as strings in GML. If it cannot convert a string to a value, a `ValueError` is raised. Default value : None. @@ -235,17 +248,24 @@ def parse_gml(lines, label='label', destringizer=None): See Also -------- - write_gml, read_gml + write_gml, read_gml, literal_destringizer Notes ----- - This stores nested GML attributes as dictionaries in the - NetworkX graph, node, and edge attribute structures. + This stores nested GML attributes as dictionaries in the NetworkX graph, + node, and edge attribute structures. - References - ---------- - GML specification: - http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + handling `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For other data types, you need to explicitly supply a + `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML website <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_. + + See the module docstring :mod:`networkx.readwrite.gml` for additional details. """ def decode_line(line): if isinstance(line, bytes): @@ -277,7 +297,7 @@ def filter_lines(lines): def parse_gml_lines(lines, label, destringizer): - """Parse GML into a graph. + """Parse GML `lines` into a graph. """ def tokenize(): patterns = [ @@ -441,12 +461,12 @@ def pop_attr(dct, category, attr, i): def literal_stringizer(value): - """Convert a value to a Python literal in GML representation. + """Convert a `value` to a Python literal in GML representation. Parameters ---------- value : object - The value to be converted to GML representation. + The `value` to be converted to GML representation. Returns ------- @@ -541,7 +561,7 @@ def stringize(value): def generate_gml(G, stringizer=None): - """Generate a single entry of the graph G in GML format. + r"""Generate a single entry of the graph `G` in GML format. Parameters ---------- @@ -549,9 +569,9 @@ def generate_gml(G, stringizer=None): The graph to be converted to GML. stringizer : callable, optional - A stringizer which converts non-int/float/dict values into strings. If - it cannot convert a value into a string, it should raise a - `ValueError` raised to indicate that. Default value: None. + A `stringizer` which converts non-int/non-float/non-dict values into + strings. If it cannot convert a value into a string, it should raise a + `ValueError` to indicate that. Default value: None. Returns ------- @@ -564,6 +584,10 @@ def generate_gml(G, stringizer=None): If `stringizer` cannot convert a value into a string, or the value to convert is not a string while `stringizer` is None. + See Also + -------- + literal_stringizer + Notes ----- Graph attributes named 'directed', 'multigraph', 'node' or @@ -571,6 +595,53 @@ def generate_gml(G, stringizer=None): named 'source' or 'target' (or 'key' if `G` is a multigraph) are ignored because these attribute names are used to encode the graph structure. + + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + handling `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. 
For other data types, you need to explicitly supply a + `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML website <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_. + + See the module docstring :mod:`networkx.readwrite.gml` for additional details. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_node("1") + >>> print("\n".join(nx.generate_gml(G))) + graph [ + node [ + id 0 + label "1" + ] + ] + >>> G = nx.OrderedMultiGraph([("a", "b"), ("a", "b")]) + >>> print("\n".join(nx.generate_gml(G))) + graph [ + multigraph 1 + node [ + id 0 + label "a" + ] + node [ + id 1 + label "b" + ] + edge [ + source 0 + target 1 + key 0 + ] + edge [ + source 0 + target 1 + key 1 + ] + ] """ valid_keys = re.compile('^[A-Za-z][0-9A-Za-z]*$') @@ -682,7 +753,7 @@ def write_gml(G, path, stringizer=None): .bz2 will be compressed. stringizer : callable, optional - A stringizer which converts non-int/non-float/non-dict values into + A `stringizer` which converts non-int/non-float/non-dict values into strings. If it cannot convert a value into a string, it should raise a `ValueError` to indicate that. Default value: None. @@ -694,16 +765,28 @@ def write_gml(G, path, stringizer=None): See Also -------- - read_gml, generate_gml + read_gml, generate_gml, literal_stringizer Notes ----- Graph attributes named 'directed', 'multigraph', 'node' or - 'edge',node attributes named 'id' or 'label', edge attributes + 'edge', node attributes named 'id' or 'label', edge attributes named 'source' or 'target' (or 'key' if `G` is a multigraph) are ignored because these attribute names are used to encode the graph structure. + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + handling `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For other data types, you need to explicitly supply a + `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML website <http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html>`_. + + See the module docstring :mod:`networkx.readwrite.gml` for additional details. + Examples -------- >>> G = nx.path_graph(4)
diff --git a/networkx/readwrite/tests/test_gml.py b/networkx/readwrite/tests/test_gml.py --- a/networkx/readwrite/tests/test_gml.py +++ b/networkx/readwrite/tests/test_gml.py @@ -65,6 +65,7 @@ def setUp(self): ] ] """ + def test_parse_gml_cytoscape_bug(self): # example from issue #321, originally #324 in trac cytoscape_example = """ @@ -197,18 +198,18 @@ def test_relabel_duplicate(self): data = """ graph [ - label "" - directed 1 - node - [ - id 0 - label "same" - ] - node - [ - id 1 - label "same" - ] + label "" + directed 1 + node + [ + id 0 + label "same" + ] + node + [ + id 1 + label "same" + ] ] """ fh = io.BytesIO(data.encode('UTF-8'))
Update `write_gml` docstring. See discussion in #1449. I think it would be helpful if we added a few comments to the docstring to make some of the conversion issues more transparent for `write_gml`.
Was this resolved?

As per the discussion in #1486, this needs more work.

@SanketDG Do you have any concrete suggestions?

@chebee7i said that it was best to link to user documentation. I think using the [cross referencing feature](http://sphinx-doc.org/domains.html#cross-referencing-python-objects) from sphinx will be a good way to link to the documentation, in this case [`readwrite.gml.html`](http://networkx.github.io/documentation/latest/reference/readwrite.gml.html)

We write the issues involved with the stringizers along with examples in the module level docstring of `networkx.readwrite.gml`. Then using `:mod:`~networkx.readwrite.gml`` (cross referencing) in the docstring of `write_gml()` should create a link to `readwrite.gml.html`. This will be version independent.

I think the only downside is that I can't name the link so the link will just look like this: [gml](#). With a proper message, this shouldn't be a problem.

Can we defer this and release 1.10 now?

Yes, creating the examples for this will require a discussion, so it's best if we keep it out of `1.10` if its release is near.

Until I looked up online why my `write_gml` wasn't working, I was confused why I couldn't write nodes that had underscores in them. Can we add to the documentation that graph, node, and edge attributes cannot have underscores when writing using `write_gml`?

It might be better to point to the specification http://www.fim.uni-passau.de/fileadmin/files/lehrstuhl/brandenburg/projekte/gml/gml-technical-report.pdf since there are also other restrictions on what keys and values can be.

Good suggestion, @hagberg. @chebee7i, are you still interested in working on this PR? Can you add this to the list of things to fix?
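Since the stringizer questions keep recurring, here is a hedged round-trip sketch of the pattern the updated docstrings describe; the tuple value and the file name are illustrative only — values outside GML's native `int`/`float`/`str`/`dict`/`list` types need an explicit `stringizer` on write and a matching `destringizer` on read:

```python
import networkx as nx
from networkx.readwrite.gml import literal_stringizer, literal_destringizer

G = nx.Graph()
G.add_node(0, pos=(1, 2))  # a tuple is not a native GML value type

# literal_stringizer writes the tuple as a Python literal inside a GML string...
nx.write_gml(G, 'example.gml', stringizer=literal_stringizer)

# ...and literal_destringizer recovers the tuple when the file is read back.
H = nx.read_gml('example.gml', destringizer=literal_destringizer)
print(list(H.nodes(data=True)))  # the tuple round-trips intact
```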
2017-07-08T18:10:49
networkx/networkx
2,505
networkx__networkx-2505
[ "1928" ]
3eda45bd7aa69b072d1faff9b1df50f3adb3960e
diff --git a/networkx/algorithms/traversal/depth_first_search.py b/networkx/algorithms/traversal/depth_first_search.py --- a/networkx/algorithms/traversal/depth_first_search.py +++ b/networkx/algorithms/traversal/depth_first_search.py @@ -9,12 +9,7 @@ # # Author: # Aric Hagberg <[email protected]> -""" -Basic algorithms for depth-first searching the nodes of a graph. - -Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py -by D. Eppstein, July 2004. -""" +"""Basic algorithms for depth-first searching the nodes of a graph.""" import networkx as nx from collections import defaultdict @@ -24,7 +19,7 @@ 'dfs_labeled_edges'] -def dfs_edges(G, source=None): +def dfs_edges(G, source=None, depth_limit=None): """Iterate over edges in a depth-first-search (DFS). Parameters @@ -35,6 +30,9 @@ def dfs_edges(G, source=None): Specify starting node for depth-first search and return edges in the component reachable from source. + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + Returns ------- edges: generator @@ -42,18 +40,25 @@ def dfs_edges(G, source=None): Examples -------- - >>> G = nx.path_graph(3) - >>> print(list(nx.dfs_edges(G, 0))) + >>> G = nx.path_graph(5) + >>> list(nx.dfs_edges(G, source=0)) + [(0, 1), (1, 2), (2, 3), (3, 4)] + >>> list(nx.dfs_edges(G, source=0, depth_limit=2)) [(0, 1), (1, 2)] Notes ----- - Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - by D. Eppstein, July 2004. - If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + See Also -------- dfs_preorder_nodes @@ -67,24 +72,27 @@ def dfs_edges(G, source=None): # edges for components with source nodes = [source] visited = set() + if depth_limit is None: + depth_limit = len(G) for start in nodes: if start in visited: continue visited.add(start) - stack = [(start, iter(G[start]))] + stack = [(start, depth_limit, iter(G[start]))] while stack: - parent, children = stack[-1] + parent, depth_now, children = stack[-1] try: child = next(children) if child not in visited: yield parent, child visited.add(child) - stack.append((child, iter(G[child]))) + if depth_now > 1: + stack.append((child, depth_now-1, iter(G[child]))) except StopIteration: stack.pop() -def dfs_tree(G, source=None): +def dfs_tree(G, source=None, depth_limit=None): """Return oriented tree constructed from a depth-first-search from source. Parameters @@ -94,6 +102,9 @@ def dfs_tree(G, source=None): source : node, optional Specify starting node for depth-first search. + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. 
+ Returns ------- T : NetworkX DiGraph @@ -101,21 +112,25 @@ def dfs_tree(G, source=None): Examples -------- - >>> G = nx.path_graph(3) - >>> T = nx.dfs_tree(G, 0) - >>> print(list(T.edges())) + >>> G = nx.path_graph(5) + >>> T = nx.dfs_tree(G, source=0, depth_limit=2) + >>> list(T.edges()) [(0, 1), (1, 2)] + >>> T = nx.dfs_tree(G, source=0) + >>> list(T.edges()) + [(0, 1), (1, 2), (2, 3), (3, 4)] + """ T = nx.DiGraph() if source is None: T.add_nodes_from(G) else: T.add_node(source) - T.add_edges_from(dfs_edges(G, source)) + T.add_edges_from(dfs_edges(G, source, depth_limit)) return T -def dfs_predecessors(G, source=None): +def dfs_predecessors(G, source=None, depth_limit=None): """Return dictionary of predecessors in depth-first-search from source. Parameters @@ -126,6 +141,9 @@ def dfs_predecessors(G, source=None): Specify starting node for depth-first search and return edges in the component reachable from source. + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + Returns ------- pred: dict @@ -133,22 +151,29 @@ def dfs_predecessors(G, source=None): Examples -------- - >>> G = nx.path_graph(3) - >>> print(nx.dfs_predecessors(G, 0)) + >>> G = nx.path_graph(4) + >>> nx.dfs_predecessors(G, source=0) + {1: 0, 2: 1, 3: 2} + >>> nx.dfs_predecessors(G, source=0, depth_limit=2) {1: 0, 2: 1} Notes ----- - Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - by D. Eppstein, July 2004. - If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search """ - return dict((t, s) for s, t in dfs_edges(G, source=source)) + return {t: s for s, t in dfs_edges(G, source, depth_limit)} -def dfs_successors(G, source=None): +def dfs_successors(G, source=None, depth_limit=None): """Return dictionary of successors in depth-first-search from source. Parameters @@ -159,6 +184,9 @@ def dfs_successors(G, source=None): Specify starting node for depth-first search and return edges in the component reachable from source. + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + Returns ------- succ: dict @@ -166,27 +194,33 @@ def dfs_successors(G, source=None): Examples -------- - >>> G = nx.path_graph(3) - >>> print(nx.dfs_successors(G, 0)) + >>> G = nx.path_graph(5) + >>> nx.dfs_successors(G, source=0) + {0: [1], 1: [2], 2: [3], 3: [4]} + >>> nx.dfs_successors(G, source=0, depth_limit=2) {0: [1], 1: [2]} Notes ----- - Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - by D. Eppstein, July 2004. - If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. 
_Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search """ d = defaultdict(list) - for s, t in dfs_edges(G, source=source): + for s, t in dfs_edges(G, source=source, depth_limit=depth_limit): d[s].append(t) return dict(d) -def dfs_postorder_nodes(G, source=None): - """Iterate over nodes in a depth-first-search post-ordering starting - from source. +def dfs_postorder_nodes(G, source=None, depth_limit=None): + """Generate nodes in a depth-first-search post-ordering starting at source. Parameters ---------- @@ -196,6 +230,9 @@ def dfs_postorder_nodes(G, source=None): Specify starting node for depth-first search and return edges in the component reachable from source. + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + Returns ------- nodes: generator @@ -203,33 +240,37 @@ def dfs_postorder_nodes(G, source=None): Examples -------- - >>> G = nx.path_graph(3) - >>> print(list(nx.dfs_postorder_nodes(G, 0))) - [2, 1, 0] + >>> G = nx.path_graph(5) + >>> list(nx.dfs_postorder_nodes(G, source=0)) + [4, 3, 2, 1, 0] + >>> list(nx.dfs_postorder_nodes(G, source=0, depth_limit=2)) + [1, 0] Notes ----- - Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - by D. Eppstein, July 2004. - If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + See Also -------- dfs_edges dfs_preorder_nodes dfs_labeled_edges """ - post = (v for u, v, d in nx.dfs_labeled_edges(G, source=source) - if d == 'reverse') - # potential modification: chain source to end of post-ordering - # return chain(post, [source]) - return post + edges = nx.dfs_labeled_edges(G, source=source, depth_limit=depth_limit) + return (v for u, v, d in edges if d == 'reverse') -def dfs_preorder_nodes(G, source=None): - """Iterate over nodes in a depth-first-search pre-ordering starting - from source. +def dfs_preorder_nodes(G, source=None, depth_limit=None): + """Generate nodes in a depth-first-search pre-ordering starting at source. Parameters ---------- @@ -239,6 +280,9 @@ def dfs_preorder_nodes(G, source=None): Specify starting node for depth-first search and return edges in the component reachable from source. + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + Returns ------- nodes: generator @@ -246,32 +290,36 @@ def dfs_preorder_nodes(G, source=None): Examples -------- - >>> G = nx.path_graph(3) - >>> print(list(nx.dfs_preorder_nodes(G, 0))) + >>> G = nx.path_graph(5) + >>> list(nx.dfs_preorder_nodes(G, source=0)) + [0, 1, 2, 3, 4] + >>> list(nx.dfs_preorder_nodes(G, source=0, depth_limit=2)) [0, 1, 2] Notes ----- - Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - by D. Eppstein, July 2004. - If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. 
_Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + See Also -------- dfs_edges dfs_postorder_nodes dfs_labeled_edges """ - pre = (v for u, v, d in nx.dfs_labeled_edges(G, source=source) - if d == 'forward') - # potential modification: chain source to beginning of pre-ordering - # return chain([source], pre) - return pre + edges = nx.dfs_labeled_edges(G, source=source, depth_limit=depth_limit) + return (v for u, v, d in edges if d == 'forward') -def dfs_labeled_edges(G, source=None): +def dfs_labeled_edges(G, source=None, depth_limit=None): """Iterate over edges in a depth-first-search (DFS) labeled by type. Parameters @@ -282,6 +330,9 @@ def dfs_labeled_edges(G, source=None): Specify starting node for depth-first search and return edges in the component reachable from source. + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + Returns ------- edges: generator @@ -314,12 +365,17 @@ def dfs_labeled_edges(G, source=None): Notes ----- - Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - by D. Eppstein, July 2004. - If a source is not specified then a source is chosen arbitrarily and repeatedly until all components in the graph are searched. + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + See Also -------- dfs_edges @@ -335,14 +391,16 @@ def dfs_labeled_edges(G, source=None): # edges for components with source nodes = [source] visited = set() + if depth_limit is None: + depth_limit = len(G) for start in nodes: if start in visited: continue yield start, start, 'forward' visited.add(start) - stack = [(start, iter(G[start]))] + stack = [(start, depth_limit, iter(G[start]))] while stack: - parent, children = stack[-1] + parent, depth_now, children = stack[-1] try: child = next(children) if child in visited: @@ -350,7 +408,8 @@ def dfs_labeled_edges(G, source=None): else: yield parent, child, 'forward' visited.add(child) - stack.append((child, iter(G[child]))) + if depth_now > 1: + stack.append((child, depth_now-1, iter(G[child]))) except StopIteration: stack.pop() if stack:
diff --git a/networkx/algorithms/traversal/tests/test_dfs.py b/networkx/algorithms/traversal/tests/test_dfs.py --- a/networkx/algorithms/traversal/tests/test_dfs.py +++ b/networkx/algorithms/traversal/tests/test_dfs.py @@ -77,3 +77,63 @@ def test_dfs_tree_isolates(self): T=nx.dfs_tree(G,source=None) assert_equal(sorted(T.nodes()),[1, 2]) assert_equal(sorted(T.edges()),[]) + + +class TestDepthLimitedSearch: + + def setUp(self): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + self.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + self.D = D + + def dls_test_preorder_nodes(self): + assert_equal(list(nx.dfs_preorder_nodes(self.G, source=0, + depth_limit=2)), [0, 1, 2]) + assert_equal(list(nx.dfs_preorder_nodes(self.D, source=1, + depth_limit=2)), ([1, 0])) + + def dls_test_postorder_nodes(self): + assert_equal(list(nx.dfs_postorder_nodes(self.G, + source=3, depth_limit=3)), [1, 7, 2, 5, 4, 3]) + assert_equal(list(nx.dfs_postorder_nodes(self.D, + source=2, depth_limit=2)),([3, 7, 2])) + + def dls_test_successor(self): + result = nx.dfs_successors(self.G, source=4, depth_limit=3) + assert_equal({n: set(v) for n, v in result.items()}, + {2: {1, 7}, 3: {2}, 4: {3, 5}, 5: {6}}) + result = nx.dfs_successors(self.D, source=7, depth_limit=2) + assert_equal({n: set(v) for n, v in result.items()}, + {8: {9}, 2: {3}, 7: {8, 2}}) + + def dls_test_predecessor(self): + assert_equal(nx.dfs_predecessors(self.G, source=0, depth_limit=3), + {1: 0, 2: 1, 3: 2, 7: 2}) + assert_equal(nx.dfs_predecessors(self.D, source=2, depth_limit=3), + {8: 7, 9: 8, 3: 2, 7: 2}) + + def test_dls_tree(self): + T = nx.dfs_tree(self.G, source=3, depth_limit=1) + assert_equal(sorted(T.edges()), [(3, 2), (3, 4)]) + + def test_dls_edges(self): + edges = nx.dfs_edges(self.G, source=9, depth_limit=4) + assert_equal(list(edges),[(9, 8), (8, 7), + (7, 2), (2, 1), (2, 3), (9, 10)]) + + def test_dls_labeled_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1)) + forward = [(u, v) for (u, v, d) in edges if d == 'forward'] + assert_equal(forward, [(5, 5), (5, 4), (5, 6)]) + + def test_dls_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=6, depth_limit=2)) + forward = [(u, v) for (u, v, d) in edges if d == 'forward'] + assert_equal(forward, [(6, 6), (6, 5), (5, 4)])
Adding Depth Limited Search Adds depth-limited search as per issue #1912. @dschult @jfinkels, please take a look.
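For reference, a minimal usage sketch of the depth-limited traversal this patch adds, assuming the `depth_limit` keyword lands on `dfs_edges` as shown in the diff above:
```python
import networkx as nx

# A path graph 0 - 1 - 2 - 3 - 4.
G = nx.path_graph(5)

# Unlimited DFS visits every edge reachable from the source.
print(list(nx.dfs_edges(G, source=0)))
# [(0, 1), (1, 2), (2, 3), (3, 4)]

# With depth_limit=2 the search stops two hops from the source.
print(list(nx.dfs_edges(G, source=0, depth_limit=2)))
# [(0, 1), (1, 2)]
```
The same `depth_limit` keyword is threaded through the other DFS helpers in the patch (`dfs_tree`, `dfs_predecessors`, `dfs_successors`, the pre/post-order generators, and `dfs_labeled_edges`), so the pattern above carries over to them.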
2017-07-08T18:22:32
networkx/networkx
2,507
networkx__networkx-2507
[ "2450" ]
a154be0112dfe2e93e61d68549c388893ba5c428
diff --git a/networkx/drawing/nx_agraph.py b/networkx/drawing/nx_agraph.py --- a/networkx/drawing/nx_agraph.py +++ b/networkx/drawing/nx_agraph.py @@ -1,3 +1,11 @@ +# Copyright (C) 2004-2017 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. +# +# Author: Aric Hagberg ([email protected]) """ *************** Graphviz AGraph @@ -15,14 +23,6 @@ -------- Pygraphviz: http://pygraphviz.github.io/ """ -# Author: Aric Hagberg ([email protected]) - -# Copyright (C) 2004-2016 by -# Aric Hagberg <[email protected]> -# Dan Schult <[email protected]> -# Pieter Swart <[email protected]> -# All rights reserved. -# BSD license. import os import sys import tempfile @@ -34,7 +34,8 @@ 'pygraphviz_layout', 'view_pygraphviz'] -def from_agraph(A,create_using=None): + +def from_agraph(A, create_using=None): """Return a NetworkX Graph or DiGraph from a PyGraphviz graph. Parameters @@ -69,45 +70,49 @@ def from_agraph(A,create_using=None): if create_using is None: if A.is_directed(): if A.is_strict(): - create_using=nx.DiGraph() + create_using = nx.DiGraph() else: - create_using=nx.MultiDiGraph() + create_using = nx.MultiDiGraph() else: if A.is_strict(): - create_using=nx.Graph() + create_using = nx.Graph() else: - create_using=nx.MultiGraph() + create_using = nx.MultiGraph() # assign defaults - N=nx.empty_graph(0,create_using) - N.name='' + N = nx.empty_graph(0, create_using) + N.name = '' if A.name is not None: - N.name=A.name + N.name = A.name + + # add graph attributes + N.graph.update(A.graph_attr) # add nodes, attributes to N.node_attr for n in A.nodes(): - str_attr=dict((str(k),v) for k,v in n.attr.items()) - N.add_node(str(n),**str_attr) + str_attr = {str(k): v for k, v in n.attr.items()} + N.add_node(str(n), **str_attr) # add edges, assign edge data as dictionary of attributes for e in A.edges(): - u,v=str(e[0]),str(e[1]) - attr=dict(e.attr) - str_attr=dict((str(k),v) for k,v in attr.items()) + u, v = str(e[0]), str(e[1]) + attr = dict(e.attr) + str_attr = {str(k): v for k, v in attr.items()} if not N.is_multigraph(): if e.name is not None: - str_attr['key']=e.name - N.add_edge(u,v,**str_attr) + str_attr['key'] = e.name + N.add_edge(u, v, **str_attr) else: - N.add_edge(u,v,key=e.name,**str_attr) + N.add_edge(u, v, key=e.name, **str_attr) # add default attributes for graph, nodes, and edges # hang them on N.graph_attr - N.graph['graph']=dict(A.graph_attr) - N.graph['node']=dict(A.node_attr) - N.graph['edge']=dict(A.edge_attr) + N.graph['graph'] = dict(A.graph_attr) + N.graph['node'] = dict(A.node_attr) + N.graph['edge'] = dict(A.edge_attr) return N + def to_agraph(N): """Return a pygraphviz graph from a NetworkX graph N. 
@@ -133,34 +138,36 @@ def to_agraph(N): except ImportError: raise ImportError('requires pygraphviz ', 'http://pygraphviz.github.io/') - directed=N.is_directed() - strict=N.number_of_selfloops()==0 and not N.is_multigraph() - A=pygraphviz.AGraph(name=N.name,strict=strict,directed=directed) + directed = N.is_directed() + strict = N.number_of_selfloops() == 0 and not N.is_multigraph() + A = pygraphviz.AGraph(name=N.name, strict=strict, directed=directed) # default graph attributes - A.graph_attr.update(N.graph.get('graph',{})) - A.node_attr.update(N.graph.get('node',{})) - A.edge_attr.update(N.graph.get('edge',{})) + A.graph_attr.update(N.graph.get('graph', {})) + A.node_attr.update(N.graph.get('node', {})) + A.edge_attr.update(N.graph.get('edge', {})) + + A.graph_attr.update(N.graph) # add nodes - for n,nodedata in N.nodes(data=True): - A.add_node(n,**nodedata) + for n, nodedata in N.nodes(data=True): + A.add_node(n, **nodedata) # loop over edges if N.is_multigraph(): - for u,v,key,edgedata in N.edges(data=True,keys=True): - str_edgedata=dict((k,str(v)) for k,v in edgedata.items() if k != 'key') - A.add_edge(u,v,key=str(key),**str_edgedata) + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edata = {k: str(v) for k, v in edgedata.items() if k != 'key'} + A.add_edge(u, v, key=str(key), **str_edata) else: - for u,v,edgedata in N.edges(data=True): - str_edgedata=dict((k,str(v)) for k,v in edgedata.items()) - A.add_edge(u,v,**str_edgedata) - + for u, v, edgedata in N.edges(data=True): + str_edgedata = {k: str(v) for k, v in edgedata.items()} + A.add_edge(u, v, **str_edgedata) return A -def write_dot(G,path): + +def write_dot(G, path): """Write NetworkX graph G to Graphviz dot format on path. Parameters @@ -175,11 +182,12 @@ def write_dot(G,path): except ImportError: raise ImportError('requires pygraphviz ', 'http://pygraphviz.github.io/') - A=to_agraph(G) + A = to_agraph(G) A.write(path) A.clear() return + def read_dot(path): """Return a NetworkX graph from a dot file on path. @@ -193,11 +201,11 @@ def read_dot(path): except ImportError: raise ImportError('read_dot() requires pygraphviz ', 'http://pygraphviz.github.io/') - A=pygraphviz.AGraph(file=path) + A = pygraphviz.AGraph(file=path) return from_agraph(A) -def graphviz_layout(G,prog='neato',root=None, args=''): +def graphviz_layout(G, prog='neato', root=None, args=''): """Create node positions for G using Graphviz. Parameters @@ -212,7 +220,7 @@ def graphviz_layout(G,prog='neato',root=None, args=''): Extra arguments to Graphviz layout program Returns : dictionary - Dictionary of x,y, positions keyed by node. + Dictionary of x, y, positions keyed by node. Examples -------- @@ -225,9 +233,10 @@ def graphviz_layout(G,prog='neato',root=None, args=''): This is a wrapper for pygraphviz_layout. """ - return pygraphviz_layout(G,prog=prog,root=root,args=args) + return pygraphviz_layout(G, prog=prog, root=root, args=args) + -def pygraphviz_layout(G,prog='neato',root=None, args=''): +def pygraphviz_layout(G, prog='neato', root=None, args=''): """Create node positions for G using Graphviz. Parameters @@ -242,7 +251,7 @@ def pygraphviz_layout(G,prog='neato',root=None, args=''): Extra arguments to Graphviz layout program Returns : dictionary - Dictionary of x,y, positions keyed by node. + Dictionary of x, y, positions keyed by node. 
Examples -------- @@ -257,23 +266,24 @@ def pygraphviz_layout(G,prog='neato',root=None, args=''): raise ImportError('requires pygraphviz ', 'http://pygraphviz.github.io/') if root is not None: - args+="-Groot=%s"%root - A=to_agraph(G) - A.layout(prog=prog,args=args) - node_pos={} + args += "-Groot=%s" % root + A = to_agraph(G) + A.layout(prog=prog, args=args) + node_pos = {} for n in G: - node=pygraphviz.Node(A,n) + node = pygraphviz.Node(A, n) try: - xx,yy=node.attr["pos"].split(',') - node_pos[n]=(float(xx),float(yy)) + xx, yy = node.attr["pos"].split(',') + node_pos[n] = (float(xx), float(yy)) except: - print("no position for node",n) - node_pos[n]=(0.0,0.0) + print("no position for node", n) + node_pos[n] = (0.0, 0.0) return node_pos + @nx.utils.open_file(5, 'w') def view_pygraphviz(G, edgelabel=None, prog='dot', args='', - suffix='', path=None): + suffix='', path=None): """Views the graph G using the specified layout algorithm. Parameters @@ -340,7 +350,7 @@ def view_pygraphviz(G, edgelabel=None, prog='dot', args='', def update_attrs(which, attrs): # Update graph attributes. Return list of those which were added. added = [] - for k,v in attrs.items(): + for k, v in attrs.items(): if k not in G.graph[which]: G.graph[which][k] = v added.append(k) @@ -375,13 +385,13 @@ def func(data): # update all the edge labels if G.is_multigraph(): - for u,v,key,data in G.edges(keys=True, data=True): + for u, v, key, data in G.edges(keys=True, data=True): # PyGraphviz doesn't convert the key to a string. See #339 - edge = A.get_edge(u,v,str(key)) + edge = A.get_edge(u, v, str(key)) edge.attr['label'] = str(func(data)) else: - for u,v,data in G.edges(data=True): - edge = A.get_edge(u,v) + for u, v, data in G.edges(data=True): + edge = A.get_edge(u, v) edge.attr['label'] = str(func(data)) if path is None: @@ -399,6 +409,7 @@ def func(data): return path.name, A + def display_pygraphviz(graph, path, format=None, prog=None, args=''): """Internal function to display a graph in OS dependent manner. @@ -436,6 +447,7 @@ def display_pygraphviz(graph, path, format=None, prog=None, args=''): path.close() nx.utils.default_opener(filename) + # fixture for nose tests def setup_module(module): from nose import SkipTest
diff --git a/networkx/drawing/tests/test_agraph.py b/networkx/drawing/tests/test_agraph.py --- a/networkx/drawing/tests/test_agraph.py +++ b/networkx/drawing/tests/test_agraph.py @@ -2,7 +2,7 @@ import os import tempfile from nose import SkipTest -from nose.tools import assert_true,assert_equal +from nose.tools import assert_true, assert_equal import networkx as nx @@ -16,13 +16,16 @@ def setupClass(cls): raise SkipTest('PyGraphviz not available.') def build_graph(self, G): - G.add_edges_from([('A','B'),('A','C'),('A','C'),('B','C'),('A','D')]) + edges = [('A', 'B'), ('A', 'C'), ('A', 'C'), ('B', 'C'), ('A', 'D')] + G.add_edges_from(edges) G.add_node('E') + G.graph['metal'] = 'bronze' return G def assert_equal(self, G1, G2): - assert_true( sorted(G1.nodes()) == sorted(G2.nodes()) ) - assert_true( sorted(G1.edges()) == sorted(G2.edges()) ) + assert_equal(sorted(G1.nodes()), sorted(G2.nodes())) + assert_equal(sorted(G1.edges()), sorted(G2.edges())) + assert_equal(G1.graph['metal'], G2.graph['metal']) def agraph_checks(self, G): G = self.build_graph(G) @@ -36,7 +39,7 @@ def agraph_checks(self, G): os.unlink(fname) self.assert_equal(H, Hin) - (fd,fname) = tempfile.mkstemp() + (fd, fname) = tempfile.mkstemp() with open(fname, 'w') as fh: nx.drawing.nx_agraph.write_dot(H, fh)
discrepancy between documentation and implementation of nx_agraph.to_agraph function The [documentation](http://networkx.readthedocs.io/en/networkx-1.11/tutorial/tutorial.html#graph-attributes) says that to add a graph attribute I should do: ``` >>> G = nx.Graph(day="Friday") >>> G.graph {'day': 'Friday'} ``` or ``` >>> G.graph['day']='Monday' >>> G.graph {'day': 'Monday'} ``` Yet based on [the code that converts to a `pygraphviz.AGraph`](https://github.com/networkx/networkx/blob/v1.11/networkx/drawing/nx_agraph.py#L140-L143) it seems the following would be more appropriate: ``` >>> G = nx.Graph(graph={"day":"Friday"}) >>> G.graph['graph'] {'day': 'Friday'} ``` or ``` >>> G.graph['graph'] = dict() >>> G.graph['graph']['day']='Monday' >>> G.graph['graph'] {'day': 'Monday'} ``` This is how I currently have to construct my `nx.DiGraph` to end up with a `pygraphviz.AGraph` that includes the `label` attribute I want to give it.
After thinking this over for a few minutes, it seems most likely that the bug is in `to_agraph` (and maybe also `from_agraph`), so I might put some thought into test cases and a fix for this problem in the near future.
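A minimal sketch of the behaviour the eventual fix implements, assuming `to_agraph` copies top-level `G.graph` entries into `A.graph_attr` (as the `A.graph_attr.update(N.graph)` line in the patch above does) and that pygraphviz is installed:
```python
import networkx as nx

G = nx.Graph(day="Friday")        # graph attribute set the documented way
A = nx.nx_agraph.to_agraph(G)     # requires pygraphviz

# With the patched to_agraph, top-level G.graph entries become
# AGraph attributes, so the documented idiom works as expected.
print(A.graph_attr["day"])        # 'Friday'
```
The round trip also holds: the patched `from_agraph` applies `N.graph.update(A.graph_attr)`, so the attribute survives conversion back to a NetworkX graph.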
2017-07-10T03:42:42
networkx/networkx
2,508
networkx__networkx-2508
[ "2248" ]
a154be0112dfe2e93e61d68549c388893ba5c428
diff --git a/networkx/algorithms/shortest_paths/weighted.py b/networkx/algorithms/shortest_paths/weighted.py --- a/networkx/algorithms/shortest_paths/weighted.py +++ b/networkx/algorithms/shortest_paths/weighted.py @@ -47,6 +47,7 @@ 'goldberg_radzik', 'johnson'] + def _weight_function(G, weight): """Returns a function that returns the weight of an edge. @@ -86,6 +87,7 @@ def _weight_function(G, weight): return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values()) return lambda u, v, data: data.get(weight, 1) + def dijkstra_path(G, source, target, weight='weight'): """Returns the shortest weighted path from source to target in G. @@ -157,11 +159,7 @@ def dijkstra_path(G, source, target, weight='weight'): """ (length, path) = single_source_dijkstra(G, source, target=target, weight=weight) - try: - return path[target] - except KeyError: - raise nx.NetworkXNoPath( - "node %s not reachable from %s" % (target, source)) + return path def dijkstra_path_length(G, source, target, weight='weight'): @@ -398,16 +396,19 @@ def single_source_dijkstra(G, source, target=None, cutoff=None, Returns ------- - distance,path : dictionaries - Returns a tuple of two dictionaries keyed by node. - The first dictionary stores distance from the source. - The second stores the path from the source to that node. + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from one of the source nodes. + The second stores the path from one of the sources to that node. + If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. Examples -------- - >>> G=nx.path_graph(5) - >>> length, path=nx.single_source_dijkstra(G, 0) + >>> G = nx.path_graph(5) + >>> length, path = nx.single_source_dijkstra(G, 0) >>> print(length[4]) 4 >>> for node in [0, 1, 2, 3, 4]: @@ -419,6 +420,11 @@ def single_source_dijkstra(G, source, target=None, cutoff=None, 4: 4 >>> path[4] [0, 1, 2, 3, 4] + >>> length, path = nx.single_source_dijkstra(G, 0, 1) + >>> length + 1 + >>> path + [0, 1] Notes ----- @@ -596,7 +602,7 @@ def multi_source_dijkstra_path_length(G, sources, cutoff=None, def multi_source_dijkstra(G, sources, target=None, cutoff=None, - weight='weight'): + weight='weight'): """Find shortest weighted paths and lengths from a given set of source nodes. @@ -635,10 +641,13 @@ def multi_source_dijkstra(G, sources, target=None, cutoff=None, Returns ------- - distance, path : pair of dictionaries - Returns a tuple of two dictionaries keyed by node. + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. The first dictionary stores distance from one of the source nodes. The second stores the path from one of the sources to that node. + If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. Examples -------- @@ -656,6 +665,12 @@ def multi_source_dijkstra(G, sources, target=None, cutoff=None, >>> path[3] [4, 3] + >>> length, path = nx.multi_source_dijkstra(G, {0, 4}, 1) + >>> length + 1 + >>> path + [0, 1] + Notes ----- Edge weight attributes must be numerical. 
@@ -686,12 +701,17 @@ def multi_source_dijkstra(G, sources, target=None, cutoff=None, if not sources: raise ValueError('sources must not be empty') if target in sources: - return ({target: 0}, {target: [target]}) + return (0, [target]) weight = _weight_function(G, weight) paths = {source: [source] for source in sources} # dictionary of paths dist = _dijkstra_multisource(G, sources, weight, paths=paths, cutoff=cutoff, target=target) - return (dist, paths) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError: + raise nx.NetworkXNoPath("No path to {}.".format(target)) def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, @@ -836,6 +856,8 @@ def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight='weight'): pred, distance : dictionaries Returns two dictionaries representing a list of predecessors of a node and the distance to each node. + Warning: If target is specified, the dicts are incomplete as they + only contain information for the nodes along a path to target. Notes ----- @@ -844,6 +866,22 @@ def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight='weight'): The list of predecessors contains more than one element only when there are more than one shortest paths to the key node. + + Examples + -------- + >>> import networkx as nx + >>> G = nx.path_graph(5, create_using = nx.DiGraph()) + >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0) + >>> sorted(pred.items()) + [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1) + >>> sorted(pred.items()) + [(0, []), (1, [0])] + >>> sorted(dist.items()) + [(0, 0), (1, 1)] """ weight = _weight_function(G, weight) @@ -957,17 +995,20 @@ def all_pairs_dijkstra_path(G, cutoff=None, weight='weight'): # TODO This can be trivially parallelized. return {n: path(G, n, cutoff=cutoff, weight=weight) for n in G} -def bellman_ford(G, source, weight='weight'): - """DEPRECATED: Has been replaced by function bellman_ford_predecessor_and_distance(). +def bellman_ford(G, source, weight='weight'): + """DEPRECATED: Replaced by bellman_ford_predecessor_and_distance(). """ - _warnings.warn("Function bellman_ford() is deprecated, use function bellman_ford_predecessor_and_distance() instead.", - DeprecationWarning) + msg = "Function bellman_ford() is deprecated, use " \ + "bellman_ford_predecessor_and_distance() instead." + _warnings.warn(msg, DeprecationWarning) return bellman_ford_predecessor_and_distance(G, source, weight=weight) -def bellman_ford_predecessor_and_distance(G, source, target=None, cutoff=None, weight='weight'): + +def bellman_ford_predecessor_and_distance(G, source, target=None, + cutoff=None, weight='weight'): """Compute shortest path lengths and predecessors on shortest paths in weighted graphs. @@ -1002,6 +1043,8 @@ def bellman_ford_predecessor_and_distance(G, source, target=None, cutoff=None, w pred, dist : dictionaries Returns two dictionaries keyed by node to predecessor in the path and to the distance from the source respectively. + Warning: If target is specified, the dicts are incomplete as they + only contain information for the nodes along a path to target. 
Raises ------ @@ -1021,6 +1064,12 @@ def bellman_ford_predecessor_and_distance(G, source, target=None, cutoff=None, w >>> sorted(dist.items()) [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1) + >>> sorted(pred.items()) + [(0, [None]), (1, [0])] + >>> sorted(dist.items()) + [(0, 0), (1, 1)] + >>> from nose.tools import assert_raises >>> G = nx.cycle_graph(5, create_using = nx.DiGraph()) >>> G[1][2]['weight'] = -7 @@ -1054,7 +1103,9 @@ def bellman_ford_predecessor_and_distance(G, source, target=None, cutoff=None, w weight = _weight_function(G, weight) - return (pred, _bellman_ford(G, [source], weight,pred=pred, dist=dist, cutoff=cutoff, target=target)) + dist = _bellman_ford(G, [source], weight, pred=pred, dist=dist, + cutoff=cutoff, target=target) + return (pred, dist) def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, @@ -1096,8 +1147,8 @@ def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, Returns ------- - Returns two dictionaries keyed by node to predecessor in the - path and to the distance from the source respectively. + Returns a dict keyed by node to the distance from the source. + Dicts for paths and pred are in the mutated input dicts by those names. Raises ------ @@ -1168,9 +1219,9 @@ def _bellman_ford(G, source, weight, pred=None, paths=None, dist=None, path.reverse() paths[dst] = path - return dist + def bellman_ford_path(G, source, target, weight='weight'): """Returns the shortest path from source to target in a weighted graph G. @@ -1200,7 +1251,7 @@ def bellman_ford_path(G, source, target, weight='weight'): Examples -------- >>> G=nx.path_graph(5) - >>> print(nx.bellman_ford_path(G,0,4)) + >>> print(nx.bellman_ford_path(G, 0, 4)) [0, 1, 2, 3, 4] Notes @@ -1212,12 +1263,10 @@ def bellman_ford_path(G, source, target, weight='weight'): -------- dijkstra_path(), bellman_ford_path_length() """ - (lengths, paths) = single_source_bellman_ford(G, source, target=target, weight=weight) - try: - return paths[target] - except KeyError: - raise nx.NetworkXNoPath( - "Node %s not reachable from %s" % (source, target)) + length, path = single_source_bellman_ford(G, source, + target=target, weight=weight) + return path + def bellman_ford_path_length(G, source, target, weight='weight'): """Returns the shortest path length from source to target @@ -1266,7 +1315,7 @@ def bellman_ford_path_length(G, source, target, weight='weight'): weight = _weight_function(G, weight) - length = _bellman_ford(G, [source], weight, target=target) + length = _bellman_ford(G, [source], weight, target=target) try: return length[target] @@ -1274,6 +1323,7 @@ def bellman_ford_path_length(G, source, target, weight='weight'): raise nx.NetworkXNoPath( "node %s not reachable from %s" % (source, target)) + def single_source_bellman_ford_path(G, source, cutoff=None, weight='weight'): """Compute shortest path between source and all other reachable nodes for a weighted graph. @@ -1317,7 +1367,9 @@ def single_source_bellman_ford_path(G, source, cutoff=None, weight='weight'): G, source, cutoff=cutoff, weight=weight) return path -def single_source_bellman_ford_path_length(G, source, cutoff=None, weight='weight'): + +def single_source_bellman_ford_path_length(G, source, + cutoff=None, weight='weight'): """Compute the shortest path length between source and all other reachable nodes for a weighted graph. 
@@ -1367,7 +1419,9 @@ def single_source_bellman_ford_path_length(G, source, cutoff=None, weight='weigh return iter(_bellman_ford(G, [source], weight, cutoff=cutoff).items()) -def single_source_bellman_ford(G, source, target=None, cutoff=None, weight='weight'): + +def single_source_bellman_ford(G, source, + target=None, cutoff=None, weight='weight'): """Compute shortest paths and lengths in a weighted graph G. Uses Bellman-Ford algorithm for shortest paths. @@ -1387,16 +1441,19 @@ def single_source_bellman_ford(G, source, target=None, cutoff=None, weight='weig Returns ------- - distance,path : dictionaries - Returns a tuple of two dictionaries keyed by node. - The first dictionary stores distance from the source. - The second stores the path from the source to that node. + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from one of the source nodes. + The second stores the path from one of the sources to that node. + If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. Examples -------- - >>> G=nx.path_graph(5) - >>> length,path=nx.single_source_bellman_ford(G,0) + >>> G = nx.path_graph(5) + >>> length, path = nx.single_source_bellman_ford(G, 0) >>> print(length[4]) 4 >>> for node in [0, 1, 2, 3, 4]: @@ -1408,6 +1465,11 @@ def single_source_bellman_ford(G, source, target=None, cutoff=None, weight='weig 4: 4 >>> path[4] [0, 1, 2, 3, 4] + >>> length, path = nx.single_source_bellman_ford(G, 0, 1) + >>> length + 1 + >>> path + [0, 1] Notes ----- @@ -1421,13 +1483,21 @@ def single_source_bellman_ford(G, source, target=None, cutoff=None, weight='weig single_source_bellman_ford_path_length() """ if source == target: - return ({source: 0}, {source: [source]}) + return (0, [source]) weight = _weight_function(G, weight) paths = {source: [source]} # dictionary of paths - return (_bellman_ford(G, [source], weight, paths=paths, cutoff=cutoff, - target=target), paths) + dist = _bellman_ford(G, [source], weight, paths=paths, cutoff=cutoff, + target=target) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError: + msg = "Node %s not reachable from %s" % (source, target) + raise nx.NetworkXNoPath(msg) + def all_pairs_bellman_ford_path_length(G, cutoff=None, weight='weight'): """ Compute shortest path lengths between all nodes in a weighted graph. @@ -1475,6 +1545,7 @@ def all_pairs_bellman_ford_path_length(G, cutoff=None, weight='weight'): for n in G: yield (n, dict(length(G, n, cutoff=cutoff, weight=weight))) + def all_pairs_bellman_ford_path(G, cutoff=None, weight='weight'): """ Compute shortest paths between all nodes in a weighted graph. @@ -1514,6 +1585,7 @@ def all_pairs_bellman_ford_path(G, cutoff=None, weight='weight'): # TODO This can be trivially parallelized. return {n: path(G, n, cutoff=cutoff, weight=weight) for n in G} + def goldberg_radzik(G, source, weight='weight'): """Compute shortest path lengths and predecessors on shortest paths in weighted graphs. @@ -1732,10 +1804,10 @@ def negative_edge_cycle(G, weight='weight'): Edge weight attributes must be numerical. Distances are calculated as sums of weighted edges traversed. 
- This algorithm uses bellman_ford_predecessor_and_distance() but finds negative cycles - on any component by first adding a new node connected to - every node, and starting bellman_ford_predecessor_and_distance on that node. It then - removes that extra node. + This algorithm uses bellman_ford_predecessor_and_distance() but finds + negative cycles on any component by first adding a new node connected to + every node, and starting bellman_ford_predecessor_and_distance on that + node. It then removes that extra node. """ newnode = generate_unique_node() G.add_edges_from([(newnode, n) for n in G]) @@ -1777,12 +1849,9 @@ def bidirectional_dijkstra(G, source, target, weight='weight'): Returns ------- - length : number - Shortest path length. - - Returns a tuple of two dictionaries keyed by node. - The first dictionary stores distance from the source. - The second stores the path from the source to that node. + length, path : number and list + length is the distance from source to target. + path is a list of nodes on a path from source to target. Raises ------ @@ -1791,8 +1860,8 @@ def bidirectional_dijkstra(G, source, target, weight='weight'): Examples -------- - >>> G=nx.path_graph(5) - >>> length,path=nx.bidirectional_dijkstra(G,0,4) + >>> G = nx.path_graph(5) + >>> length, path = nx.bidirectional_dijkstra(G, 0, 4) >>> print(length) 4 >>> print(path) @@ -1831,10 +1900,9 @@ def bidirectional_dijkstra(G, source, target, weight='weight'): push = heappush pop = heappop # Init: [Forward, Backward] - dists = [{}, {}] # dictionary of final distances + dists = [{}, {}] # dictionary of final distances paths = [{source: [source]}, {target: [target]}] # dictionary of paths - fringe = [[], []] # heap of (distance, node) tuples - # for choosing next node to expand + fringe = [[], []] # heap of (distance, node) for choosing node to expand seen = [{source: 0}, {target: 0}] # dict of distances to seen nodes c = count() # initialize fringe heap @@ -1978,10 +2046,11 @@ def johnson(G, weight='weight'): # Calculate distance of shortest paths dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist) + # Update the weight function to take into account the Bellman--Ford # relaxation distances. - scale = lambda u, v: dist_bellman[u] - dist_bellman[v] - new_weight = lambda u, v, d: weight(u, v, d) + scale(u, v) + def new_weight(u, v, d): + return weight(u, v, d) + dist_bellman[u] - dist_bellman[v] def dist_path(v): paths = {v: [v]} @@ -1989,4 +2058,3 @@ def dist_path(v): return paths return {v: dist_path(v) for v in G} -
diff --git a/networkx/algorithms/shortest_paths/tests/test_weighted.py b/networkx/algorithms/shortest_paths/tests/test_weighted.py --- a/networkx/algorithms/shortest_paths/tests/test_weighted.py +++ b/networkx/algorithms/shortest_paths/tests/test_weighted.py @@ -108,7 +108,7 @@ def test_dijkstra(self): assert_equal(nx.dijkstra_path_length(self.XG4, 0, 2), 4) validate_path(self.MXG4, 0, 2, 4, nx.dijkstra_path(self.MXG4, 0, 2)) validate_path( - self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's', 'v')[1]['v']) + self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's', 'v')[1]) validate_path( self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's')[1]['v']) @@ -123,8 +123,7 @@ def test_dijkstra(self): validate_path(self.cycle, 0, 3, 3, nx.dijkstra_path(self.cycle, 0, 3)) validate_path(self.cycle, 0, 4, 3, nx.dijkstra_path(self.cycle, 0, 4)) - assert_equal( - nx.single_source_dijkstra(self.cycle, 0, 0), ({0: 0}, {0: [0]})) + assert_equal(nx.single_source_dijkstra(self.cycle, 0, 0), (0, [0])) def test_bidirectional_dijkstra(self): validate_length_path( @@ -231,14 +230,14 @@ def test_weight_function(self): weight = lambda u, v, d: 1 / d['weight'] # The shortest path from 0 to 2 using the actual weights on the # edges should be [0, 1, 2]. - distances, paths = nx.single_source_dijkstra(G, 0, 2) - assert_equal(distances[2], 2) - assert_equal(paths[2], [0, 1, 2]) + distance, path = nx.single_source_dijkstra(G, 0, 2) + assert_equal(distance, 2) + assert_equal(path, [0, 1, 2]) # However, with the above weight function, the shortest path # should be [0, 2], since that has a very small weight. - distances, paths = nx.single_source_dijkstra(G, 0, 2, weight=weight) - assert_equal(distances[2], 1 / 10) - assert_equal(paths[2], [0, 2]) + distance, path = nx.single_source_dijkstra(G, 0, 2, weight=weight) + assert_equal(distance, 1 / 10) + assert_equal(path, [0, 2]) class TestDijkstraPathLength(object): @@ -397,21 +396,21 @@ def test_multigraph(self): assert_equal(nx.single_source_bellman_ford_path(self.MXG, 's')['v'], ['s', 'x', 'u', 'v']) assert_equal(dict(nx.single_source_bellman_ford_path_length(self.MXG, 's'))['v'], 9) D, P = nx.single_source_bellman_ford(self.MXG, 's', target='v') - assert_equal(D['v'], 9) - assert_equal(P['v'], ['s', 'x', 'u', 'v']) + assert_equal(D, 9) + assert_equal(P, ['s', 'x', 'u', 'v']) P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, 's') assert_equal(P['v'], ['u']) assert_equal(D['v'], 9) P, D = nx.goldberg_radzik(self.MXG, 's') assert_equal(P['v'], 'u') - assert_equal(D['v'], 9) + assert_equal(D['v'], 9) assert_equal(nx.bellman_ford_path(self.MXG4, 0, 2), [0, 1, 2]) assert_equal(nx.bellman_ford_path_length(self.MXG4, 0, 2), 4) assert_equal(nx.single_source_bellman_ford_path(self.MXG4, 0)[2], [0, 1, 2]) assert_equal(dict(nx.single_source_bellman_ford_path_length(self.MXG4, 0))[2], 4) D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2) - assert_equal(D[2], 4) - assert_equal(P[2], [0, 1, 2]) + assert_equal(D, 4) + assert_equal(P, [0, 1, 2]) P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0) assert_equal(P[2], [1]) assert_equal(D[2], 4) @@ -425,8 +424,8 @@ def test_others(self): assert_equal(nx.single_source_bellman_ford_path(self.XG, 's')['v'], ['s', 'x', 'u', 'v']) assert_equal(dict(nx.single_source_bellman_ford_path_length(self.XG, 's'))['v'], 9) D, P = nx.single_source_bellman_ford(self.XG, 's', target='v') - assert_equal(D['v'], 9) - assert_equal(P['v'], ['s', 'x', 'u', 'v']) + assert_equal(D, 9) + assert_equal(P, ['s', 'x', 'u', 
'v']) (P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, 's') assert_equal(P['v'], ['u']) assert_equal(D['v'], 9)
Single source dijkstra has confusing output if target is specified I would expect `single_source_dijkstra(G, source=u, target=v)` to return a pair whose first element is the length of the shortest path from `u` to `v` and whose second element is that path itself. However, the return value is instead the pair of dictionaries, lengths and paths both keyed by destination node, that together represent the state of Dijkstra's algorithm at the point the target node was found. Should this be changed?
It seems the implementation isn't quite complete for using source and target together.
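A minimal sketch of the return convention the patch introduces: with a `target`, `single_source_dijkstra` (and its Bellman-Ford counterpart) returns a single `(distance, path)` pair instead of the full dictionaries.
```python
import networkx as nx

G = nx.path_graph(5)

# No target: dictionaries keyed by node, as before.
lengths, paths = nx.single_source_dijkstra(G, 0)
print(lengths[4], paths[4])               # 4 [0, 1, 2, 3, 4]

# With a target: one distance and one path, under the patched API.
length, path = nx.single_source_dijkstra(G, 0, target=4)
print(length, path)                       # 4 [0, 1, 2, 3, 4]

length, path = nx.single_source_bellman_ford(G, 0, target=4)
print(length, path)                       # 4 [0, 1, 2, 3, 4]
```
If the target is unreachable, the patched code raises `NetworkXNoPath` rather than returning a partial dictionary, which is what makes the target-specified call unambiguous.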
2017-07-10T04:53:20
networkx/networkx
2,515
networkx__networkx-2515
[ "448" ]
7a138a1bab6e9eefa92883dfa11fcb45dc8347ca
diff --git a/networkx/readwrite/graphml.py b/networkx/readwrite/graphml.py --- a/networkx/readwrite/graphml.py +++ b/networkx/readwrite/graphml.py @@ -1,3 +1,12 @@ +# Copyright (C) 2008-2017 by +# Aric Hagberg <[email protected]> +# Dan Schult <[email protected]> +# Pieter Swart <[email protected]> +# All rights reserved. +# BSD license. +# +# Authors: Salim Fadhley +# Aric Hagberg ([email protected]) """ ******* GraphML @@ -34,37 +43,41 @@ http://graphml.graphdrawing.org/primer/graphml-primer.html for examples. """ -__author__ = """\n""".join(['Salim Fadhley', - 'Aric Hagberg ([email protected])' - ]) - -__all__ = ['write_graphml', 'read_graphml', 'generate_graphml', - 'parse_graphml', 'GraphMLWriter', 'GraphMLReader'] - -from collections import defaultdict -import networkx as nx -from networkx.utils import open_file, make_str import warnings +from collections import defaultdict + try: - from xml.etree.cElementTree import Element, ElementTree, tostring, fromstring + from xml.etree.cElementTree import Element, ElementTree + from xml.etree.cElementTree import tostring, fromstring except ImportError: try: - from xml.etree.ElementTree import Element, ElementTree, tostring, fromstring + from xml.etree.ElementTree import Element, ElementTree + from xml.etree.ElementTree import tostring, fromstring except ImportError: pass -@open_file(1,mode='wb') -def write_graphml(G, path, encoding='utf-8', prettyprint=True, infer_numeric_types=False): +try: + import lxml.etree as lxmletree +except ImportError: + lxmletree = None + +import networkx as nx +from networkx.utils import open_file, make_str + +__all__ = ['write_graphml', 'read_graphml', 'generate_graphml', + 'write_graphml_xml', 'write_graphml_lxml', + 'parse_graphml', 'GraphMLWriter', 'GraphMLReader'] + + +@open_file(1, mode='wb') +def write_graphml_xml(G, path, encoding='utf-8', prettyprint=True, + infer_numeric_types=False): """Write G in GraphML XML format to path Parameters ---------- G : graph A networkx graph - infer_numeric_types : boolean - Determine if numeric types should be generalized despite different python values. - For example, if edges have both int and float 'weight' attributes, it will be - inferred in GraphML that they are both floats (which translates to double in GraphML). path : file or string File or filename to write. Filenames ending in .gz or .bz2 will be compressed. @@ -72,38 +85,87 @@ def write_graphml(G, path, encoding='utf-8', prettyprint=True, infer_numeric_typ Encoding for text data. prettyprint : bool (optional) If True use line breaks and indenting in output XML. + infer_numeric_types : boolean + Determine if numeric types should be generalized. + For example, if edges have both int and float 'weight' attributes, + we infer in GraphML that both are floats. Examples -------- - >>> G=nx.path_graph(4) + >>> G = nx.path_graph(4) >>> nx.write_graphml(G, "test.graphml") Notes ----- - This implementation does not support mixed graphs (directed and unidirected - edges together) hyperedges, nested graphs, or ports. + It may be a good idea in Python2 to convert strings to unicode + before giving the graph to write_gml. At least the strings with + either many characters to escape. + + This implementation does not support mixed graphs (directed + and unidirected edges together) hyperedges, nested graphs, or ports. 
""" - writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint,infer_numeric_types=infer_numeric_types) + writer = GraphMLWriter(encoding=encoding, prettyprint=prettyprint, + infer_numeric_types=infer_numeric_types) writer.add_graph_element(G) writer.dump(path) -def generate_graphml(G, encoding='utf-8',prettyprint=True): - """Generate GraphML lines for G +@open_file(1, mode='wb') +def write_graphml_lxml(G, path, encoding='utf-8', prettyprint=True, + infer_numeric_types=False): + """Write G in GraphML XML format to path + + This function uses the LXML framework and should be faster than + the version using the xml library. Parameters ---------- G : graph A networkx graph + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. encoding : string (optional) Encoding for text data. prettyprint : bool (optional) If True use line breaks and indenting in output XML. + infer_numeric_types : boolean + Determine if numeric types should be generalized. + For example, if edges have both int and float 'weight' attributes, + we infer in GraphML that both are floats. Examples -------- >>> G=nx.path_graph(4) - >>> linefeed=chr(10) # linefeed=\n - >>> s=linefeed.join(nx.generate_graphml(G)) # doctest: +SKIP + >>> nx.write_graphml_lxml(G, "fourpath.graphml") # doctest: +SKIP + + Notes + ----- + This implementation does not support mixed graphs (directed + and unidirected edges together) hyperedges, nested graphs, or ports. + """ + writer = GraphMLWriterLxml(path, graph=G, encoding=encoding, + prettyprint=prettyprint, + infer_numeric_types=infer_numeric_types) + writer.dump() + + +def generate_graphml(G, encoding='utf-8', prettyprint=True): + """Generate GraphML lines for G + + Parameters + ---------- + G : graph + A networkx graph + encoding : string (optional) + Encoding for text data. + prettyprint : bool (optional) + If True use line breaks and indenting in output XML. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> linefeed = chr(10) # linefeed = \n + >>> s = linefeed.join(nx.generate_graphml(G)) # doctest: +SKIP >>> for line in nx.generate_graphml(G): # doctest: +SKIP ... print(line) @@ -112,13 +174,14 @@ def generate_graphml(G, encoding='utf-8',prettyprint=True): This implementation does not support mixed graphs (directed and unidirected edges together) hyperedges, nested graphs, or ports. """ - writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint) + writer = GraphMLWriter(encoding=encoding, prettyprint=prettyprint) writer.add_graph_element(G) for line in str(writer).splitlines(): yield line -@open_file(0,mode='rb') -def read_graphml(path,node_type=str): + +@open_file(0, mode='rb') +def read_graphml(path, node_type=str): """Read graph in GraphML format from path. Parameters @@ -138,6 +201,19 @@ def read_graphml(path,node_type=str): Notes ----- + Default node and edge attributes are not propagated to each node and edge. + They can be obtained from `G.graph` and applied to node and edge attributes + if desired using something like this: + + >>> default_color = G.graph['node_default']['color'] # doctest: +SKIP + >>> for node, data in G.nodes(data=True): # doctest: +SKIP + ... if 'color' not in data: + ... data['color']=default_color + >>> default_color = G.graph['edge_default']['color'] # doctest: +SKIP + >>> for u, v, data in G.edges(data=True): # doctest: +SKIP + ... if 'color' not in data: + ... 
data['color']=default_color + This implementation does not support mixed graphs (directed and unidirected edges together), hypergraphs, nested graphs, or ports. @@ -155,11 +231,20 @@ def read_graphml(path,node_type=str): """ reader = GraphMLReader(node_type=node_type) # need to check for multiple graphs - glist=list(reader(path=path)) + glist = list(reader(path=path)) + if len(glist) == 0: + # If no graph comes back, try looking for an incomplete header + header = b'<graphml xmlns="http://graphml.graphdrawing.org/xmlns">' + path.seek(0) + old_bytes = path.read() + new_bytes = old_bytes.replace(b'<graphml>', header) + glist = list(reader(string=new_bytes)) + if len(glist) == 0: + raise nx.NetworkXError('file not successfully read as graphml') return glist[0] -def parse_graphml(graphml_string,node_type=str): +def parse_graphml(graphml_string, node_type=str): """Read graph in GraphML format from string. Parameters @@ -179,13 +264,26 @@ def parse_graphml(graphml_string,node_type=str): Examples -------- - >>> G=nx.path_graph(4) - >>> linefeed=chr(10) # linefeed=\n - >>> s=linefeed.join(nx.generate_graphml(G)) - >>> H=nx.parse_graphml(s) + >>> G = nx.path_graph(4) + >>> linefeed = chr(10) # linefeed = \n + >>> s = linefeed.join(nx.generate_graphml(G)) + >>> H = nx.parse_graphml(s) Notes ----- + Default node and edge attributes are not propagated to each node and edge. + They can be obtained from `G.graph` and applied to node and edge attributes + if desired using something like this: + + >>> default_color = G.graph['node_default']['color'] # doctest: +SKIP + >>> for node, data in G.nodes(data=True): # doctest: +SKIP + ... if 'color' not in data: + ... data['color']=default_color + >>> default_color = G.graph['edge_default']['color'] # doctest: +SKIP + >>> for u, v, data in G.edges(data=True): # doctest: +SKIP + ... if 'color' not in data: + ... data['color']=default_color + This implementation does not support mixed graphs (directed and unidirected edges together), hypergraphs, nested graphs, or ports. 
@@ -197,14 +295,21 @@ def parse_graphml(graphml_string,node_type=str): """ reader = GraphMLReader(node_type=node_type) # need to check for multiple graphs - glist=list(reader(string=graphml_string)) + glist = list(reader(string=graphml_string)) + if len(glist) == 0: + # If no graph comes back, try looking for an incomplete header + header = '<graphml xmlns="http://graphml.graphdrawing.org/xmlns">' + new_string = graphml_string.replace('<graphml>', header) + glist = list(reader(string=new_string)) + if len(glist) == 0: + raise nx.NetworkXError('file not successfully read as graphml') return glist[0] class GraphML(object): NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns" NS_XSI = "http://www.w3.org/2001/XMLSchema-instance" - #xmlns:y="http://www.yworks.com/xml/graphml" + # xmlns:y="http://www.yworks.com/xml/graphml" NS_Y = "http://www.yworks.com/xml/graphml" SCHEMALOCATION = \ ' '.join(['http://graphml.graphdrawing.org/xmlns', @@ -218,19 +323,19 @@ class GraphML(object): # Python 2.x pass - types=[(int,"integer"), # for Gephi GraphML bug - (str,"yfiles"),(str,"string"), (unicode,"string"), - (int,"int"), (long,"long"), - (float,"float"), (float,"double"), - (bool, "boolean")] + types = [(int, "integer"), # for Gephi GraphML bug + (str, "yfiles"), (str, "string"), (unicode, "string"), + (int, "int"), (long, "long"), + (float, "float"), (float, "double"), + (bool, "boolean")] xml_type = dict(types) python_type = dict(reversed(a) for a in types) # This page says that data types in GraphML follow Java(TM). - # http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition + # http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition # true and false are the only boolean literals: - # http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals + # http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals convert_bool = { # We use data.lower() in actual use. 'true': True, 'false': False, @@ -239,33 +344,35 @@ class GraphML(object): '1': True, 1: True } + class GraphMLWriter(GraphML): - def __init__(self, graph=None, encoding="utf-8", prettyprint=True, infer_numeric_types=False): + def __init__(self, graph=None, encoding="utf-8", prettyprint=True, + infer_numeric_types=False): try: import xml.etree.ElementTree except ImportError: - raise ImportError('GraphML writer requires ' - 'xml.elementtree.ElementTree') + msg = 'GraphML writer requires xml.elementtree.ElementTree' + raise ImportError(msg) + self.myElement = Element + self.infer_numeric_types = infer_numeric_types - self.prettyprint=prettyprint + self.prettyprint = prettyprint self.encoding = encoding - self.xml = Element("graphml", - {'xmlns':self.NS_GRAPHML, - 'xmlns:xsi':self.NS_XSI, - 'xsi:schemaLocation':self.SCHEMALOCATION} - ) - self.keys={} + self.xml = self.myElement("graphml", + {'xmlns': self.NS_GRAPHML, + 'xmlns:xsi': self.NS_XSI, + 'xsi:schemaLocation': self.SCHEMALOCATION}) + self.keys = {} self.attributes = defaultdict(list) self.attribute_types = defaultdict(set) if graph is not None: self.add_graph_element(graph) - def __str__(self): if self.prettyprint: self.indent(self.xml) - s=tostring(self.xml).decode(self.encoding) + s = tostring(self.xml).decode(self.encoding) return s def attr_type(self, name, scope, value): @@ -281,17 +388,23 @@ def attr_type(self, name, scope, value): types = self.attribute_types[(name, scope)] try: - chr(12345) # Fails on Py!=3. - long = int # Py3K's int is our long type + chr(12345) # Fails on Py<3. 
+ local_long = int # Py3's int is Py2's long type + local_unicode = str # Py3's str is Py2's unicode type except ValueError: # Python 2.x - pass + local_long = long + local_unicode = unicode if len(types) > 1: - if float in types: + if str in types: + return str + elif local_unicode in types: + return local_unicode + elif float in types: return float - elif long in types: - return long + elif local_long in types: + return local_long else: return int else: @@ -306,20 +419,19 @@ def get_key(self, name, attr_type, scope, default): except KeyError: new_id = "d%i" % len(list(self.keys)) self.keys[keys_key] = new_id - key_kwargs = {"id":new_id, - "for":scope, - "attr.name":name, - "attr.type":attr_type} - key_element=Element("key",**key_kwargs) + key_kwargs = {"id": new_id, + "for": scope, + "attr.name": name, + "attr.type": attr_type} + key_element = self.myElement("key", **key_kwargs) # add subelement for data default value if present if default is not None: - default_element=Element("default") - default_element.text=make_str(default) + default_element = self.myElement("default") + default_element.text = make_str(default) key_element.append(default_element) - self.xml.insert(0,key_element) + self.xml.insert(0, key_element) return new_id - def add_data(self, name, element_type, value, scope="all", default=None): @@ -328,10 +440,10 @@ def add_data(self, name, element_type, value, type in the keys table. """ if element_type not in self.xml_type: - raise nx.NetworkXError('GraphML writer does not support ' - '%s as data values.'%element_type) - key_id = self.get_key(name, self.xml_type[element_type], scope, default) - data_element = Element("data", key=key_id) + msg = 'GraphML writer does not support %s as data values.' + raise nx.NetworkXError(msg % element_type) + keyid = self.get_key(name, self.xml_type[element_type], scope, default) + data_element = self.myElement("data", key=keyid) data_element.text = make_str(value) return data_element @@ -339,32 +451,33 @@ def add_attributes(self, scope, xml_obj, data, default): """Appends attribute data to edges or nodes, and stores type information to be added later. See add_graph_element. 
""" - for k,v in data.items(): + for k, v in data.items(): self.attribute_types[(make_str(k), scope)].add(type(v)) self.attributes[xml_obj].append([k, v, scope, default.get(k)]) def add_nodes(self, G, graph_element): - for node,data in G.nodes(data=True): - node_element = Element("node", id = make_str(node)) - default=G.graph.get('node_default',{}) + default = G.graph.get('node_default', {}) + for node, data in G.nodes(data=True): + node_element = self.myElement("node", id=make_str(node)) self.add_attributes("node", node_element, data, default) graph_element.append(node_element) def add_edges(self, G, graph_element): if G.is_multigraph(): - for u,v,key,data in G.edges(data=True,keys=True): - edge_element = Element("edge",source=make_str(u), - target=make_str(v)) - default=G.graph.get('edge_default',{}) + for u, v, key, data in G.edges(data=True, keys=True): + edge_element = self.myElement("edge", source=make_str(u), + target=make_str(v), + id=make_str(key)) + default = G.graph.get('edge_default', {}) self.add_attributes("edge", edge_element, data, default) self.add_attributes("edge", edge_element, - {'key':key}, default) + {'key': key}, default) graph_element.append(edge_element) else: - for u,v,data in G.edges(data=True): - edge_element = Element("edge",source=make_str(u), - target=make_str(v)) - default=G.graph.get('edge_default',{}) + for u, v, data in G.edges(data=True): + edge_element = self.myElement("edge", source=make_str(u), + target=make_str(v)) + default = G.graph.get('edge_default', {}) self.add_attributes("edge", edge_element, data, default) graph_element.append(edge_element) @@ -373,42 +486,38 @@ def add_graph_element(self, G): Serialize graph G in GraphML to the stream. """ if G.is_directed(): - default_edge_type='directed' + default_edge_type = 'directed' else: - default_edge_type='undirected' + default_edge_type = 'undirected' - graphid=G.graph.pop('id',None) + graphid = G.graph.pop('id', None) if graphid is None: - graph_element = Element("graph", - edgedefault = default_edge_type) + graph_element = self.myElement("graph", + edgedefault=default_edge_type) else: - graph_element = Element("graph", - edgedefault = default_edge_type, - id=graphid) - - default={} - data=dict((k,v) for (k,v) in G.graph.items() - if k not in ['node_default','edge_default']) + graph_element = self.myElement("graph", + edgedefault=default_edge_type, + id=graphid) + default = {} + data = dict((k, v) for k, v in G.graph.items() + if k not in ['node_default', 'edge_default']) self.add_attributes("graph", graph_element, data, default) - self.add_nodes(G,graph_element) - self.add_edges(G,graph_element) + self.add_nodes(G, graph_element) + self.add_edges(G, graph_element) # self.attributes contains a mapping from XML Objects to a list of # data that needs to be added to them. - # We postpone processing of this in order to do type inference/generalization. + # We postpone processing in order to do type inference/generalization. # See self.attr_type for (xml_obj, data) in self.attributes.items(): for (k, v, scope, default) in data: - xml_obj.append(self.add_data(make_str(k), self.attr_type(k, scope, v), make_str(v), - scope, default)) - + xml_obj.append(self.add_data(make_str(k), + self.attr_type(k, scope, v), + make_str(v), scope, default)) self.xml.append(graph_element) - def add_graphs(self, graph_list): - """ - Add many graphs to this GraphML document. - """ + """ Add many graphs to this GraphML document. 
""" for G in graph_list: self.add_graph_element(G) @@ -420,7 +529,7 @@ def dump(self, stream): def indent(self, elem, level=0): # in-place prettyprint formatter - i = "\n" + level*" " + i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " @@ -435,51 +544,197 @@ def indent(self, elem, level=0): elem.tail = i -class GraphMLReader(GraphML): - """Read a GraphML document. Produces NetworkX graph objects. +class IncrementalElement(object): + """Wrapper for _IncrementalWriter providing an Element like interface. + + This wrapper does not intend to be a complete implemenation but rather to + deal with those calls used in GraphMLWriter. """ + + def __init__(self, xml, prettyprint): + self.xml = xml + self.prettyprint = prettyprint + + def append(self, element): + self.xml.write(element, pretty_print=self.prettyprint) + + +class GraphMLWriterLxml(GraphMLWriter): + def __init__(self, path, graph=None, encoding='utf-8', prettyprint=True, + infer_numeric_types=False): + self.myElement = lxmletree.Element + + self._encoding = encoding + self._prettyprint = prettyprint + self.infer_numeric_types = infer_numeric_types + + self._xml_base = lxmletree.xmlfile(path, encoding=encoding) + self._xml = self._xml_base.__enter__() + self._xml.write_declaration() + + # We need to have a xml variable that support insertion. This call is + # used for adding the keys to the document. + # We will store those keys in a plain list, and then after the graph + # element is closed we will add them to the main graphml element. + self.xml = [] + self._keys = self.xml + self._graphml = self._xml.element( + 'graphml', + { + 'xmlns': self.NS_GRAPHML, + 'xmlns:xsi': self.NS_XSI, + 'xsi:schemaLocation': self.SCHEMALOCATION + }) + self._graphml.__enter__() + self.keys = {} + self.attribute_types = defaultdict(set) + + if graph is not None: + self.add_graph_element(graph) + + def add_graph_element(self, G): + """ + Serialize graph G in GraphML to the stream. + """ + if G.is_directed(): + default_edge_type = 'directed' + else: + default_edge_type = 'undirected' + + graphid = G.graph.pop('id', None) + if graphid is None: + graph_element = self._xml.element('graph', + edgedefault=default_edge_type) + else: + graph_element = self._xml.element('graph', + edgedefault=default_edge_type, + id=graphid) + + # gather attributes types for the whole graph + # to find the most general numeric format needed. + # Then pass through attributes to create key_id for each. 
+ graphdata = {k: v for k, v in G.graph.items() + if k not in ('node_default', 'edge_default')} + node_default = G.graph.get('node_default', {}) + edge_default = G.graph.get('edge_default', {}) + # Graph attributes + for k, v in graphdata.items(): + self.attribute_types[(make_str(k), "graph")].add(type(v)) + for k, v in graphdata.items(): + element_type = self.xml_type[self.attr_type(k, "graph", v)] + self.get_key(make_str(k), element_type, "graph", None) + # Nodes and data + attributes = {} + for node, d in G.nodes(data=True): + for k, v in d.items(): + self.attribute_types[(make_str(k), "node")].add(type(v)) + if k not in attributes: + attributes[k] = v + for k, v in attributes.items(): + T = self.xml_type[self.attr_type(k, "node", v)] + self.get_key(make_str(k), T, "node", node_default.get(k)) + # Edges and data + if G.is_multigraph(): + attributes = {} + for u, v, ekey, d in G.edges(keys=True, data=True): + self.attribute_types[("key", "edge")].add(type(ekey)) + if "key" not in attributes: + attributes["key"] = ekey + for k, v in d.items(): + self.attribute_types[(make_str(k), "edge")].add(type(v)) + if k not in attributes: + attributes[k] = v + for k, v in attributes.items(): + T = self.xml_type[self.attr_type(k, "edge", v)] + self.get_key(make_str(k), T, "edge", edge_default.get(k)) + else: + attributes = {} + for u, v, d in G.edges(data=True): + for k, v in d.items(): + self.attribute_types[(make_str(k), "edge")].add(type(v)) + if k not in attributes: + attributes[k] = v + for k, v in attributes.items(): + T = self.xml_type[self.attr_type(k, "edge", v)] + self.get_key(make_str(k), T, "edge", edge_default.get(k)) + + # Now add attribute keys to the xml file + for key in self.xml: + self._xml.write(key, pretty_print=self._prettyprint) + + # The incremental_writer writes each node/edge as it is created + incremental_writer = IncrementalElement(self._xml, self._prettyprint) + with graph_element: + self.add_attributes('graph', incremental_writer, graphdata, {}) + self.add_nodes(G, incremental_writer) # adds attributes too + self.add_edges(G, incremental_writer) # adds attributes too + + def add_attributes(self, scope, xml_obj, data, default): + """Appends attribute data.""" + for k, v in data.items(): + data_element = self.add_data(make_str(k), + self.attr_type(make_str(k), scope, v), + make_str(v), scope, default.get(k)) + xml_obj.append(data_element) + + def __str__(self): + return object.__str__(self) + + def dump(self): + self._graphml.__exit__(None, None, None) + self._xml_base.__exit__(None, None, None) + +# Choose a writer function for default +if lxmletree is None: + write_graphml = write_graphml_xml +else: + write_graphml = write_graphml_lxml + + +class GraphMLReader(GraphML): + """Read a GraphML document. 
Produces NetworkX graph objects.""" def __init__(self, node_type=str): try: import xml.etree.ElementTree except ImportError: - raise ImportError('GraphML reader requires ' - 'xml.elementtree.ElementTree') - self.node_type=node_type - self.multigraph=False # assume multigraph and test for parallel edges + msg = 'GraphML reader requires xml.elementtree.ElementTree' + raise ImportError(msg) + self.node_type = node_type + self.multigraph = False # assume multigraph and test for multiedges def __call__(self, path=None, string=None): if path is not None: - self.xml = ElementTree(file=path) + self.xml = ElementTree(file=path) elif string is not None: - self.xml = fromstring(string) + self.xml = fromstring(string) else: - raise ValueError("Must specify either 'path' or 'string' as kwarg.") - (keys,defaults) = self.find_graphml_keys(self.xml) + raise ValueError("Must specify either 'path' or 'string' as kwarg") + (keys, defaults) = self.find_graphml_keys(self.xml) for g in self.xml.findall("{%s}graph" % self.NS_GRAPHML): yield self.make_graph(g, keys, defaults) def make_graph(self, graph_xml, graphml_keys, defaults): # set default graph type edgedefault = graph_xml.get("edgedefault", None) - if edgedefault=='directed': - G=nx.MultiDiGraph() + if edgedefault == 'directed': + G = nx.MultiDiGraph() else: - G=nx.MultiGraph() + G = nx.MultiGraph() # set defaults for graph attributes - G.graph['node_default']={} - G.graph['edge_default']={} - for key_id,value in defaults.items(): - key_for=graphml_keys[key_id]['for'] - name=graphml_keys[key_id]['name'] - python_type=graphml_keys[key_id]['type'] - if key_for=='node': - G.graph['node_default'].update({name:python_type(value)}) - if key_for=='edge': - G.graph['edge_default'].update({name:python_type(value)}) + G.graph['node_default'] = {} + G.graph['edge_default'] = {} + for key_id, value in defaults.items(): + key_for = graphml_keys[key_id]['for'] + name = graphml_keys[key_id]['name'] + python_type = graphml_keys[key_id]['type'] + if key_for == 'node': + G.graph['node_default'].update({name: python_type(value)}) + if key_for == 'edge': + G.graph['edge_default'].update({name: python_type(value)}) # hyperedges are not supported - hyperedge=graph_xml.find("{%s}hyperedge" % self.NS_GRAPHML) + hyperedge = graph_xml.find("{%s}hyperedge" % self.NS_GRAPHML) if hyperedge is not None: - raise nx.NetworkXError("GraphML reader does not support hyperedges") + raise nx.NetworkXError("GraphML reader doesn't support hyperedges") # add nodes for node_xml in graph_xml.findall("{%s}node" % self.NS_GRAPHML): self.add_node(G, node_xml, graphml_keys) @@ -503,7 +758,7 @@ def add_node(self, G, node_xml, graphml_keys): """Add a node to the graph. """ # warn on finding unsupported ports tag - ports=node_xml.find("{%s}port" % self.NS_GRAPHML) + ports = node_xml.find("{%s}port" % self.NS_GRAPHML) if ports is not None: warnings.warn("GraphML port tag not supported.") # find the node by id and cast it to the appropriate type @@ -516,18 +771,18 @@ def add_edge(self, G, edge_element, graphml_keys): """Add an edge to the graph. 
""" # warn on finding unsupported ports tag - ports=edge_element.find("{%s}port" % self.NS_GRAPHML) + ports = edge_element.find("{%s}port" % self.NS_GRAPHML) if ports is not None: warnings.warn("GraphML port tag not supported.") # raise error if we find mixed directed and undirected edges directed = edge_element.get("directed") - if G.is_directed() and directed=='false': - raise nx.NetworkXError(\ - "directed=false edge found in directed graph.") - if (not G.is_directed()) and directed=='true': - raise nx.NetworkXError(\ - "directed=true edge found in undirected graph.") + if G.is_directed() and directed == 'false': + msg = "directed=false edge found in directed graph." + raise nx.NetworkXError(msg) + if (not G.is_directed()) and directed == 'true': + msg = "directed=true edge found in undirected graph." + raise nx.NetworkXError(msg) source = self.node_type(edge_element.get("source")) target = self.node_type(edge_element.get("target")) @@ -537,14 +792,19 @@ def add_edge(self, G, edge_element, graphml_keys): # attribute is specified edge_id = edge_element.get("id") if edge_id: - data["id"] = edge_id - if G.has_edge(source,target): + if 'key' in data and make_str(data['key']) == edge_id: + # If id there equivalent to 'key' attribute, use key + edge_id = data.pop('key') + else: + data["id"] = edge_id + if G.has_edge(source, target): # mark this as a multigraph - self.multigraph=True + self.multigraph = True if edge_id is None: # no id specified, try using 'key' attribute as id - edge_id=data.pop('key',None) - G.add_edge(source, target, key=edge_id, **data) + edge_id = data.pop('key', None) + # Use add_edges_from to avoid error with add_edge when `'key' in data` + G.add_edges_from([(source, target, edge_id, data)]) def decode_data_elements(self, graphml_keys, obj_xml): """Use the key information to decode the data XML if present.""" @@ -552,16 +812,17 @@ def decode_data_elements(self, graphml_keys, obj_xml): for data_element in obj_xml.findall("{%s}data" % self.NS_GRAPHML): key = data_element.get("key") try: - data_name=graphml_keys[key]['name'] - data_type=graphml_keys[key]['type'] + data_name = graphml_keys[key]['name'] + data_type = graphml_keys[key]['type'] except KeyError: - raise nx.NetworkXError("Bad GraphML data: no key %s"%key) - text=data_element.text + raise nx.NetworkXError("Bad GraphML data: no key %s" % key) + text = data_element.text # assume anything with subelements is a yfiles extension - if text is not None and len(list(data_element))==0: + if text is not None and len(list(data_element)) == 0: if data_type == bool: # Ignore cases. 
- # http://docs.oracle.com/javase/6/docs/api/java/lang/Boolean.html#parseBoolean%28java.lang.String%29 + # http://docs.oracle.com/javase/6/docs/api/java/lang/ + # Boolean.html#parseBoolean%28java.lang.String%29 data[data_name] = self.convert_bool[text.lower()] else: data[data_name] = data_type(text) @@ -569,23 +830,23 @@ def decode_data_elements(self, graphml_keys, obj_xml): # Assume yfiles as subelements, try to extract node_label node_label = None for node_type in ['ShapeNode', 'SVGNode', 'ImageNode']: - geometry = data_element.find("{%s}%s/{%s}Geometry" % - (self.NS_Y, node_type, self.NS_Y)) + pref = "{%s}%s/{%s}" % (self.NS_Y, node_type, self.NS_Y) + geometry = data_element.find("%sGeometry" % pref) if geometry is not None: data['x'] = geometry.get('x') data['y'] = geometry.get('y') if node_label is None: - node_label = data_element.find("{%s}%s/{%s}NodeLabel" % - (self.NS_Y, node_type, self.NS_Y)) + node_label = data_element.find("%sNodeLabel" % pref) if node_label is not None: data['label'] = node_label.text # check all the diffrent types of edges avaivable in yEd. - for e in ['PolyLineEdge', 'SplineEdge', 'QuadCurveEdge', 'BezierEdge', 'ArcEdge']: - edge_label = data_element.find("{%s}%s/{%s}EdgeLabel"% - (self.NS_Y, e, (self.NS_Y))) - if edge_label is not None: - break + for e in ['PolyLineEdge', 'SplineEdge', 'QuadCurveEdge', + 'BezierEdge', 'ArcEdge']: + pref = "{%s}%s/{%s}" % (self.NS_Y, e, self.NS_Y) + edge_label = data_element.find("%sEdgeLabel" % pref) + if edge_label is not None: + break if edge_label is not None: data['label'] = edge_label.text @@ -598,26 +859,26 @@ def find_graphml_keys(self, graph_element): graphml_key_defaults = {} for k in graph_element.findall("{%s}key" % self.NS_GRAPHML): attr_id = k.get("id") - attr_type=k.get('attr.type') - attr_name=k.get("attr.name") - yfiles_type=k.get("yfiles.type") + attr_type = k.get('attr.type') + attr_name = k.get("attr.name") + yfiles_type = k.get("yfiles.type") if yfiles_type is not None: attr_name = yfiles_type attr_type = 'yfiles' if attr_type is None: attr_type = "string" - warnings.warn("No key type for id %s. Using string"%attr_id) + warnings.warn("No key type for id %s. Using string" % attr_id) if attr_name is None: - raise nx.NetworkXError("Unknown key for id %s in file."%attr_id) - graphml_keys[attr_id] = { - "name":attr_name, - "type":self.python_type[attr_type], - "for":k.get("for")} + raise nx.NetworkXError("Unknown key for id %s." % attr_id) + graphml_keys[attr_id] = {"name": attr_name, + "type": self.python_type[attr_type], + "for": k.get("for")} # check for "default" subelement of key element - default=k.find("{%s}default" % self.NS_GRAPHML) + default = k.find("{%s}default" % self.NS_GRAPHML) if default is not None: - graphml_key_defaults[attr_id]=default.text - return graphml_keys,graphml_key_defaults + graphml_key_defaults[attr_id] = default.text + return graphml_keys, graphml_key_defaults + # fixture for nose tests def setup_module(module): @@ -627,6 +888,7 @@ def setup_module(module): except: raise SkipTest("xml.etree.ElementTree not available") + # fixture for nose tests def teardown_module(module): import os
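The patch above ends by binding `write_graphml` to whichever writer is available; a quick illustrative check of that dispatch (it mirrors the `test_write_interface` test below, and assumes the patch is applied):

```python
# write_graphml resolves to the lxml-backed writer when lxml imports,
# and falls back to the xml.etree-backed writer otherwise.
import networkx as nx

try:
    import lxml.etree  # noqa: F401 -- only probing availability
    assert nx.write_graphml is nx.write_graphml_lxml
except ImportError:
    assert nx.write_graphml is nx.write_graphml_xml
```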
diff --git a/networkx/readwrite/tests/test_graphml.py b/networkx/readwrite/tests/test_graphml.py --- a/networkx/readwrite/tests/test_graphml.py +++ b/networkx/readwrite/tests/test_graphml.py @@ -7,19 +7,15 @@ import tempfile import os -class TestGraph(object): - @classmethod - def setupClass(cls): - try: - import xml.etree.ElementTree - except ImportError: - raise SkipTest('xml.etree.ElementTree not available.') +class BaseGraphML(object): def setUp(self): - self.simple_directed_data="""<?xml version="1.0" encoding="UTF-8"?> + self.simple_directed_data = """<?xml version="1.0" encoding="UTF-8"?> <!-- This file was written by the JAVA GraphML Library.--> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" -xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G" edgedefault="directed"> <node id="n0"/> <node id="n1"/> @@ -45,26 +41,24 @@ def setUp(self): <edge source="n8" target="n9"/> </graph> </graphml>""" - self.simple_directed_graph=nx.DiGraph() + self.simple_directed_graph = nx.DiGraph() self.simple_directed_graph.add_node('n10') - self.simple_directed_graph.add_edge('n0','n2',id='foo') - self.simple_directed_graph.add_edges_from([('n1','n2'), - ('n2','n3'), - ('n3','n5'), - ('n3','n4'), - ('n4','n6'), - ('n6','n5'), - ('n5','n7'), - ('n6','n8'), - ('n8','n7'), - ('n8','n9'), + self.simple_directed_graph.add_edge('n0', 'n2', id='foo') + self.simple_directed_graph.add_edges_from([('n1', 'n2'), + ('n2', 'n3'), + ('n3', 'n5'), + ('n3', 'n4'), + ('n4', 'n6'), + ('n6', 'n5'), + ('n5', 'n7'), + ('n6', 'n8'), + ('n8', 'n7'), + ('n8', 'n9'), ]) - self.simple_directed_fh = \ io.BytesIO(self.simple_directed_data.encode('UTF-8')) - - self.attribute_data="""<?xml version="1.0" encoding="UTF-8"?> + self.attribute_data = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns @@ -106,24 +100,27 @@ def setUp(self): </graph> </graphml> """ - self.attribute_graph=nx.DiGraph(id='G') - self.attribute_graph.graph['node_default']={'color':'yellow'} - self.attribute_graph.add_node('n0',color='green') - self.attribute_graph.add_node('n2',color='blue') - self.attribute_graph.add_node('n3',color='red') + self.attribute_graph = nx.DiGraph(id='G') + self.attribute_graph.graph['node_default'] = {'color': 'yellow'} + self.attribute_graph.add_node('n0', color='green') + self.attribute_graph.add_node('n2', color='blue') + self.attribute_graph.add_node('n3', color='red') self.attribute_graph.add_node('n4') - self.attribute_graph.add_node('n5',color='turquoise') - self.attribute_graph.add_edge('n0','n2',id='e0',weight=1.0) - self.attribute_graph.add_edge('n0','n1',id='e1',weight=1.0) - self.attribute_graph.add_edge('n1','n3',id='e2',weight=2.0) - self.attribute_graph.add_edge('n3','n2',id='e3') - self.attribute_graph.add_edge('n2','n4',id='e4') - self.attribute_graph.add_edge('n3','n5',id='e5') - self.attribute_graph.add_edge('n5','n4',id='e6',weight=1.1) + self.attribute_graph.add_node('n5', color='turquoise') + self.attribute_graph.add_edge('n0', 'n2', id='e0', weight=1.0) + self.attribute_graph.add_edge('n0', 
'n1', id='e1', weight=1.0) + self.attribute_graph.add_edge('n1', 'n3', id='e2', weight=2.0) + self.attribute_graph.add_edge('n3', 'n2', id='e3') + self.attribute_graph.add_edge('n2', 'n4', id='e4') + self.attribute_graph.add_edge('n3', 'n5', id='e5') + self.attribute_graph.add_edge('n5', 'n4', id='e6', weight=1.1) self.attribute_fh = io.BytesIO(self.attribute_data.encode('UTF-8')) self.attribute_numeric_type_data = """<?xml version='1.0' encoding='utf-8'?> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key attr.name="weight" attr.type="double" for="node" id="d1" /> <key attr.name="weight" attr.type="double" for="edge" id="d0" /> <graph edgedefault="directed"> @@ -136,23 +133,28 @@ def setUp(self): <edge source="n0" target="n1"> <data key="d0">1</data> </edge> + <edge source="n1" target="n0"> + <data key="d0">k</data> + </edge> <edge source="n1" target="n1"> <data key="d0">1.0</data> </edge> </graph> </graphml> """ - self.attribute_numeric_type_graph = nx.DiGraph() self.attribute_numeric_type_graph.add_node('n0', weight=1) self.attribute_numeric_type_graph.add_node('n1', weight=2.0) self.attribute_numeric_type_graph.add_edge('n0', 'n1', weight=1) self.attribute_numeric_type_graph.add_edge('n1', 'n1', weight=1.0) - self.attribute_numeric_type_fh = io.BytesIO(self.attribute_numeric_type_data.encode('UTF-8')) + fh = io.BytesIO(self.attribute_numeric_type_data.encode('UTF-8')) + self.attribute_numeric_type_fh = fh - self.simple_undirected_data="""<?xml version="1.0" encoding="UTF-8"?> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" -xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + self.simple_undirected_data = """<?xml version="1.0" encoding="UTF-8"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G"> <node id="n0"/> <node id="n1"/> @@ -164,101 +166,66 @@ def setUp(self): </graph> </graphml>""" # <edge source="n8" target="n10" directed="false"/> - self.simple_undirected_graph=nx.Graph() + self.simple_undirected_graph = nx.Graph() self.simple_undirected_graph.add_node('n10') - self.simple_undirected_graph.add_edge('n0','n2',id='foo') - self.simple_undirected_graph.add_edges_from([('n1','n2'), - ('n2','n3'), - ]) - - self.simple_undirected_fh = io.BytesIO(self.simple_undirected_data.encode('UTF-8')) + self.simple_undirected_graph.add_edge('n0', 'n2', id='foo') + self.simple_undirected_graph.add_edges_from([('n1', 'n2'), + ('n2', 'n3'), + ]) + fh = io.BytesIO(self.simple_undirected_data.encode('UTF-8')) + self.simple_undirected_fh = fh +class TestReadGraphML(BaseGraphML): def test_read_simple_directed_graphml(self): - G=self.simple_directed_graph - H=nx.read_graphml(self.simple_directed_fh) - assert_equal(sorted(G.nodes()),sorted(H.nodes())) - assert_equal(sorted(G.edges()),sorted(H.edges())) + G = self.simple_directed_graph + H = nx.read_graphml(self.simple_directed_fh) + 
assert_equal(sorted(G.nodes()), sorted(H.nodes())) + assert_equal(sorted(G.edges()), sorted(H.edges())) assert_equal(sorted(G.edges(data=True)), sorted(H.edges(data=True))) self.simple_directed_fh.seek(0) - I=nx.parse_graphml(self.simple_directed_data) - assert_equal(sorted(G.nodes()),sorted(I.nodes())) - assert_equal(sorted(G.edges()),sorted(I.edges())) + I = nx.parse_graphml(self.simple_directed_data) + assert_equal(sorted(G.nodes()), sorted(I.nodes())) + assert_equal(sorted(G.edges()), sorted(I.edges())) assert_equal(sorted(G.edges(data=True)), sorted(I.edges(data=True))) - def test_write_read_simple_directed_graphml(self): - G=self.simple_directed_graph - fh=io.BytesIO() - nx.write_graphml(G,fh) - fh.seek(0) - H=nx.read_graphml(fh) - assert_equal(sorted(G.nodes()),sorted(H.nodes())) - assert_equal(sorted(G.edges()),sorted(H.edges())) - assert_equal(sorted(G.edges(data=True)), - sorted(H.edges(data=True))) - self.simple_directed_fh.seek(0) - def test_read_simple_undirected_graphml(self): - G=self.simple_undirected_graph - H=nx.read_graphml(self.simple_undirected_fh) + G = self.simple_undirected_graph + H = nx.read_graphml(self.simple_undirected_fh) assert_nodes_equal(G.nodes(), H.nodes()) assert_edges_equal(G.edges(), H.edges()) self.simple_undirected_fh.seek(0) - I=nx.parse_graphml(self.simple_undirected_data) + I = nx.parse_graphml(self.simple_undirected_data) assert_nodes_equal(G.nodes(), I.nodes()) assert_edges_equal(G.edges(), I.edges()) - def test_write_read_attribute_numeric_type_graphml(self): - from xml.etree.ElementTree import parse - - G = self.attribute_numeric_type_graph - fh = io.BytesIO() - nx.write_graphml(G, fh, infer_numeric_types=True) - fh.seek(0) - H = nx.read_graphml(fh) - fh.seek(0) - - assert_nodes_equal(G.nodes(), H.nodes()) - assert_edges_equal(G.edges(), H.edges()) - assert_edges_equal(G.edges(data=True), H.edges(data=True)) - self.attribute_numeric_type_fh.seek(0) - - xml = parse(fh) - # Children are the key elements, and the graph element - children = xml.getroot().getchildren() - assert_equal(len(children), 3) - - keys = [child.items() for child in children[:2]] - - assert_equal(len(keys), 2) - assert_in(('attr.type', 'double'), keys[0]) - assert_in(('attr.type', 'double'), keys[1]) - def test_read_attribute_graphml(self): - G=self.attribute_graph - H=nx.read_graphml(self.attribute_fh) - assert_nodes_equal(G.nodes(True),sorted(H.nodes(data=True))) - ge=sorted(G.edges(data=True)) - he=sorted(H.edges(data=True)) - for a,b in zip(ge,he): - assert_equal(a,b) + G = self.attribute_graph + H = nx.read_graphml(self.attribute_fh) + assert_nodes_equal(G.nodes(True), sorted(H.nodes(data=True))) + ge = sorted(G.edges(data=True)) + he = sorted(H.edges(data=True)) + for a, b in zip(ge, he): + assert_equal(a, b) self.attribute_fh.seek(0) - I=nx.parse_graphml(self.attribute_data) - assert_equal(sorted(G.nodes(True)),sorted(I.nodes(data=True))) - ge=sorted(G.edges(data=True)) - he=sorted(I.edges(data=True)) - for a,b in zip(ge,he): - assert_equal(a,b) + I = nx.parse_graphml(self.attribute_data) + assert_equal(sorted(G.nodes(True)), sorted(I.nodes(data=True))) + ge = sorted(G.edges(data=True)) + he = sorted(I.edges(data=True)) + for a, b in zip(ge, he): + assert_equal(a, b) def test_directed_edge_in_undirected(self): - s="""<?xml version="1.0" encoding="UTF-8"?> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" -xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns 
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + s = """<?xml version="1.0" encoding="UTF-8"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G"> <node id="n0"/> <node id="n1"/> @@ -268,13 +235,15 @@ def test_directed_edge_in_undirected(self): </graph> </graphml>""" fh = io.BytesIO(s.encode('UTF-8')) - assert_raises(nx.NetworkXError,nx.read_graphml,fh) - assert_raises(nx.NetworkXError,nx.parse_graphml,s) + assert_raises(nx.NetworkXError, nx.read_graphml, fh) + assert_raises(nx.NetworkXError, nx.parse_graphml, s) def test_undirected_edge_in_directed(self): - s="""<?xml version="1.0" encoding="UTF-8"?> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" -xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + s = """<?xml version="1.0" encoding="UTF-8"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G" edgedefault='directed'> <node id="n0"/> <node id="n1"/> @@ -284,15 +253,15 @@ def test_undirected_edge_in_directed(self): </graph> </graphml>""" fh = io.BytesIO(s.encode('UTF-8')) - assert_raises(nx.NetworkXError,nx.read_graphml,fh) - assert_raises(nx.NetworkXError,nx.parse_graphml,s) + assert_raises(nx.NetworkXError, nx.read_graphml, fh) + assert_raises(nx.NetworkXError, nx.parse_graphml, s) def test_key_raise(self): - s="""<?xml version="1.0" encoding="UTF-8"?> + s = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns - http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key id="d0" for="node" attr.name="color" attr.type="string"> <default>yellow</default> </key> @@ -312,15 +281,15 @@ def test_key_raise(self): </graphml> """ fh = io.BytesIO(s.encode('UTF-8')) - assert_raises(nx.NetworkXError,nx.read_graphml,fh) - assert_raises(nx.NetworkXError,nx.parse_graphml,s) + assert_raises(nx.NetworkXError, nx.read_graphml, fh) + assert_raises(nx.NetworkXError, nx.parse_graphml, s) def test_hyperedge_raise(self): - s="""<?xml version="1.0" encoding="UTF-8"?> + s = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns - http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key id="d0" for="node" attr.name="color" attr.type="string"> <default>yellow</default> </key> @@ -342,37 +311,40 @@ def test_hyperedge_raise(self): </graphml> """ fh = io.BytesIO(s.encode('UTF-8')) - assert_raises(nx.NetworkXError,nx.read_graphml,fh) - assert_raises(nx.NetworkXError,nx.parse_graphml,s) - - # remove test until we get the "name" issue sorted - # 
https://networkx.lanl.gov/trac/ticket/544 - def test_default_attribute(self): - G=nx.Graph() - G.add_node(1,label=1,color='green') - nx.add_path(G, [0, 1, 2, 3]) - G.add_edge(1,2,weight=3) - G.graph['node_default']={'color':'yellow'} - G.graph['edge_default']={'weight':7} - fh = io.BytesIO() - nx.write_graphml(G,fh) - fh.seek(0) - H=nx.read_graphml(fh,node_type=int) - assert_nodes_equal(G.nodes(), H.nodes()) - assert_edges_equal(G.edges(), H.edges()) - assert_equal(G.graph,H.graph) + assert_raises(nx.NetworkXError, nx.read_graphml, fh) + assert_raises(nx.NetworkXError, nx.parse_graphml, s) def test_multigraph_keys(self): - # test that multigraphs use edge id attributes as key - pass - - def test_multigraph_to_graph(self): - # test converting multigraph to graph if no parallel edges are found - pass + # Test that reading multigraphs uses edge id attributes as keys + s = """<?xml version="1.0" encoding="UTF-8"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + <graph id="G" edgedefault="directed"> + <node id="n0"/> + <node id="n1"/> + <edge id="e0" source="n0" target="n1"/> + <edge id="e1" source="n0" target="n1"/> + </graph> +</graphml> +""" + fh = io.BytesIO(s.encode('UTF-8')) + G = nx.read_graphml(fh) + expected = [("n0", "n1", "e0"), ("n0", "n1", "e1")] + assert_equal(sorted(G.edges(keys=True)), expected) + fh.seek(0) + H = nx.parse_graphml(s) + assert_equal(sorted(H.edges(keys=True)), expected) def test_yfiles_extension(self): - data="""<?xml version="1.0" encoding="UTF-8" standalone="no"?> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd"> + data = """<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:y="http://www.yworks.com/xml/graphml" + xmlns:yed="http://www.yworks.com/xml/yed/3" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <!--Created by yFiles for Java 2.7--> <key for="graphml" id="d0" yfiles.type="resources"/> <key attr.name="url" attr.type="string" for="node" id="d1"/> @@ -391,7 +363,12 @@ def test_yfiles_extension(self): <y:Geometry height="30.0" width="30.0" x="125.0" y="100.0"/> <y:Fill color="#FFCC00" transparent="false"/> <y:BorderStyle color="#000000" type="line" width="1.0"/> - <y:NodeLabel alignment="center" autoSizePolicy="content" borderDistance="0.0" fontFamily="Dialog" fontSize="13" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="19.1328125" modelName="internal" modelPosition="c" textColor="#000000" visible="true" width="12.27099609375" x="8.864501953125" y="5.43359375">1</y:NodeLabel> + <y:NodeLabel alignment="center" autoSizePolicy="content" + borderDistance="0.0" fontFamily="Dialog" fontSize="13" + fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" + height="19.1328125" modelName="internal" modelPosition="c" + textColor="#000000" visible="true" width="12.27099609375" + x="8.864501953125" y="5.43359375">1</y:NodeLabel> <y:Shape type="rectangle"/> </y:ShapeNode> </data> @@ -402,7 +379,12 @@ 
def test_yfiles_extension(self): <y:Geometry height="30.0" width="30.0" x="183.0" y="205.0"/> <y:Fill color="#FFCC00" transparent="false"/> <y:BorderStyle color="#000000" type="line" width="1.0"/> - <y:NodeLabel alignment="center" autoSizePolicy="content" borderDistance="0.0" fontFamily="Dialog" fontSize="13" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="19.1328125" modelName="internal" modelPosition="c" textColor="#000000" visible="true" width="12.27099609375" x="8.864501953125" y="5.43359375">2</y:NodeLabel> + <y:NodeLabel alignment="center" autoSizePolicy="content" + borderDistance="0.0" fontFamily="Dialog" fontSize="13" + fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" + height="19.1328125" modelName="internal" modelPosition="c" + textColor="#000000" visible="true" width="12.27099609375" + x="8.864501953125" y="5.43359375">2</y:NodeLabel> <y:Shape type="rectangle"/> </y:ShapeNode> </data> @@ -424,43 +406,24 @@ def test_yfiles_extension(self): </graphml> """ fh = io.BytesIO(data.encode('UTF-8')) - G=nx.read_graphml(fh) - assert_equal(list(G.edges()),[('n0','n1')]) - assert_equal(G['n0']['n1']['id'],'e0') - assert_equal(G.node['n0']['label'],'1') - assert_equal(G.node['n1']['label'],'2') - - H=nx.parse_graphml(data) - assert_equal(list(H.edges()),[('n0','n1')]) - assert_equal(H['n0']['n1']['id'],'e0') - assert_equal(H.node['n0']['label'],'1') - assert_equal(H.node['n1']['label'],'2') - - def test_unicode(self): - G = nx.Graph() - try: # Python 3.x - name1 = chr(2344) + chr(123) + chr(6543) - name2 = chr(5543) + chr(1543) + chr(324) - node_type=str - except ValueError: # Python 2.6+ - name1 = unichr(2344) + unichr(123) + unichr(6543) - name2 = unichr(5543) + unichr(1543) + unichr(324) - node_type=unicode - G.add_edge(name1, 'Radiohead', foo=name2) - fd, fname = tempfile.mkstemp() - nx.write_graphml(G, fname) - H = nx.read_graphml(fname,node_type=node_type) - assert_equal(G.adj, H.adj) - os.close(fd) - os.unlink(fname) + G = nx.read_graphml(fh) + assert_equal(list(G.edges()), [('n0', 'n1')]) + assert_equal(G['n0']['n1']['id'], 'e0') + assert_equal(G.node['n0']['label'], '1') + assert_equal(G.node['n1']['label'], '2') + H = nx.parse_graphml(data) + assert_equal(list(H.edges()), [('n0', 'n1')]) + assert_equal(H['n0']['n1']['id'], 'e0') + assert_equal(H.node['n0']['label'], '1') + assert_equal(H.node['n1']['label'], '2') def test_bool(self): s = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns - http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key id="d0" for="node" attr.name="test" attr.type="boolean"> <default>false</default> </key> @@ -497,3 +460,199 @@ def test_bool(self): assert_equal(graph.node['n4']['test'], True) assert_equal(graph.node['n5']['test'], False) assert_equal(graph.node['n6']['test'], True) + + def test_graphml_header_line(self): + good = """<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + <key id="d0" for="node" attr.name="test" attr.type="boolean"> + 
<default>false</default> + </key> + <graph id="G"> + <node id="n0"> + <data key="d0">true</data> + </node> + </graph> +</graphml> +""" + bad = """<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<graphml> + <key id="d0" for="node" attr.name="test" attr.type="boolean"> + <default>false</default> + </key> + <graph id="G"> + <node id="n0"> + <data key="d0">true</data> + </node> + </graph> +</graphml> +""" + ugly = """<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<graphml xmlns="https://ghghgh"> + <key id="d0" for="node" attr.name="test" attr.type="boolean"> + <default>false</default> + </key> + <graph id="G"> + <node id="n0"> + <data key="d0">true</data> + </node> + </graph> +</graphml> +""" + for s in (good, bad): + fh = io.BytesIO(s.encode('UTF-8')) + G = nx.read_graphml(fh) + H = nx.parse_graphml(s) + for graph in [G, H]: + assert_equal(graph.node['n0']['test'], True) + + fh = io.BytesIO(ugly.encode('UTF-8')) + assert_raises(nx.NetworkXError, nx.read_graphml, fh) + assert_raises(nx.NetworkXError, nx.parse_graphml, ugly) + + +class TestWriteGraphML(BaseGraphML): + writer = staticmethod(nx.write_graphml_lxml) + + @classmethod + def setupClass(cls): + try: + import lxml.etree + except ImportError: + raise SkipTest('lxml.etree not available.') + + def test_write_interface(self): + try: + import lxml.etree + assert_equal(nx.write_graphml, nx.write_graphml_lxml) + except ImportError: + assert_equal(nx.write_graphml, nx.write_graphml_xml) + + def test_write_read_simple_directed_graphml(self): + G = self.simple_directed_graph + G.graph['hi'] = 'there' + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert_equal(sorted(G.nodes()), sorted(H.nodes())) + assert_equal(sorted(G.edges()), sorted(H.edges())) + assert_equal(sorted(G.edges(data=True)), sorted(H.edges(data=True))) + self.simple_directed_fh.seek(0) + + def test_write_read_attribute_numeric_type_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_numeric_type_graph + fh = io.BytesIO() + self.writer(G, fh, infer_numeric_types=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert_nodes_equal(G.nodes(), H.nodes()) + assert_edges_equal(G.edges(), H.edges()) + assert_edges_equal(G.edges(data=True), H.edges(data=True)) + self.attribute_numeric_type_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = xml.getroot().getchildren() + assert_equal(len(children), 3) + + keys = [child.items() for child in children[:2]] + + assert_equal(len(keys), 2) + assert_in(('attr.type', 'double'), keys[0]) + assert_in(('attr.type', 'double'), keys[1]) + + def test_more_multigraph_keys(self): + """Writing keys as edge id attributes means keys become strings. + The original keys are stored as data, so read them back in + if `make_str(key) == edge_id` + This allows the adjacency to remain the same. 
+ """ + G = nx.MultiGraph() + G.add_edges_from([('a', 'b', 2), ('a', 'b', 3)]) + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname) + assert_true(H.is_multigraph()) + assert_edges_equal(G.edges(keys=True), H.edges(keys=True)) + assert_equal(G._adj, H._adj) + os.close(fd) + os.unlink(fname) + + def test_default_attribute(self): + G = nx.Graph(name="Fred") + G.add_node(1, label=1, color='green') + nx.add_path(G, [0, 1, 2, 3]) + G.add_edge(1, 2, weight=3) + G.graph['node_default'] = {'color': 'yellow'} + G.graph['edge_default'] = {'weight': 7} + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh, node_type=int) + assert_nodes_equal(G.nodes(), H.nodes()) + assert_edges_equal(G.edges(), H.edges()) + assert_equal(G.graph, H.graph) + + def test_multigraph_to_graph(self): + # test converting multigraph to graph if no parallel edges found + G = nx.MultiGraph() + G.add_edges_from([('a', 'b', 2), ('b', 'c', 3)]) # no multiedges + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname) + assert_false(H.is_multigraph()) + os.close(fd) + os.unlink(fname) + + def test_unicode_attributes(self): + G = nx.Graph() + try: # Python 3.x + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + node_type = str + except ValueError: # Python 2.6+ + name1 = unichr(2344) + unichr(123) + unichr(6543) + name2 = unichr(5543) + unichr(1543) + unichr(324) + node_type = unicode + G.add_edge(name1, 'Radiohead', foo=name2) + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=node_type) + assert_equal(G._adj, H._adj) + os.close(fd) + os.unlink(fname) + + def test_unicode_escape(self): + # test for handling json escaped stings in python 2 Issue #1880 + import json + a = dict(a='{"a": "123"}') # an object with many chars to escape + try: # Python 3.x + chr(2344) + sa = json.dumps(a) + except ValueError: # Python 2.6+ + sa = unicode(json.dumps(a)) + G = nx.Graph() + G.graph['test'] = sa + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert_equal(G.graph['test'], H.graph['test']) + + +class TestXMLGraphML(TestWriteGraphML): + writer = staticmethod(nx.write_graphml_xml) + + @classmethod + def setupClass(cls): + try: + import xml.etree.ElementTree + except ImportError: + raise SkipTest('xml.etree.ElementTree not available.')
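To make the key-as-edge-id convention from `test_more_multigraph_keys` concrete, here is a minimal round-trip sketch (illustrative only, assuming the patched reader and writer above):

```python
# Multiedge keys are written as GraphML edge ids plus a 'key' data
# element; on read, when make_str(key) == id the original key object is
# restored, so integer keys survive the round trip.
import io
import networkx as nx

G = nx.MultiGraph()
G.add_edges_from([('a', 'b', 2), ('a', 'b', 3)])  # integer edge keys

fh = io.BytesIO()
nx.write_graphml(G, fh)
fh.seek(0)
H = nx.read_graphml(fh)

assert sorted(H.edges(keys=True)) == [('a', 'b', 2), ('a', 'b', 3)]
```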
Reimplement graphml to use SAX (and not build XML documents in memory) (migrated from Trac #455)
Original ticket https://networkx.lanl.gov/trac/ticket/455
Reported 2010-11-01 by @alexsdutton, assigned to @alexsdutton.

I've reimplemented the readwrite.graphml API to use a SAX content handler and xml.sax.saxutils.XMLGenerator instead of etree. It passes the tests (so it could be considered a functional replacement), but may need tidying.

We'd like to use GraphML for serialization, but the current writer builds the XML document in memory, which is suboptimal for very large graphs. A SAX-based implementation also seems to be much quicker. I've attached the patch.
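For readers unfamiliar with the approach, the streaming idea in the ticket can be sketched as follows. This is not the attached patch, just a minimal illustration (the function name is made up) of emitting GraphML without holding the whole tree in memory:

```python
import sys
from xml.sax.saxutils import XMLGenerator

def stream_graphml(G, out=sys.stdout):
    # Each startElement/endElement pair is flushed to `out` immediately,
    # so memory use stays constant in the number of nodes and edges.
    gen = XMLGenerator(out, encoding='utf-8')
    gen.startDocument()
    gen.startElement('graphml',
                     {'xmlns': 'http://graphml.graphdrawing.org/xmlns'})
    edgedefault = 'directed' if G.is_directed() else 'undirected'
    gen.startElement('graph', {'edgedefault': edgedefault})
    for n in G:
        gen.startElement('node', {'id': str(n)})
        gen.endElement('node')
    for u, v in G.edges():
        gen.startElement('edge', {'source': str(u), 'target': str(v)})
        gen.endElement('edge')
    gen.endElement('graph')
    gen.endElement('graphml')
    gen.endDocument()
```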
Attachment in Trac by @alexsdutton, 2010-11-01: [graphml-reimplementation.patch](https://networkx.lanl.gov/trac/attachment/ticket/455/graphml-reimplementation.patch)

Comment in Trac by @hagberg, 2010-11-02

Thanks - that is great! I don't know much about XML parsing, but it would be great if this sped up reading and writing large files. Here are some comments on quick review:

- There are still some stray print statements to be removed
- To be included in NetworkX it should work with Python 3.x (and Python 2.6+), so it needs to handle unicode/str. It should be doable - see the type section in gexf.py for one hacky way.
- I'd like it to at least be able to read the yEd-generated graphml files (yfiles extension) like the current DOM version
- It's OK to define a new NetworkX exception - but maybe we should call it NetworkXIOError to match Python and other NetworkX exceptions
- Nit-picky alert: try to keep the line length less than 80 columns.

Comment in Trac by @alexsdutton, 2010-11-02

Hi Aric,

Thanks for the review and feedback. I'll get on to fixing those issues shortly.

I'm glad you're driving Py3k compatibility, and sorry for not considering it. In Py2.6+, am I right in thinking that all strings should be unicode once they've been pulled out of the XML anyway? In which case (to follow the gexf example) something like:

'''
try:
    chr(12345)     # Fails on Py!=3.
    unicode = str  # Py3k's str is our unicode type
except ValueError, e:
    pass
'''

should work if placed near the top.

With regards to yEd versions, would it be worth me adding more tests to lay out the required interoperability? I'd also like to improve the comments (i.e. add some), as it's possibly a little difficult to understand what it's up to.

Comment in Trac by Aric Hagberg <aric.hagberg, 2010-11-03

In [816ebcc0d1fa09646a7b0d03a90b99bc8aab96d8/networkx]:
'''
#!CommitTicketReference repository="networkx" revision="816ebcc0d1fa09646a7b0d03a90b99bc8aab96d8"
Add yfiles graphml data test. Addresses #455
'''

Comment in Trac by @hagberg, 2010-11-03

Replying to [comment:3 alexsdutton]:
> I'm glad you're driving Py3k compatibility, and sorry for not considering it.

Almost all of NetworkX works with Python 3.1 (only the few parts that depend on external libraries that aren't Python 3 ready).

> In Py2.6+, am I right in thinking that all strings should be unicode once they've been pulled out of the XML anyway? In which case (to follow the gexf example) something like:
> '''
> try:
>     chr(12345)     # Fails on Py!=3.
>     unicode = str  # Py3k's str is our unicode type
> except ValueError, e:
>     pass
> '''
> should work if placed near the top.

That seems like it will work. For Python 2.x the strings should be unicode (unless we want to allow the user to override that).

> With regards to yEd versions, would it be worth me adding more tests to lay out the required interoperability?

I just added a very basic smoke test for reading a yEd-generated graphml file. The current DOM implementation reads the file correctly but throws away any yfiles extension data. Obviously that isn't a full implementation...

Comment in Trac by @hagberg, 2011-05-14

This is still waiting for updated code.

Comment in Trac by @hagberg, 2011-05-20

Is there any progress on this issue, or should a new implementation be proposed?
I was forced to write my own GraphML writer to output very large graphs, and would be glad to send a pull request for this if needed.

I had no idea about this one. Thanks for bringing attention to it. Does your writer use SAX? Does it pass all the unittests we have for graphml?

I haven't finished the change, but it would use lxml instead of SAX (http://lxml.de/api.html#incremental-xml-generation), as it allows you to reuse some of the already written pieces using ElementTree. I will work on this and send you a pull request then.

I submitted a pull request 4 months ago, and still no comments.
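The lxml route mentioned here (and used by `GraphMLWriterLxml` in the eventual patch) relies on `lxml.etree.xmlfile` for incremental generation. A minimal sketch, assuming lxml >= 3.1 and an illustrative output path:

```python
import networkx as nx
from lxml import etree

G = nx.gnm_random_graph(10, 20)

# xmlfile streams markup as it is produced; each element is written and
# discarded instead of accumulating in an in-memory tree.
with etree.xmlfile('big_graph.graphml', encoding='utf-8') as xf:
    xf.write_declaration()
    with xf.element('graphml',
                    {'xmlns': 'http://graphml.graphdrawing.org/xmlns'}):
        with xf.element('graph', {'edgedefault': 'undirected'}):
            for n in G:
                xf.write(etree.Element('node', id=str(n)))
            for u, v in G.edges():
                xf.write(etree.Element('edge',
                                       source=str(u), target=str(v)))
```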
2017-07-15T06:31:47
networkx/networkx
2,522
networkx__networkx-2522
[ "2479" ]
65115bc9e15760ec4925796ae48b75d4c8b61b9e
diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py --- a/networkx/convert_matrix.py +++ b/networkx/convert_matrix.py @@ -37,7 +37,8 @@ __all__ = ['from_numpy_matrix', 'to_numpy_matrix', 'from_pandas_dataframe', 'to_pandas_dataframe', 'to_numpy_recarray', - 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix'] + 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix', + 'from_numpy_array', 'to_numpy_array'] def to_pandas_dataframe(G, nodelist=None, dtype=None, order=None, @@ -330,80 +331,11 @@ def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, [ 0., 0., 4.]]) """ import numpy as np - if nodelist is None: - nodelist = list(G) - nodeset = set(nodelist) - if len(nodelist) != len(nodeset): - msg = "Ambiguous ordering: `nodelist` contained duplicates." - raise nx.NetworkXError(msg) - - nlen = len(nodelist) - undirected = not G.is_directed() - index = dict(zip(nodelist, range(nlen))) - - # Initially, we start with an array of nans. Then we populate the matrix - # using data from the graph. Afterwards, any leftover nans will be - # converted to the value of `nonedge`. Note, we use nans initially, - # instead of zero, for two reasons: - # - # 1) It can be important to distinguish a real edge with the value 0 - # from a nonedge with the value 0. - # - # 2) When working with multi(di)graphs, we must combine the values of all - # edges between any two nodes in some manner. This often takes the - # form of a sum, min, or max. Using the value 0 for a nonedge would - # have undesirable effects with min and max, but using nanmin and - # nanmax with initially nan values is not problematic at all. - # - # That said, there are still some drawbacks to this approach. Namely, if - # a real edge is nan, then that value is a) not distinguishable from - # nonedges and b) is ignored by the default combinator (nansum, nanmin, - # nanmax) functions used for multi(di)graphs. If this becomes an issue, - # an alternative approach is to use masked arrays. Initially, every - # element is masked and set to some `initial` value. As we populate the - # graph, elements are unmasked (automatically) when we combine the initial - # value with the values given by real edges. At the end, we convert all - # masked values to `nonedge`. Using masked arrays fully addresses reason 1, - # but for reason 2, we would still have the issue with min and max if the - # initial values were 0.0. Note: an initial value of +inf is appropriate - # for min, while an initial value of -inf is appropriate for max. When - # working with sum, an initial value of zero is appropriate. Ideally then, - # we'd want to allow users to specify both a value for nonedges and also - # an initial value. For multi(di)graphs, the choice of the initial value - # will, in general, depend on the combinator function---sensible defaults - # can be provided. 
- - if G.is_multigraph(): - # Handle MultiGraphs and MultiDiGraphs - M = np.full((nlen, nlen), np.nan, order=order) - # use numpy nan-aware operations - operator = {sum: np.nansum, min: np.nanmin, max: np.nanmax} - try: - op = operator[multigraph_weight] - except: - raise ValueError('multigraph_weight must be sum, min, or max') - for u, v, attrs in G.edges(data=True): - if (u in nodeset) and (v in nodeset): - i, j = index[u], index[v] - e_weight = attrs.get(weight, 1) - M[i, j] = op([e_weight, M[i, j]]) - if undirected: - M[j, i] = M[i, j] - else: - # Graph or DiGraph, this is much faster than above - M = np.full((nlen, nlen), np.nan, order=order) - for u, nbrdict in G.adjacency(): - for v, d in nbrdict.items(): - try: - M[index[u], index[v]] = d.get(weight, 1) - except KeyError: - # This occurs when there are fewer desired nodes than - # there are nodes in the graph: len(nodelist) < len(G) - pass - - M[np.isnan(M)] = nonedge - M = np.asmatrix(M, dtype=dtype) + A = to_numpy_array(G, nodelist=nodelist, dtype=dtype, order=order, + multigraph_weight=multigraph_weight, weight=weight, + nonedge=nonedge) + M = np.asmatrix(A, dtype=dtype) return M @@ -923,6 +855,266 @@ def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None, return G +def to_numpy_array(G, nodelist=None, dtype=None, order=None, + multigraph_weight=sum, weight='weight', nonedge=0.0): + """Return the graph adjacency matrix as a NumPy array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy array. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data type, optional + A valid single NumPy data type used to initialize the array. + This must be a simple type such as int or numpy.float64 and + not a compound data type (see to_numpy_recarray) + If None, then the NumPy default is used. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None optional (default = 'weight') + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float (default = 0.0) + The array values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are array values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + A : NumPy ndarray + Graph adjacency matrix + + See Also + -------- + from_numpy_array + + Notes + ----- + Entries in the adjacency matrix are assigned to the weight edge attribute. + When an edge does not have a weight attribute, the value of the entry is + set to the number 1. For multiple (parallel) edges, the values of the + entries are determined by the `multigraph_weight` parameter. The default is + to sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the adjacency matrix is + built from the subgraph of `G` that is induced by the nodes in `nodelist`. 
+ + The convention used for self-loop edges in graphs is to assign the + diagonal array entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting NumPy array can be modified as follows: + + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_numpy_array(G) + >>> A + array([[ 1.]]) + >>> A[np.diag_indices_from(A)] *= 2 + >>> A + array([[ 2.]]) + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0,1,weight=2) + 0 + >>> G.add_edge(1,0) + 0 + >>> G.add_edge(2,2,weight=3) + 0 + >>> G.add_edge(2,2) + 1 + >>> nx.to_numpy_array(G, nodelist=[0,1,2]) + array([[ 0., 2., 0.], + [ 1., 0., 0.], + [ 0., 0., 4.]]) + """ + import numpy as np + if nodelist is None: + nodelist = list(G) + nodeset = set(nodelist) + if len(nodelist) != len(nodeset): + msg = "Ambiguous ordering: `nodelist` contained duplicates." + raise nx.NetworkXError(msg) + + nlen = len(nodelist) + undirected = not G.is_directed() + index = dict(zip(nodelist, range(nlen))) + + # Initially, we start with an array of nans. Then we populate the array + # using data from the graph. Afterwards, any leftover nans will be + # converted to the value of `nonedge`. Note, we use nans initially, + # instead of zero, for two reasons: + # + # 1) It can be important to distinguish a real edge with the value 0 + # from a nonedge with the value 0. + # + # 2) When working with multi(di)graphs, we must combine the values of all + # edges between any two nodes in some manner. This often takes the + # form of a sum, min, or max. Using the value 0 for a nonedge would + # have undesirable effects with min and max, but using nanmin and + # nanmax with initially nan values is not problematic at all. + # + # That said, there are still some drawbacks to this approach. Namely, if + # a real edge is nan, then that value is a) not distinguishable from + # nonedges and b) is ignored by the default combinator (nansum, nanmin, + # nanmax) functions used for multi(di)graphs. If this becomes an issue, + # an alternative approach is to use masked arrays. Initially, every + # element is masked and set to some `initial` value. As we populate the + # graph, elements are unmasked (automatically) when we combine the initial + # value with the values given by real edges. At the end, we convert all + # masked values to `nonedge`. Using masked arrays fully addresses reason 1, + # but for reason 2, we would still have the issue with min and max if the + # initial values were 0.0. Note: an initial value of +inf is appropriate + # for min, while an initial value of -inf is appropriate for max. When + # working with sum, an initial value of zero is appropriate. Ideally then, + # we'd want to allow users to specify both a value for nonedges and also + # an initial value. For multi(di)graphs, the choice of the initial value + # will, in general, depend on the combinator function---sensible defaults + # can be provided. 
+ + if G.is_multigraph(): + # Handle MultiGraphs and MultiDiGraphs + A = np.full((nlen, nlen), np.nan, order=order) + # use numpy nan-aware operations + operator = {sum: np.nansum, min: np.nanmin, max: np.nanmax} + try: + op = operator[multigraph_weight] + except: + raise ValueError('multigraph_weight must be sum, min, or max') + + for u, v, attrs in G.edges(data=True): + if (u in nodeset) and (v in nodeset): + i, j = index[u], index[v] + e_weight = attrs.get(weight, 1) + A[i, j] = op([e_weight, A[i, j]]) + if undirected: + A[j, i] = A[i, j] + else: + # Graph or DiGraph, this is much faster than above + A = np.full((nlen, nlen), np.nan, order=order) + for u, nbrdict in G.adjacency(): + for v, d in nbrdict.items(): + try: + A[index[u], index[v]] = d.get(weight, 1) + except KeyError: + # This occurs when there are fewer desired nodes than + # there are nodes in the graph: len(nodelist) < len(G) + pass + + A[np.isnan(A)] = nonedge + A = np.asarray(A, dtype=dtype) + return A + + +def from_numpy_array(A, parallel_edges=False, create_using=None): + """Return a graph from NumPy array. + + The NumPy array is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + A : NumPy ndarray + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer array, then entry *(i, j)* in the adjacency matrix is + interpreted as the number of parallel edges joining vertices *i* + and *j* in the graph. If it is False, then the entries in the + adjacency matrix are interpreted as the weight of a single edge + joining the vertices. + + create_using : NetworkX graph + Use specified graph for result. The default is Graph() + + Notes + ----- + If `create_using` is an instance of :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (of the same type as `create_using`) with parallel edges. + + If `create_using` is an undirected multigraph, then only the edges + indicated by the upper triangle of the array `A` will be added to the + graph. + + If the NumPy array has a single data type for each array entry it + will be converted to an appropriate Python data type. + + If the NumPy array has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. 
+ + See Also + -------- + to_numpy_array + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy as np + >>> A = np.array([[1, 1], [2, 1]]) + >>> G = nx.from_numpy_array(A) + >>> G.edges(data=True) + EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})]) + + If `create_using` is a multigraph and the array has only integer entries, + the entries will be interpreted as weighted edges joining the vertices + (without creating parallel edges): + + >>> import numpy as np + >>> A = np.array([[1, 1], [1, 2]]) + >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph()) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` is a multigraph and the array has only integer entries + but `parallel_edges` is True, then the entries will be interpreted as + the number of parallel edges joining those two vertices: + + >>> import numpy as np + >>> A = np.array([[1, 1], [1, 2]]) + >>> temp = nx.MultiGraph() + >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + User defined compound data type on edges: + + >>> import numpy + >>> dt = [('weight', float), ('cost', int)] + >>> A = np.array([[(1.0, 2)]], dtype=dt) + >>> G = nx.from_numpy_array(A) + >>> G.edges() + EdgeView([(0, 0)]) + >>> G[0][0]['cost'] + 2 + >>> G[0][0]['weight'] + 1.0 + + """ + return from_numpy_matrix(A, parallel_edges=parallel_edges, + create_using=create_using) + + # fixture for nose tests def setup_module(module): from nose import SkipTest
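As an aside, the nan-initialization strategy described in the long comment inside `to_numpy_array` above is easy to demonstrate directly; this is just an illustrative NumPy sketch, not part of the patch:

``` python
import numpy as np

# If the array started at 0.0, combining a real edge weight with min
# would wrongly clamp it to 0.0, indistinguishable from a nonedge:
min(0.0, 7.0)             # 0.0

# Starting from nan and using the nan-aware reductions keeps the real
# weight, whichever combinator is in play:
np.nanmin([np.nan, 7.0])  # 7.0
np.nanmax([np.nan, 7.0])  # 7.0
np.nansum([np.nan, 7.0])  # 7.0

# Leftover nans (true nonedges) are converted at the end, exactly as
# the patch does with `A[np.isnan(A)] = nonedge`:
A = np.array([np.nan, 7.0])
A[np.isnan(A)] = 0.0
```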
diff --git a/networkx/tests/test_convert_numpy.py b/networkx/tests/test_convert_numpy.py --- a/networkx/tests/test_convert_numpy.py +++ b/networkx/tests/test_convert_numpy.py @@ -235,3 +235,210 @@ def test_dtype_int_multigraph(self): G = nx.MultiGraph(nx.complete_graph(3)) A = nx.to_numpy_matrix(G, dtype=int) assert_equal(A.dtype, int) + + +class TestConvertNumpyArray(object): + numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test + @classmethod + def setupClass(cls): + global np + global np_assert_equal + try: + import numpy as np + np_assert_equal=np.testing.assert_equal + except ImportError: + raise SkipTest('NumPy not available.') + + def __init__(self): + self.G1 = barbell_graph(10, 3) + self.G2 = cycle_graph(10, create_using=nx.DiGraph()) + + self.G3 = self.create_weighted(nx.Graph()) + self.G4 = self.create_weighted(nx.DiGraph()) + + def create_weighted(self, G): + g = cycle_graph(4) + G.add_nodes_from(g) + G.add_weighted_edges_from( (u,v,10+u) for u,v in g.edges()) + return G + + def assert_equal(self, G1, G2): + assert_true( sorted(G1.nodes())==sorted(G2.nodes()) ) + assert_true( sorted(G1.edges())==sorted(G2.edges()) ) + + def identity_conversion(self, G, A, create_using): + assert(A.sum() > 0) + GG = nx.from_numpy_array(A, create_using=create_using) + self.assert_equal(G, GG) + GW = nx.to_networkx_graph(A, create_using=create_using) + self.assert_equal(G, GW) + GI = create_using.__class__(A) + self.assert_equal(G, GI) + + def test_shape(self): + "Conversion from non-square array." + A=np.array([[1,2,3],[4,5,6]]) + assert_raises(nx.NetworkXError, nx.from_numpy_array, A) + + def test_identity_graph_array(self): + "Conversion from graph to array to graph." + A = nx.to_numpy_array(self.G1) + self.identity_conversion(self.G1, A, nx.Graph()) + + def test_identity_digraph_array(self): + """Conversion from digraph to array to digraph.""" + A = nx.to_numpy_array(self.G2) + self.identity_conversion(self.G2, A, nx.DiGraph()) + + def test_identity_weighted_graph_array(self): + """Conversion from weighted graph to array to weighted graph.""" + A = nx.to_numpy_array(self.G3) + self.identity_conversion(self.G3, A, nx.Graph()) + + def test_identity_weighted_digraph_array(self): + """Conversion from weighted digraph to array to weighted digraph.""" + A = nx.to_numpy_array(self.G4) + self.identity_conversion(self.G4, A, nx.DiGraph()) + + def test_nodelist(self): + """Conversion from graph to array to graph with nodelist.""" + P4 = path_graph(4) + P3 = path_graph(3) + nodelist = list(P3) + A = nx.to_numpy_array(P4, nodelist=nodelist) + GA = nx.Graph(A) + self.assert_equal(GA, P3) + + # Make nodelist ambiguous by containing duplicates. 
+ nodelist += [nodelist[0]] + assert_raises(nx.NetworkXError, nx.to_numpy_array, P3, nodelist=nodelist) + + def test_weight_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3)) for n in range(3) ) + P4 = path_graph(4) + A = nx.to_numpy_array(P4) + np_assert_equal(A, nx.to_numpy_array(WP4,weight=None)) + np_assert_equal(0.5*A, nx.to_numpy_array(WP4)) + np_assert_equal(0.3*A, nx.to_numpy_array(WP4,weight='other')) + + def test_from_numpy_array_type(self): + A=np.array([[1]]) + G=nx.from_numpy_array(A) + assert_equal(type(G[0][0]['weight']),int) + + A=np.array([[1]]).astype(np.float) + G=nx.from_numpy_array(A) + assert_equal(type(G[0][0]['weight']),float) + + A=np.array([[1]]).astype(np.str) + G=nx.from_numpy_array(A) + assert_equal(type(G[0][0]['weight']),str) + + A=np.array([[1]]).astype(np.bool) + G=nx.from_numpy_array(A) + assert_equal(type(G[0][0]['weight']),bool) + + A=np.array([[1]]).astype(np.complex) + G=nx.from_numpy_array(A) + assert_equal(type(G[0][0]['weight']),complex) + + A=np.array([[1]]).astype(np.object) + assert_raises(TypeError,nx.from_numpy_array,A) + + def test_from_numpy_array_dtype(self): + dt=[('weight',float),('cost',int)] + A=np.array([[(1.0,2)]],dtype=dt) + G=nx.from_numpy_array(A) + assert_equal(type(G[0][0]['weight']),float) + assert_equal(type(G[0][0]['cost']),int) + assert_equal(G[0][0]['cost'],2) + assert_equal(G[0][0]['weight'],1.0) + + def test_to_numpy_recarray(self): + G=nx.Graph() + G.add_edge(1,2,weight=7.0,cost=5) + A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)]) + assert_equal(sorted(A.dtype.names),['cost','weight']) + assert_equal(A.weight[0,1],7.0) + assert_equal(A.weight[0,0],0.0) + assert_equal(A.cost[0,1],5) + assert_equal(A.cost[0,0],0) + + def test_numpy_multigraph(self): + G=nx.MultiGraph() + G.add_edge(1,2,weight=7) + G.add_edge(1,2,weight=70) + A=nx.to_numpy_array(G) + assert_equal(A[1,0],77) + A=nx.to_numpy_array(G,multigraph_weight=min) + assert_equal(A[1,0],7) + A=nx.to_numpy_array(G,multigraph_weight=max) + assert_equal(A[1,0],70) + + def test_from_numpy_array_parallel_edges(self): + """Tests that the :func:`networkx.from_numpy_array` function + interprets integer weights as the number of parallel edges when + creating a multigraph. + + """ + A = np.array([[1, 1], [1, 2]]) + # First, with a simple graph, each integer entry in the adjacency + # matrix is interpreted as the weight of a single edge in the graph. + expected = nx.DiGraph() + edges = [(0, 0), (0, 1), (1, 0)] + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + expected.add_edge(1, 1, weight=2) + actual = nx.from_numpy_array(A, parallel_edges=True, + create_using=nx.DiGraph()) + assert_graphs_equal(actual, expected) + actual = nx.from_numpy_array(A, parallel_edges=False, + create_using=nx.DiGraph()) + assert_graphs_equal(actual, expected) + # Now each integer entry in the adjacency matrix is interpreted as the + # number of parallel edges in the graph if the appropriate keyword + # argument is specified. + edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)] + expected = nx.MultiDiGraph() + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + actual = nx.from_numpy_array(A, parallel_edges=True, + create_using=nx.MultiDiGraph()) + assert_graphs_equal(actual, expected) + expected = nx.MultiDiGraph() + expected.add_edges_from(set(edges), weight=1) + # The sole self-loop (edge 0) on vertex 1 should have weight 2. 
+ expected[1][1][0]['weight'] = 2 + actual = nx.from_numpy_array(A, parallel_edges=False, + create_using=nx.MultiDiGraph()) + assert_graphs_equal(actual, expected) + + def test_symmetric(self): + """Tests that a symmetric array has edges added only once to an + undirected multigraph when using :func:`networkx.from_numpy_array`. + + """ + A = np.array([[0, 1], [1, 0]]) + G = nx.from_numpy_array(A, create_using=nx.MultiGraph()) + expected = nx.MultiGraph() + expected.add_edge(0, 1, weight=1) + assert_graphs_equal(G, expected) + + def test_dtype_int_graph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. + + """ + G = nx.complete_graph(3) + A = nx.to_numpy_array(G, dtype=int) + assert_equal(A.dtype, int) + + def test_dtype_int_multigraph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. + + """ + G = nx.MultiGraph(nx.complete_graph(3)) + A = nx.to_numpy_array(G, dtype=int) + assert_equal(A.dtype, int)
Add {to, from}_numpy_array functions

Should we add a `to_numpy_array` function to generate an `np.array` from a graph and a `from_numpy_array` function for the reverse? These would be similar to the existing `{to, from}_numpy_matrix` functions.

**Motivation:**

* Times have changed since #895.
* `np.matrix` is as good as deprecated, especially after the introduction of the `@` operator for matrix multiplication of arrays in Python 3.5.
* `np.matrix` doesn't always produce the same results as `np.array` and can lead to confusion.
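To make the last motivation point concrete, here is a small sketch (not part of the issue text) of one way `np.matrix` and `np.array` diverge:

``` python
import numpy as np

M = np.matrix([[1, 2], [3, 4]])
A = np.array([[1, 2], [3, 4]])

M * M  # matrix multiplication: matrix([[ 7, 10], [15, 22]])
A * A  # elementwise product:   array([[ 1,  4], [ 9, 16]])

# Since Python 3.5, the `@` operator gives matrix multiplication for
# plain arrays as well, which removes the main reason to use np.matrix:
A @ A  # array([[ 7, 10], [15, 22]])
```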
NetworkX uses matrix and sparse_matrix for consistency and because the SciPy sparse matrix collection has many good tools associated with it. The sparse matrix mimics the matrix structure, though I have read that there are a few differences. Also, changing from a matrix object to an array is fast and easy with ```numpy.asarray(M)```, ```M.A```, ```M.getA()``` and perhaps other methods. I would be OK with adding a conversion function ```to_numpy_array``` so long as it is easy to maintain.

The `to_numpy_array` could be implemented using the existing `to_numpy_matrix` function, so that it remains easy to maintain:

```
def to_numpy_array(G, nodelist=None, dtype=None, order=None,
                   multigraph_weight=sum, weight='weight', nonedge=0.0):
    M = to_numpy_matrix(G, nodelist=nodelist, dtype=dtype, order=order,
                        multigraph_weight=multigraph_weight, weight=weight,
                        nonedge=nonedge)
    return M.A
```

`from_numpy_array` would require a bit more work, but could be based on the `from_numpy_matrix` implementation. I'm very willing to do this and create a PR if you're happy to give me the go ahead.

Yes - if you could create a PR I'll work to get it in. Thanks

Should this be part of the Networkx 2 milestone? As @harryscholes mentions, Python 3.5+ has the `@` operator, and the NumPy matrix class is all but deprecated... Networkx 2 is a release for the future, so it should be convenient to use the more modern array class instead of the matrix.

@AllanLRH sounds like a good idea to me. I'll get working on it soon.
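For reference, the three matrix-to-array conversions mentioned above are interchangeable; a quick sketch:

``` python
import numpy as np

M = np.matrix([[0., 1.], [1., 0.]])

np.asarray(M)  # plain ndarray with the matrix data
M.A            # property shorthand for the same conversion
M.getA()       # method form, also equivalent to np.asarray(M)
```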
2017-07-18T00:25:42
networkx/networkx
2,525
networkx__networkx-2525
[ "2273" ]
377d0eb34b83a1b742d041351b87a99eb54981a9
diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py --- a/networkx/algorithms/bridges.py +++ b/networkx/algorithms/bridges.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # bridges.py - bridge-finding algorithms # -# Copyright 2004-2016 NetworkX developers. +# Copyright 2004-2017 NetworkX developers. # # This file is part of NetworkX. # @@ -13,7 +13,7 @@ import networkx as nx from networkx.utils import not_implemented_for -__all__ = ['bridges', 'has_bridges'] +__all__ = ['bridges', 'has_bridges', 'local_bridges'] @not_implemented_for('multigraph') @@ -121,3 +121,61 @@ def has_bridges(G, root=None): return False else: return True + + +@not_implemented_for('multigraph') +@not_implemented_for('directed') +def local_bridges(G, with_span=True, weight=None): + """Iterate over local bridges of `G` optionally computing the span + + A *local bridge* is an edge whose endpoints have no common neighbors. + That is, the edge is not part of a triangle in the graph. + + The *span* of a *local bridge* is the shortest path length between + the endpoints if the local bridge is removed. + + Parameters + ---------- + G : undirected graph + + with_span : bool + If True, yield a 3-tuple `(u, v, span)` + + weight : function, string or None (default: None) + If function, used to compute edge weights for the span. + If string, the edge data attribute used in calculating span. + If None, all edges have weight 1. + + Yields + ------ + e : edge + The local bridges as an edge 2-tuple of nodes `(u, v)` or + as a 3-tuple `(u, v, span)` when `with_span is True`. + + Examples + -------- + A cycle graph has every edge a local bridge with span N-1. + + >>> G = nx.cycle_graph(9) + >>> (0, 8, 8) in set(nx.local_bridges(G)) + True + """ + if with_span is not True: + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + yield u, v + else: + wt = nx.weighted._weight_function(G, weight) + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + enodes = {u, v} + def hide_edge(n, nbr, d): + if n not in enodes or nbr not in enodes: + return wt(n, nbr, d) + return None + + try: + span = nx.shortest_path_length(G, u, v, weight=hide_edge) + yield u, v, span + except nx.NetworkXNoPath: + yield u, v, float('inf')
diff --git a/networkx/algorithms/tests/test_bridges.py b/networkx/algorithms/tests/test_bridges.py --- a/networkx/algorithms/tests/test_bridges.py +++ b/networkx/algorithms/tests/test_bridges.py @@ -1,6 +1,6 @@ # test_bridges.py - unit tests for bridge-finding algorithms # -# Copyright 2004-2016 NetworkX developers. +# Copyright 2004-2017 NetworkX developers. # # This file is part of NetworkX. # @@ -8,6 +8,7 @@ # information. """Unit tests for bridge-finding algorithms.""" from unittest import TestCase +from nose.tools import assert_equal, assert_in import networkx as nx @@ -34,3 +35,39 @@ def test_barbell_graph(self): source = 0 bridges = list(nx.bridges(G, source)) self.assertEqual(bridges, [(2, 3)]) + + +class TestLocalBridges(TestCase): + """Unit tests for the local_bridge function.""" + + def setUp(self): + self.BB = nx.barbell_graph(4, 0) + self.square = nx.cycle_graph(4) + self.tri = nx.cycle_graph(3) + + def test_nospan(self): + expected = {(3, 4), (4, 3)} + assert_in(next(nx.local_bridges(self.BB, with_span=False)), expected) + assert_equal(set(nx.local_bridges(self.square, with_span=False)), self.square.edges) + assert_equal(list(nx.local_bridges(self.tri, with_span=False)), []) + + def test_no_weight(self): + inf = float('inf') + expected = {(3, 4, inf), (4, 3, inf)} + assert_in(next(nx.local_bridges(self.BB)), expected) + expected = {(u, v, 3) for u, v, in self.square.edges} + assert_equal(set(nx.local_bridges(self.square)), expected) + assert_equal(list(nx.local_bridges(self.tri)), []) + + def test_weight(self): + inf = float('inf') + G = self.square.copy() + + G.edges[1, 2]['weight'] = 2 + expected = {(u, v, 5 - wt) for u, v, wt in G.edges(data='weight', default=1)} + assert_equal(set(nx.local_bridges(G, weight='weight')), expected) + + expected = {(u, v, 6) for u, v in G.edges} + lb = nx.local_bridges(G, weight=lambda u, v, d: 2) + assert_equal(set(lb), expected) +
Create bridges.py

Contains three simple functions for the detection and retrieval of bridges or local bridges in an undirected network.
Thanks for this! Could you please add tests and reformat your code to PEP8 specs.

@rmsyed Thanks for your contribution! Before you spend the time to reformat your code, I'd like to propose implementing the bridge-finding algorithm given in the Wikipedia article, https://en.wikipedia.org/wiki/Bridge_(graph_theory), which is linear time as opposed to the cubic time of the proposed implementation here. Are you interested or able to do that?

Hi guys, Okay, so I've updated the bridges.py file to implement Tarjan's algorithm and I've also updated it to include examples for each function. I ran the pep8 check on the code and it fully validated. The problem is that when I commit it, the Travis CI build gives an error for the bridges_exist() function's examples. I'm not sure why, considering that it works perfectly fine when I run it locally and that all the other examples in the code seem to work. Does anyone have an idea why this is happening? Thanks.

When you run your tests, make sure you're doing `nosetests --with-doctest`. Also, your doctest output is different between Python 2 and Python 3 because `print` became a function. Because of how the REPL works, you don't need the `print`s—just put the expression you want evaluated on the line.

This is a really useful feature that you've implemented. I notice that `all_local_bridges()` is iterating over the edges, so it's cleaner to do that like this:

``` python
bridges = {}
for e in G.edges():
    G.remove_edge(*e)
    try:
        (u, v) = e
        path_length = nx.shortest_path_length(G, u, v)
    except nx.NetworkXNoPath:
        bridges[e] = -1  # found a bridge
        if first_match: return bridges
    else:
        if path_length > 2:  # found a local bridge
            bridges[e] = path_length
            if first_match: return bridges
    finally:
        G.add_edge(*e)
return bridges
```

You can also avoid global variables if you alter `all_bridges()` to use an inner function:

``` python
def bridges(G):
    """ Looks through the graph object `G` for all bridges.

    We formally define a bridge to be any edge `(u, v)` such that the
    removal of the edge increases the total number of connected
    components.

    Parameters
    ----------
    G : Undirected Graph object

    Returns
    ----------
    iterable
        Edges that are bridges

    Examples
    --------
    >>> G = nx.cycle_graph(5)
    >>> bridges = bridges(G)
    >>> list(bridges)
    []
    >>> G.remove_edge(0,1)
    >>> list(bridges(G))
    [(2, 1), (3, 2), (4, 3), (0, 4)]

    Notes
    ----------
    This function can be useful to quickly determine what bridges
    exist in a given network. We use an implementation of Tarjan's
    Bridge-finding algorithm to do this. The algorithm is described
    in [1]_.

    References
    ----------
    .. [1] R. Endre Tarjan, "A note on finding the bridges of a graph",
       *Information Processing Letters*, **2** (6): 160--161
    """
    visited = set()
    depths = {}
    low = {}
    parent = {node: None for node in G}

    def bridge_util(u, depth):
        visited.add(u)
        depth += 1
        depths[u] = low[u] = depth
        for v in G[u]:
            if v not in visited:
                parent[v] = u
                for e in bridge_util(v, depth):
                    yield e
                # Check if subtree rooted at v has connection
                # to an ancestor of u.
                low[u] = min(low[u], low[v])
                # If the lowest vertex reachable from the subtree under v
                # is below u in the DFS tree, then `u-v` is a bridge.
                if low[v] > depths[u]:
                    yield (u, v)
            elif v != parent[u]:
                low[u] = min(low[u], depths[v])

    for u in G:
        if u not in visited:
            for e in bridge_util(u, depth=0):
                yield e
```

This version is a generator, so it would make your `bridges_exist()` function even faster.
Hi @rmsyed—looking through the Travis logs, your problem is here:

```
File "/home/travis/virtualenv/pypy3-2.4.0/site-packages/networkx/algorithms/bridges.py", line 217, in all_bridges
    startnode = allnodes[0]
TypeError: 'generator' object is not subscriptable
```

The comment I posted above circumvents that, after which your build should pass.

Thanks Arya! Those look like much cleaner implementations and likely more efficient. I made some small tweaks to the code that would ensure that the bridge-finding algorithm ran on all connected components in the network (before it was only running on one). However, it still seems that every time I update and commit the code, the Travis build fails on the same issue.

**Edit:** Okay, I just saw your newest comment. I'll try to update the code with your suggestion, although I still don't understand why this issue arises. I removed the print statements as suggested and ran the doctest locally and, at least for Python 2.7, it didn't give any issues (I've attached the output here). So I'm still not sure why it's giving errors on the GitHub build. [doctest_trace.txt](https://github.com/networkx/networkx/files/517339/doctest_trace.txt)

Hey @rmsyed—Travis is comparing against the dev version of networkx, but I bet that when you run `import networkx as nx`, it gets your local version of networkx, which is (probably) the older 1.11. A lot of the main library functions are now generators. One example is `G.nodes()`. Now that it's a generator, you can duplicate the functionality you want with `next(allnodes)`, which gets the next (in this case, first) item from the generator. There's actually a function in `nx.utils` called `arbitrary_element`, which uses that pattern while making your intent a bit clearer. Of course, you can sidestep that whole process with the algorithm posted above.

One more thought: since `all_local_bridges()` may return just the first match, I might rename it `local_bridges()`. For symmetry, I'd rename `all_bridges()` to `bridges()`. Again, I'm really impressed that you tackled something missing from the codebase. It's always scary to venture forth in new directions.

Looks like this isn't the first PR of Tarjan's bridge-finding algorithm. See the discussion at #903.

Oh, good catch! Was that code ever implemented in a public release of networkx? I couldn't find a `bridges()` function in the networkx public documentation. If it has been implemented, perhaps it would be good to close this pull request or maybe consider narrowing the scope to only the local bridges function.

Can you adjust your pull request to create a `bridges()` function that is a wrapper around the existing code that's already in NetworkX? As discussed on the linked pull request above, see the `networkx/algorithms/components/biconnected.py` module. Note that module is for determining 2-node-connectivity, not 2-edge-connectivity.

Ok, I've modified the code for the `bridges()` function to use the `biconnected_components()` function to find all bridges.

Edit: I just realized the notes in the latest commit were the notes from an earlier commit. I'll update those comments accordingly, but first I want to make sure the code is suitable and appropriate.

The `local_bridges` function is needlessly complicated. From my understanding based on the definition you've provided for "local bridge", an edge is a "local bridge" if its two incident nodes do not share a common neighbor.
So I think you could do this (assuming a graph with no self-loops):

``` python
def local_bridges(G):
    for u, v in G.edges():
        if not (set(G[u]) & set(G[v])):
            yield u, v
```

Is my understanding correct?

Yes, the definition is right and your code does detect local bridges, but it doesn't return the span of those bridges, which is often important to know. The original function was doing both, but I suppose we could use your code, modified slightly to also compute the span. I do agree that your version is simpler, although I think the worst case time complexity is the same once we include the code for computing the spans. I'll modify the code accordingly.

Oh, I see, you are also returning the distance between the two endpoints when the edge is removed. Well, how about something like

``` python
def local_bridges(G):
    for u, v in G.edges():
        if not (set(G[u]) & set(G[v])):
            H = G.copy()
            H.remove_edge(u, v)
            try:
                d = nx.shortest_path_length(H, u, v)
            except nx.NetworkXNoPath:
                d = float('inf')
            yield u, v, d
```

If you need some kind of dictionary, you can do

``` python
from collections import defaultdict

distances = defaultdict(dict)
for u, v, d in local_bridges(G):
    distances[u][v] = distances[v][u] = d
```

It would be nice if there were a faster algorithm for this. Do you have any references for these algorithms, or a reference for local bridges, with which I am not familiar?

Your update looks good, although I think it might be more efficient to simply remove the edge, check the distance, and then add the edge back rather than creating an entire copy each time. Also, I think simply returning -1 for infinite distance might be suitable since span is always an integer. So this might look something like:

```
def local_bridges(G):
    for u, v in G.edges():
        if not (set(G[u]) & set(G[v])):
            G.remove_edge(u, v)
            try:
                d = nx.shortest_path_length(G, u, v)
                G.add_edge(u, v)
            except nx.NetworkXNoPath:
                d = -1
                G.add_edge(u, v)
            yield u, v, d
```

As far as I'm aware, the algorithm for finding all local bridges is linear in O(V+E) and the algorithm for finding the distance from some node `u` to `v` is linear in O(V+E), so the algorithm will be quadratic time. It would be interesting if there was a faster solution.

The concept of local bridges was first (I think) introduced by Mark Granovetter in his paper "The Strength of Weak Ties" (1973) on pages 1364-1365. He uses the term "degree" instead of span, but Easley and Kleinberg in their book "Networks, Crowds and Markets" (2010) use the term "span" (page 52). I prefer to call it span because degree is an ambiguous term which could also refer simply to the number of edges on a node. You can find Easley and Kleinberg's pdf book for free on their site: http://www.cs.cornell.edu/home/kleinber/networks-book/

The reason I suggested using a copy is so that we do not have to modify the user's graph directly. This is something of a convention in NetworkX functions (at least, that's what it seems to me). This is both slow and memory-intensive, yes, but it doesn't perform any unexpected modification of the user's data, and a less memory-hungry version can be implemented directly by the user if needed.

As for the running time, it is quadratic if the graph is sparse, but if the graph is dense, the running time is quartic (!), which is particularly unpleasant for large graphs. You can increase the efficiency by only copying the user's graph once, outside of the loop over edges:

``` python
def local_bridges(G):
    H = G.copy(with_data=False)
    for u, v in G.edges():
        ...
```

Then make sure to re-add the edge after removing it.

You can avoid copying the graph and also avoid removing the edge by "hiding" the edge using a weight function in the shortest path routine. The weight function would be something like:

```
enodes = set((u, v))
weight = lambda n, nbr, d: 1 if (n not in enodes or nbr not in enodes) else None
```

Then replace `shortest_path_length(G,u,v)` with `shortest_path_length(G,u,v,weight)`.
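Putting the edge-hiding idea together, here is a self-contained sketch; it mirrors the approach the patch above takes, but `local_bridges_spans` is an illustrative name, not the committed API:

``` python
import networkx as nx


def local_bridges_spans(G):
    """Yield (u, v, span) for each local bridge of G, without mutating G."""
    for u, v in G.edges():
        if set(G[u]) & set(G[v]):
            continue  # the endpoints share a neighbor, so (u, v) is not a local bridge
        enodes = {u, v}

        def hide_uv(n, nbr, d):
            # Returning None makes the shortest-path routine skip the edge,
            # so (u, v) is "hidden" without removing it from G.
            return None if {n, nbr} == enodes else 1

        try:
            span = nx.shortest_path_length(G, u, v, weight=hide_uv)
        except nx.NetworkXNoPath:
            span = float('inf')  # hiding (u, v) disconnects its endpoints
        yield u, v, span


# In a 4-cycle every edge is a local bridge with span 3:
G = nx.cycle_graph(4)
print(sorted(local_bridges_spans(G)))
# [(0, 1, 3), (0, 3, 3), (1, 2, 3), (2, 3, 3)]
```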
2017-07-19T13:45:20
networkx/networkx
2,526
networkx__networkx-2526
[ "2310" ]
ee73880319dba590e6e3884460e4332f75825e39
diff --git a/networkx/generators/geometric.py b/networkx/generators/geometric.py --- a/networkx/generators/geometric.py +++ b/networkx/generators/geometric.py @@ -94,8 +94,8 @@ def random_geometric_graph(n, radius, dim=2, pos=None, p=2): Which Minkowski distance metric to use. `p` has to meet the condition ``1 <= p <= infinity``. - If this argument is not specified, the :math:`L^2` metric (the Euclidean - distance metric) is used. + If this argument is not specified, the :math:`L^2` metric + (the Euclidean distance metric) is used. This should not be confused with the `p` of an Erdős-Rényi random graph, which represents probability. @@ -293,7 +293,7 @@ def should_join(pair): @nodes_or_number(0) -def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0, 0, 1, 1), +def waxman_graph(n, beta=0.4, alpha=0.1, L=None, domain=(0, 0, 1, 1), metric=None): r"""Return a Waxman random graph. @@ -302,7 +302,7 @@ def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0, 0, 1, 1), joined by an edge with probability .. math:: - p = \alpha \exp(-d / \beta L). + p = \beta \exp(-d / \alpha L). This function implements both Waxman models, using the `L` keyword argument. @@ -316,10 +316,10 @@ def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0, 0, 1, 1), ---------- n : int or iterable Number of nodes or iterable of nodes - alpha: float - Model parameter beta: float Model parameter + alpha: float + Model parameter L : float, optional Maximum distance between nodes. If not specified, the actual distance is calculated. @@ -363,6 +363,13 @@ def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0, 0, 1, 1), .. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry .. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance + Notes + ----- + Starting in NetworkX 2.0 the parameters alpha and beta align with their + usual roles in the probability distribution. In earlier versions their + positions in the expresssion were reversed. Their position in the calling + sequence reversed as well to minimize backward incompatibility. + References ---------- .. [1] B. M. Waxman, *Routing of multipoint connections*. @@ -392,7 +399,7 @@ def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0, 0, 1, 1), # `pair` is the pair of nodes to decide whether to join. def should_join(pair): - return random.random() < alpha * math.exp(-dist(*pair) / (beta * L)) + return random.random() < beta * math.exp(-dist(*pair) / (alpha * L)) G.add_edges_from(filter(should_join, combinations(G, 2))) return G
diff --git a/networkx/generators/tests/test_geometric.py b/networkx/generators/tests/test_geometric.py --- a/networkx/generators/tests/test_geometric.py +++ b/networkx/generators/tests/test_geometric.py @@ -1,11 +1,15 @@ from itertools import combinations -from math import sqrt from nose.tools import assert_equal from nose.tools import assert_false from nose.tools import assert_true import networkx as nx +from networkx.generators.geometric import euclidean + + +def l1dist(x, y): + return sum(abs(a - b) for a, b in zip(x, y)) class TestRandomGeometricGraph(object): @@ -13,7 +17,6 @@ class TestRandomGeometricGraph(object): function. """ - def test_number_of_nodes(self): G = nx.random_geometric_graph(50, 0.25) assert_equal(len(G), 50) @@ -27,7 +30,7 @@ def test_distances(self): """ # Use the Euclidean metric, the default according to the # documentation. - dist = lambda x, y: sqrt(sum((a - b) ** 2 for a, b in zip(x, y))) + dist = euclidean G = nx.random_geometric_graph(50, 0.25) for u, v in combinations(G, 2): # Adjacent vertices must be within the given distance. @@ -43,7 +46,7 @@ def test_p(self): """ # Use the L1 metric. - dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y)) + dist = l1dist G = nx.random_geometric_graph(50, 0.25, p=1) for u, v in combinations(G, 2): # Adjacent vertices must be within the given distance. @@ -62,7 +65,7 @@ def test_node_names(self): G = nx.random_geometric_graph(nodes, 0.25) assert_equal(len(G), len(nodes)) - dist = lambda x, y: sqrt(sum((a - b) ** 2 for a, b in zip(x, y))) + dist = euclidean for u, v in combinations(G, 2): # Adjacent vertices must be within the given distance. if v in G[u]: @@ -109,7 +112,7 @@ def test_distances(self): """ # Use the Euclidean metric, the default according to the # documentation. - dist = lambda x, y: sqrt(sum((a - b) ** 2 for a, b in zip(x, y))) + dist = euclidean G = nx.geographical_threshold_graph(50, 100) for u, v in combinations(G, 2): # Adjacent vertices must not exceed the threshold. @@ -125,7 +128,7 @@ def test_metric(self): """ # Use the L1 metric. - dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y)) + dist = l1dist G = nx.geographical_threshold_graph(50, 100, metric=dist) for u, v in combinations(G, 2): # Adjacent vertices must not exceed the threshold. @@ -156,7 +159,8 @@ def test_metric(self): generator. """ - dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y)) + # Use the L1 metric. + dist = l1dist G = nx.waxman_graph(50, 0.5, 0.1, metric=dist) assert_equal(len(G), 50)
Waxman graph parameters interchange roles

In the Waxman graph, edges are placed with probability

    p = alpha * exp(-r / (beta * L))

However, in the usual definition the roles of alpha and beta are interchanged. For example: http://www.huaxiaspace.net/academic/classes/fa00/cse202/project/08Waxman.pdf

May be a tough one to fix for backwards compatibility.
Could we switch the order in the calling sequence when we change the names? That should take care of backward compatibility when people haven't used keywords. But it puts the parameters out of alphabetical order: ```waxman_graph(100, beta, alpha)``` What do you think?

Seems reasonable to me. But others may have stronger opinions.
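A quick sketch of why the positional swap preserves old call sites (based on the patch above; the helper name below is made up for illustration):

``` python
import math


def waxman_edge_probability(d, L, beta=0.4, alpha=0.1):
    # NetworkX 2.0 form from the patch: p = beta * exp(-d / (alpha * L)).
    return beta * math.exp(-d / (alpha * L))


# A pre-2.0 caller who wrote waxman_graph(100, 0.4, 0.1) was relying on
# p = alpha * exp(-d / (beta * L)) with alpha=0.4 and beta=0.1. After the
# rename-and-swap, the same positional call binds beta=0.4 and alpha=0.1,
# so the computed probability, 0.4 * exp(-d / (0.1 * L)), is unchanged.
print(waxman_edge_probability(d=0.5, L=1.0))  # 0.4 * exp(-5.0)
```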
2017-07-19T16:15:17
networkx/networkx
2,532
networkx__networkx-2532
[ "2109" ]
f1601955df3e0e9c221cfb0460b761d1d00a2eca
diff --git a/networkx/algorithms/mis.py b/networkx/algorithms/mis.py --- a/networkx/algorithms/mis.py +++ b/networkx/algorithms/mis.py @@ -1,24 +1,26 @@ # -*- coding: utf-8 -*- # $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $ -""" -Algorithm to find a maximal (not maximum) independent set. - -""" # Leo Lopes <[email protected]> # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. +# +# Authors: Leo Lopes <[email protected]> +# Loïc Séguin-C. <[email protected]> +""" +Algorithm to find a maximal (not maximum) independent set. -__author__ = "\n".join(["Leo Lopes <[email protected]>", - "Loïc Séguin-C. <[email protected]>"]) +""" +import random +import networkx as nx +from networkx.utils import not_implemented_for __all__ = ['maximal_independent_set'] -import random -import networkx as nx +@not_implemented_for('directed') def maximal_independent_set(G, nodes=None): """Return a random maximal independent set guaranteed to contain a given set of nodes. @@ -27,10 +29,10 @@ def maximal_independent_set(G, nodes=None): of G induced by these nodes contains no edges. A maximal independent set is an independent set such that it is not possible to add a new node and still get an independent set. - + Parameters ---------- - G : NetworkX graph + G : NetworkX graph nodes : list or iterable Nodes that must be part of the independent set. This set of nodes @@ -38,7 +40,7 @@ def maximal_independent_set(G, nodes=None): Returns ------- - indep_nodes : list + indep_nodes : list List of nodes that are part of a maximal independent set. Raises @@ -47,6 +49,9 @@ def maximal_independent_set(G, nodes=None): If the nodes in the provided list are not part of the graph or do not form an independent set, an exception is raised. + NetworkXNotImplemented + If `G` is directed. + Examples -------- >>> G = nx.path_graph(5) @@ -54,7 +59,7 @@ def maximal_independent_set(G, nodes=None): [4, 0, 2] >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP [1, 3] - + Notes ----- This algorithm does not solve the maximum independent set problem. @@ -67,7 +72,7 @@ def maximal_independent_set(G, nodes=None): if not nodes.issubset(G): raise nx.NetworkXUnfeasible( "%s is not a subset of the nodes of G" % nodes) - neighbors = set.union(*[set(G.neighbors(v)) for v in nodes]) + neighbors = set.union(*[set(G.adj[v]) for v in nodes]) if set.intersection(neighbors, nodes): raise nx.NetworkXUnfeasible( "%s is not an independent set of G" % nodes) @@ -76,6 +81,5 @@ def maximal_independent_set(G, nodes=None): while available_nodes: node = random.choice(list(available_nodes)) indep_nodes.append(node) - available_nodes.difference_update(list(G.neighbors(node)) + [node]) + available_nodes.difference_update(list(G.adj[node]) + [node]) return indep_nodes -
diff --git a/networkx/algorithms/tests/test_mis.py b/networkx/algorithms/tests/test_mis.py --- a/networkx/algorithms/tests/test_mis.py +++ b/networkx/algorithms/tests/test_mis.py @@ -1,10 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # $Id: test_maximal_independent_set.py 577 2011-03-01 06:07:53Z lleeoo $ -""" -Tests for maximal (not maximum) independent sets. - -""" # Copyright (C) 2004-2016 by # Leo Lopes <[email protected]> # Aric Hagberg <[email protected]> @@ -12,37 +8,42 @@ # Pieter Swart <[email protected]> # All rights reserved. # BSD license. +# +# Author: Leo Lopes <[email protected]> +""" +Tests for maximal (not maximum) independent sets. -__author__ = """Leo Lopes ([email protected])""" +""" from nose.tools import * import networkx as nx import random + class TestMaximalIndependantSet(object): def setup(self): self.florentine = nx.Graph() - self.florentine.add_edge('Acciaiuoli','Medici') - self.florentine.add_edge('Castellani','Peruzzi') - self.florentine.add_edge('Castellani','Strozzi') - self.florentine.add_edge('Castellani','Barbadori') - self.florentine.add_edge('Medici','Barbadori') - self.florentine.add_edge('Medici','Ridolfi') - self.florentine.add_edge('Medici','Tornabuoni') - self.florentine.add_edge('Medici','Albizzi') - self.florentine.add_edge('Medici','Salviati') - self.florentine.add_edge('Salviati','Pazzi') - self.florentine.add_edge('Peruzzi','Strozzi') - self.florentine.add_edge('Peruzzi','Bischeri') - self.florentine.add_edge('Strozzi','Ridolfi') - self.florentine.add_edge('Strozzi','Bischeri') - self.florentine.add_edge('Ridolfi','Tornabuoni') - self.florentine.add_edge('Tornabuoni','Guadagni') - self.florentine.add_edge('Albizzi','Ginori') - self.florentine.add_edge('Albizzi','Guadagni') - self.florentine.add_edge('Bischeri','Guadagni') - self.florentine.add_edge('Guadagni','Lamberteschi') - + self.florentine.add_edge('Acciaiuoli', 'Medici') + self.florentine.add_edge('Castellani', 'Peruzzi') + self.florentine.add_edge('Castellani', 'Strozzi') + self.florentine.add_edge('Castellani', 'Barbadori') + self.florentine.add_edge('Medici', 'Barbadori') + self.florentine.add_edge('Medici', 'Ridolfi') + self.florentine.add_edge('Medici', 'Tornabuoni') + self.florentine.add_edge('Medici', 'Albizzi') + self.florentine.add_edge('Medici', 'Salviati') + self.florentine.add_edge('Salviati', 'Pazzi') + self.florentine.add_edge('Peruzzi', 'Strozzi') + self.florentine.add_edge('Peruzzi', 'Bischeri') + self.florentine.add_edge('Strozzi', 'Ridolfi') + self.florentine.add_edge('Strozzi', 'Bischeri') + self.florentine.add_edge('Ridolfi', 'Tornabuoni') + self.florentine.add_edge('Tornabuoni', 'Guadagni') + self.florentine.add_edge('Albizzi', 'Ginori') + self.florentine.add_edge('Albizzi', 'Guadagni') + self.florentine.add_edge('Bischeri', 'Guadagni') + self.florentine.add_edge('Guadagni', 'Lamberteschi') + def test_K5(self): """Maximal independent set: K5""" G = nx.complete_graph(5) @@ -63,19 +64,22 @@ def test_exception(self): assert_raises(nx.NetworkXUnfeasible, nx.maximal_independent_set, G, ["Salviati", "Pazzi"]) + def test_digraph_exception(self): + G = nx.DiGraph([(1, 2), (3, 4)]) + assert_raises(nx.NetworkXNotImplemented, nx.maximal_independent_set, G) + def test_florentine_family(self): G = self.florentine indep = nx.maximal_independent_set(G, ["Medici", "Bischeri"]) assert_equal(sorted(indep), sorted(["Medici", "Bischeri", "Castellani", "Pazzi", - "Ginori", "Lamberteschi"])) + "Ginori", "Lamberteschi"])) def test_bipartite(self): G = 
nx.complete_bipartite_graph(12, 34) indep = nx.maximal_independent_set(G, [4, 5, 9, 10]) assert_equal(sorted(indep), list(range(12))) - def test_random_graphs(self): """Generate 50 random graphs of different types and sizes and make sure that all sets are independent and maximal.""" @@ -86,4 +90,3 @@ def test_random_graphs(self): neighbors_of_MIS = set.union(*(set(G.neighbors(v)) for v in IS)) for v in set(G.nodes()).difference(IS): assert_true(v in neighbors_of_MIS) -
maximal_independent_set does not work for DiGraph

Currently [maximal_independent_set](https://github.com/networkx/networkx/blob/d7d906e1d16ef331da0bc1d149953e7532155acc/networkx/algorithms/mis.py#L70) returns the wrong results for a `DiGraph` because it uses the `G.neighbors` method, which returns only the successor nodes in a `DiGraph`. I believe the [all_neighbors](https://github.com/networkx/networkx/blob/13b373bf6938c077d1e61adc60a48cb910a75755/networkx/classes/function.py#L540) function should be used instead to make `maximal_independent_set` work correctly for both graph types.
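A minimal sketch of the asymmetry the issue describes:

``` python
import networkx as nx

G = nx.DiGraph([(1, 2), (3, 2)])

print(list(G.neighbors(2)))          # [] (only successors of 2 are reported)
print(list(nx.all_neighbors(G, 2)))  # [1, 3] (predecessors and successors)
```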
Is your suggestion a standard definition of maximal independent set for directed graphs? It makes sense that it would be, but I didn't find a definition after a little searching. The alternative would be to return an error when the function is called with a directed graph.

I am not sure if MIS applies to directed graphs or not, actually. I just noted that `all_neighbors` would give the correct result, and I did not particularly see a reason to raise an exception. Then the user would be forced to use `to_undirected` to get the MIS (hence making an unnecessary copy of the `DiGraph`). However, if the literature states that MIS only applies to undirected graphs, then raising an exception makes the most sense.

Needs PR to raise exception if called with directed graph.
2017-07-21T22:58:11