Update app.py
app.py (CHANGED)
Removed in this update:

  - The commented-out Colab install preamble:
        # Commented out IPython magic to ensure Python compatibility.
        # %%capture
        # !pip install gradio
        # # !pip install gradio==3.50.2
        # !pip install cmocean
        # !pip install mesa
        # !pip install opinionated
        # !pip install git+https://github.com/MNoichl/opinionated.git#egg=opinionated
        # # import opinionated
        # from compress_pickle import dump, load
        from mesa.space import MultiGrid
  - The Quicksand font setup:
        # from opinionated.core import download_googlefont
        # download_googlefont('Quicksand', add_to_cache=True)
        # plt.rc('font', family='Quicksand')
  - Module-level test fixtures and checks, plus the "#@title Load network functionality" cell marker:
        experiences = {
            'dissident_experiences': [1, 0, 0],
            'supporter_experiences': [1, 1, 1],
        }
        half_life = 20
        decay_factors = [0.5 ** (i / half_life) for i in range(200)]
        # print(get_beta_mean_from_experience_dict(experiences, half_life, decay_factors))
        # print(get_beta_sample_from_experience_dict(experiences, half_life))
  - The docstrings of generate_community_points ("Generate 2D points grouped into communities
    (Gaussian around random centers)."), graph_from_coordinates ("Create a random geometric
    graph from an array of coordinates."), ensure_neighbors ("Ensure that all nodes have at
    least one neighbor."), PoliticalAgent ("An agent in the political model. Attributes:
    estimation (float): current expectation of political change; dissident (bool): True if
    supports regime change.") and PoliticalModel ("A model of a political system with multiple
    interacting agents."), along with the plot_graph helper and a duplicate
    import matplotlib.pyplot as plt next to import gradio as gr.
  - Incomplete or superseded bodies of the decay and Beta helpers, the network utilities, the
    PoliticalAgent and PoliticalModel logic, the plotting code, and the Gradio wiring, all of
    which are rewritten in the hunks below.

Updated app.py, changed hunks (new version):
@@ -5,23 +5,9 @@ Original file is located at
    https://colab.research.google.com/drive/1omNn2hrbDL_s1qwCOr7ViaIjrRW61YDt
"""

import random
import pandas as pd
from mesa import Agent, Model
import networkx as nx
from mesa.time import RandomActivation
from mesa.datacollection import DataCollector
@@ -29,99 +15,61 @@ import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import cmocean
import tqdm
import scipy as sp
from scipy.stats import beta
import opinionated
import PIL

plt.style.use("opinionated_rc")

# -----------------------------
# Decayed Beta helpers
# -----------------------------
def apply_half_life_decay(data_list, half_life, decay_factors=None):
    steps = len(data_list)
    if decay_factors is None or len(decay_factors) < steps:
        decay_factors = [0.5 ** (i / half_life) for i in range(steps)]
    return [data_list[i] * decay_factors[steps - 1 - i] for i in range(steps)]
def get_beta_mean_from_experience_dict(experiences, half_life=20, decay_factors=None):
    eta = 1e-10
    a = sum(apply_half_life_decay(experiences['dissident_experiences'], half_life, decay_factors)) + eta
    b = sum(apply_half_life_decay(experiences['supporter_experiences'], half_life, decay_factors)) + eta
    return beta.mean(a, b)

def get_beta_sample_from_experience_dict(experiences, half_life=20, decay_factors=None):
    eta = 1e-10
    a = sum(apply_half_life_decay(experiences['dissident_experiences'], half_life, decay_factors)) + eta
    b = sum(apply_half_life_decay(experiences['supporter_experiences'], half_life, decay_factors)) + eta
    return beta.rvs(a, b, size=1)[0]
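A quick, illustrative sanity check in the spirit of the print statements that the old
version kept at module level (not part of app.py; it only uses the two helpers defined in
this hunk). The newest entry of each experience list keeps full weight, older entries are
discounted by 0.5 ** (age / half_life), and the Beta mean a / (a + b) turns the two decayed
sums into an estimate between 0 and 1.

    toy_experiences = {
        'dissident_experiences': [1, 0, 1],   # oldest ... newest
        'supporter_experiences': [0, 1, 0],
    }
    print(apply_half_life_decay(toy_experiences['dissident_experiences'], half_life=2))
    # [0.5, 0.0, 1.0]: weights 0.5**1, 0.5**0.5, 0.5**0, applied oldest to newest
    print(get_beta_mean_from_experience_dict(toy_experiences, half_life=2))
    # ~0.68, since a ~ 1.5 and b ~ 0.71 after decay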
# -----------------------------
# Network helpers
# -----------------------------
def generate_community_points(num_communities, total_nodes, powerlaw_exponent=2.0, sigma=0.05, plot=False):
    sequence = nx.utils.powerlaw_sequence(num_communities, powerlaw_exponent)
    probabilities = sequence / np.sum(sequence)
    community_assignments = np.random.choice(num_communities, size=total_nodes, p=probabilities)
    community_sizes = np.bincount(community_assignments)
    if len(community_sizes) < num_communities:
        community_sizes = np.pad(community_sizes, (0, num_communities - len(community_sizes)), 'constant')
    points, community_centers = [], []
    for i in range(num_communities):
        center = np.random.rand(2)
        community_centers.append(center)
        community_points = np.random.normal(center, sigma, (community_sizes[i], 2))
        points.append(community_points)
    points = np.concatenate(points)
    if plot:
        plt.figure(figsize=(8, 8))
        plt.scatter(points[:, 0], points[:, 1], alpha=0.5)
        sns.kdeplot(x=points[:, 0], y=points[:, 1], levels=5, color="k", linewidths=1)
        plt.show()
    return points

def graph_from_coordinates(coords, radius):
    kdtree = sp.spatial.cKDTree(coords)
    edge_indexes = kdtree.query_pairs(radius)
    g = nx.Graph()

@@ -129,96 +77,85 @@ def graph_from_coordinates(coords, radius):
    g.add_edges_from(edge_indexes)
    return g
def ensure_neighbors(graph):
    nodes = list(graph.nodes())
    for node in nodes:
        if graph.degree(node) == 0:
            other = random.choice(nodes)
            while other == node:
                other = random.choice(nodes)
            graph.add_edge(node, other)
    return graph

def compute_homophily(G, attr_name='attr'):
    same = sum(G.nodes[n1][attr_name] == G.nodes[n2][attr_name] for n1, n2 in G.edges())
    m = G.number_of_edges()
    return same / m if m > 0 else 0

def assign_initial_attributes(G, ratio, attr_name='attr'):
    nodes = list(G.nodes)
    random.shuffle(nodes)
    k = int(ratio * len(nodes))
    for i, node in enumerate(nodes):
        G.nodes[node][attr_name] = 0 if i < k else 1
    return G

def distribute_attributes(G, target_homophily, seed=None, max_iter=10000, cooling_factor=0.9995, attr_name='attr'):
    random.seed(seed)
    current = compute_homophily(G, attr_name)
    temp = 1.0
    for _ in range(max_iter):
        nodes = list(G.nodes)
        random.shuffle(nodes)
        for n1, n2 in zip(nodes[::2], nodes[1::2]):
            if G.nodes[n1][attr_name] != G.nodes[n2][attr_name]:
                G.nodes[n1][attr_name], G.nodes[n2][attr_name] = G.nodes[n2][attr_name], G.nodes[n1][attr_name]
                break
        new = compute_homophily(G, attr_name)
        delta = new - current
        dir_factor = np.sign(target_homophily - current)
        if abs(new - target_homophily) < abs(current - target_homophily) or \
           (delta / temp < 700 and random.random() < np.exp(dir_factor * delta / temp)):
            current = new
        else:
            G.nodes[n1][attr_name], G.nodes[n2][attr_name] = G.nodes[n2][attr_name], G.nodes[n1][attr_name]
        temp *= cooling_factor
    return G

def reindex_graph_to_match_attributes(G1, G2, attr_name):
    g1_sorted = sorted(G1.nodes(data=True), key=lambda x: x[1][attr_name])
    g2_sorted = sorted(G2.nodes(data=True), key=lambda x: x[1][attr_name])
    mapping = {g2[0]: g1[0] for g2, g1 in zip(g2_sorted, g1_sorted)}
    return nx.relabel_nodes(G2, mapping)
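To make the homophily measure concrete, here is a small illustrative check (not part of
app.py; it relies on compute_homophily above and the networkx import at the top of the
file). Homophily is simply the share of edges whose two endpoints carry the same attribute,
which is also the quantity distribute_attributes nudges toward target_homophily by swapping
attributes between node pairs.

    H = nx.cycle_graph(4)                       # edges 0-1, 1-2, 2-3, 3-0
    for n in H.nodes:
        H.nodes[n]['attr'] = 0 if n < 2 else 1  # nodes 0,1 vs nodes 2,3
    print(compute_homophily(H))                 # 0.5: edges 0-1 and 2-3 match, 1-2 and 3-0 do not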

# -----------------------------
# Reporters
# -----------------------------
def compute_mean(model):
    return np.mean([a.estimation for a in model.schedule.agents])

def compute_median(model):
    return np.median([a.estimation for a in model.schedule.agents])

def compute_std(model):
    return np.std([a.estimation for a in model.schedule.agents])

# -----------------------------
# Agent and Model
# -----------------------------
class PoliticalAgent(Agent):
    def __init__(self, unique_id, model, dissident):
        # Mesa versions differ here. Try the new signature, then fall back.
        try:
            super().__init__(unique_id, model)
        except TypeError:
            super().__init__()  # object.__init__ without args
            self.unique_id = unique_id
            self.model = model
            # provide .random like classic Mesa Agent did
            if hasattr(model, "random"):
                self.random = model.random

        self.experiences = {
            'dissident_experiences': [1],
            'supporter_experiences': [1],

@@ -229,62 +166,65 @@ class PoliticalAgent(Agent):
        self.dissident = dissident

    def update_estimation(self, network_id):
        partners = [self.model.id2agent[n]
                    for n in self.model.networks[network_id]['network'].neighbors(self.unique_id)]

        current_estimate = get_beta_mean_from_experience_dict(
            self.experiences, half_life=self.model.half_life, decay_factors=self.model.decay_factors)
        self.estimations.append(current_estimate)
        self.estimation = current_estimate

        current_experiment = get_beta_sample_from_experience_dict(
            self.experiences, half_life=self.model.half_life, decay_factors=self.model.decay_factors)
        self.experiments.append(current_experiment)

        if not partners:
            return

        partner = random.choice(partners)
        ntype = self.model.networks[network_id]['type']

        if ntype == 'physical':
            if current_experiment >= self.model.threshold:
                if partner.dissident:
                    self.experiences['dissident_experiences'].append(1)
                    self.experiences['supporter_experiences'].append(0)
                else:
                    self.experiences['dissident_experiences'].append(0)
                    self.experiences['supporter_experiences'].append(1)
                partner.experiences['dissident_experiences'].append(1 * self.model.social_learning_factor)
                partner.experiences['supporter_experiences'].append(0)
            else:
                partner.experiences['dissident_experiences'].append(0)
                partner.experiences['supporter_experiences'].append(1 * self.model.social_learning_factor)

        elif ntype == 'social_media':
            if partner.dissident:
                self.experiences['dissident_experiences'].append(1 * self.model.social_media_factor)
                self.experiences['supporter_experiences'].append(0)
            else:
                self.experiences['dissident_experiences'].append(0)
                self.experiences['supporter_experiences'].append(1 * self.model.social_media_factor)

    def combine_estimations(self):
        # Bounded confidence placeholder; keep harmless
        if not hasattr(self, "current_estimations"):
            return
        values = [list(d.values())[0] for d in self.current_estimations]
        if len(values) > 0:
            within = [v for v in values if abs(self.estimation - v) <= self.model.bounded_confidence_range]
            if len(within) > 0:
                self.estimation = np.mean(within)

    def step(self):
        if not hasattr(self, 'current_estimations'):
            self.current_estimations = []
        for net_id in self.model.networks.keys():
            self.update_estimation(net_id)
        self.combine_estimations()
        del self.current_estimations
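A toy trace of one physical encounter, using plain dicts instead of Mesa agents (an
illustrative sketch, not part of app.py; it assumes the update rule as reconstructed in the
hunk above and the Beta helpers defined earlier). The acting agent draws current_experiment
from its decayed Beta; if the draw clears the model threshold it logs what its partner is,
and the partner in turn logs the encounter as dissident-coded behaviour, scaled by
social_learning_factor.

    observer = {'dissident_experiences': [1], 'supporter_experiences': [1]}
    partner = {'dissident_experiences': [1], 'supporter_experiences': [1]}
    social_learning_factor = 1.0

    # Suppose the observer's draw cleared the threshold and the partner is a supporter:
    observer['dissident_experiences'].append(0)
    observer['supporter_experiences'].append(1)
    partner['dissident_experiences'].append(1 * social_learning_factor)
    partner['supporter_experiences'].append(0)

    print(get_beta_mean_from_experience_dict(observer, half_life=20))  # ~0.33
    print(get_beta_mean_from_experience_dict(partner, half_life=20))   # ~0.67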
class PoliticalModel(Model):
    def __init__(
        self,
        n_agents,

@@ -302,7 +242,7 @@ class PoliticalModel(Model):
        intervention_list=None,
        rng_seed=None,
    ):
        # Ensure Mesa creates self.random
        try:
            super().__init__(rng_seed=rng_seed)  # Mesa >= 3.0
        except TypeError:

@@ -322,10 +262,10 @@
        self.print_frequency = print_frequency
        self.early_stopping_steps = early_stopping_steps
        self.early_stopping_range = early_stopping_range
        self.bounded_confidence_range = 1.0  # harmless default

        self.mean_estimations = []
        self.decay_factors = [0.5 ** (i / self.half_life) for i in range(500)]
        self.running = True
        self.share_regime_supporters = share_regime_supporters

@@ -335,9 +275,7 @@
        # Align attributes across networks and compute homophilies
        for i, this_network in enumerate(self.networks):
            self.networks[this_network]["network"] = assign_initial_attributes(
                self.networks[this_network]["network"], self.share_regime_supporters, attr_name='dissident'
            )
            if 'homophily' in self.networks[this_network]:
                self.networks[this_network]["network"] = distribute_attributes(

@@ -347,20 +285,17 @@
                    cooling_factor=0.995,
                    attr_name='dissident'
                )
            self.networks[this_network].setdefault('network_data_to_keep', {})
            self.networks[this_network]['network_data_to_keep']['actual_homophily'] = compute_homophily(
                self.networks[this_network]["network"], attr_name='dissident'
            )
            if i > 0:
                first_key = next(iter(self.networks))
                self.networks[this_network]["network"] = reindex_graph_to_match_attributes(
                    self.networks[first_key]["network"], self.networks[this_network]["network"], 'dissident'
                )

        # Create agents and id -> agent map
        self.id2agent = {}
        first_key = next(iter(self.networks))
        for i in range(self.num_agents):

@@ -375,16 +310,13 @@
            "Median": compute_median,
            "STD": compute_std
        }
        for this_network in self.networks:
            if 'network_data_to_keep' in self.networks[this_network]:
                for key, value in self.networks[this_network]['network_data_to_keep'].items():
                    attr_name = this_network + '_' + key
                    setattr(self, attr_name, value)
                    def reporter(model, attr_name=attr_name):
                        return getattr(model, attr_name)
                    model_reporters[attr_name] = reporter

        if agent_reporters:
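One detail worth a second look in the hunk above: the per-network reporters are built in a
loop, and the closure uses a default argument (attr_name=attr_name) so that each reporter
keeps the attribute name that was current when it was defined. A minimal standalone
illustration of why that matters (not part of app.py):

    makers_late, makers_bound = [], []
    for name in ['physical_actual_homophily', 'social_media_actual_homophily']:
        makers_late.append(lambda model: name)              # late binding: all see the last name
        makers_bound.append(lambda model, name=name: name)  # default arg freezes the current value
    print([f(None) for f in makers_late])   # ['social_media_actual_homophily', 'social_media_actual_homophily']
    print([f(None) for f in makers_bound])  # ['physical_actual_homophily', 'social_media_actual_homophily']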
@@ -401,44 +333,37 @@
        # Interventions
        for this_intervention in self.intervention_list:
            if this_intervention['time'] == len(self.mean_estimations):
                if this_intervention['type'] == 'threshold_adjustment':
                    self.threshold = max(0, min(1, self.threshold + this_intervention['strength']))
                if this_intervention['type'] == 'share_adjustment':
                    target_supporter_share = max(0, min(1, self.share_regime_supporters + this_intervention['strength']))
                    agents = list(self.schedule.agents)
                    current_supporters = sum(not a.dissident for a in agents)
                    total_agents = len(agents)
                    required_supporters = int(target_supporter_share * total_agents)
                    to_change = abs(required_supporters - current_supporters)
                    if current_supporters / total_agents < target_supporter_share:
                        pool = [a for a in agents if a.dissident]
                        for a in random.sample(pool, min(to_change, len(pool))):
                            a.dissident = False
                    else:
                        pool = [a for a in agents if not a.dissident]
                        for a in random.sample(pool, min(to_change, len(pool))):
                            a.dissident = True
                if this_intervention['type'] == 'social_media_adjustment':
                    self.social_media_factor = max(0, min(1, self.social_media_factor + this_intervention['strength']))

        self.schedule.step()
        self.mean_estimations.append(compute_mean(self))

        if len(self.mean_estimations) >= self.early_stopping_steps:
            recent = self.mean_estimations[-self.early_stopping_steps:]
            if max(recent) - min(recent) < self.early_stopping_range:
                self.running = False

# -----------------------------
# Runner and plotting
# -----------------------------
def run_and_plot_simulation(
    separate_agent_types=False,
    n_agents=300,

@@ -458,57 +383,49 @@ def run_and_plot_simulation(
    social_media_network_type_powerlaw_exponent=3,
    social_media_network_type='Powerlaw',
    use_social_media_network=False,
    social_media_factor=1.0,
    rng_seed=None
):
    print(physical_network_type)
    networks = {}

    # Physical network
    if physical_network_type == 'Fully Connected':
        G = nx.complete_graph(n_agents)
        networks['physical'] = {"network": G, "type": "physical", "positions": nx.circular_layout(G)}
    elif physical_network_type == "Powerlaw":
        s = nx.utils.powerlaw_sequence(n_agents, powerlaw_exponent)
        G = nx.expected_degree_graph(s, selfloops=False)
        G = nx.convert_node_labels_to_integers(ensure_neighbors(G))
        networks['physical'] = {"network": G, "type": "physical", "positions": nx.kamada_kawai_layout(G)}
    elif physical_network_type == "Random Geometric":
        pts = np.random.rand(n_agents, 2)
        G = graph_from_coordinates(pts, phys_network_radius)
        G = nx.convert_node_labels_to_integers(ensure_neighbors(G))
        networks['physical'] = {"network": G, "type": "physical", "positions": pts}

    if introduce_physical_homophily_true_false:
        networks['physical']['homophily'] = physical_homophily
        networks['physical'].setdefault('network_data_to_keep', {})

    # Social media network
    if use_social_media_network:
        if social_media_network_type == 'Fully Connected':
            G = nx.complete_graph(n_agents)
            networks['social_media'] = {"network": G, "type": "social_media", "positions": nx.circular_layout(G)}
        elif social_media_network_type == "Powerlaw":
            s = nx.utils.powerlaw_sequence(n_agents, social_media_network_type_powerlaw_exponent)
            G = nx.expected_degree_graph(s, selfloops=False)
            G = nx.convert_node_labels_to_integers(ensure_neighbors(G))
            networks['social_media'] = {"network": G, "type": "social_media", "positions": nx.kamada_kawai_layout(G)}
        elif social_media_network_type == "Random Geometric":
            pts = np.random.rand(n_agents, 2)
            G = graph_from_coordinates(pts, social_media_network_type_random_geometric_radius)
            G = nx.convert_node_labels_to_integers(ensure_neighbors(G))
            networks['social_media'] = {"network": G, "type": "social_media", "positions": pts}
        if introduce_social_media_homophily_true_false:
            networks['social_media']['homophily'] = social_media_homophily
            networks['social_media'].setdefault('network_data_to_keep', {})

    model = PoliticalModel(
        n_agents,

@@ -516,12 +433,12 @@ def run_and_plot_simulation(
        share_regime_supporters,
        threshold,
        social_learning_factor=social_learning_factor,
        social_media_factor=social_media_factor,
        half_life=half_life,
        print_agents=False,
        print_frequency=50,
        agent_reporters=True,
        intervention_list=[],
        rng_seed=rng_seed
    )
@@ -531,75 +448,64 @@ def run_and_plot_simulation(
    agent_df = model.datacollector.get_agent_vars_dataframe().reset_index()
    agent_df_pivot = agent_df.pivot(index='Step', columns='AgentID', values='Estimation')

    # Time series plot
    fig1, ax = plt.subplots(figsize=(12, 8))
    if not separate_agent_types:
        for col in agent_df_pivot.columns:
            plt.plot(agent_df_pivot.index, agent_df_pivot[col], color='gray', alpha=0.1)
        mean_est = agent_df_pivot.mean(axis=1)
        plt.plot(mean_est.index, mean_est, color='black', linewidth=2)
    else:
        colors = {1: '#d6a44b', 0: '#1b4968'}
        for aid in agent_df_pivot.columns:
            typ = agent_df.loc[agent_df['AgentID'] == aid, 'Dissident'].iloc[0]
            plt.plot(agent_df_pivot.index, agent_df_pivot[aid], color=colors[typ], alpha=0.1)
        for typ, color in colors.items():
            mean_est = agent_df_pivot.loc[:, agent_df[agent_df['Dissident'] == typ]['AgentID']].mean(axis=1)
            plt.plot(mean_est.index, mean_est, color=color, linewidth=2, label='Dissident' if typ == 1 else 'Supporter')
        plt.legend(loc='lower right')

    plt.title('Agent Estimation Over Time', loc='right')
    plt.xlabel('Time step')
    plt.ylabel('Estimation')
    plt.savefig('run_plot.png', bbox_inches='tight', dpi=400, transparent=True)
    run_plot = PIL.Image.open('run_plot.png').convert('RGBA')

    # Network plot
    n_networks = len(networks)
    fig2, axs = plt.subplots(1, n_networks, figsize=(9.5 * n_networks, 8))
    if n_networks == 1:
        axs = [axs]

    estimations = {a.unique_id: a.estimation for a in model.schedule.agents}
    for idx, (net_id, net_dict) in enumerate(networks.items()):
        net = net_dict['network']
        nx.set_node_attributes(net, estimations, 'estimation')
        pos = net_dict.get('positions', nx.kamada_kawai_layout(net))
        node_colors = [estimations[node] for node in net.nodes]
        axs[idx].set_title(f'Network: {net_id}', loc='right')
        nx.draw_networkx_nodes(
            net, pos, node_size=50, node_color=node_colors,
            cmap=cmocean.tools.crop_by_percent(cmocean.cm.curl, 20, which='both', N=None),
            vmin=0, vmax=1, ax=axs[idx]
        )
        nx.draw_networkx_edges(net, pos, alpha=0.3, ax=axs[idx])
        sm = mpl.cm.ScalarMappable(
            cmap=cmocean.tools.crop_by_percent(cmocean.cm.curl, 20, which='both', N=None),
            norm=plt.Normalize(vmin=0, vmax=1)
        )
        sm.set_array([])
        fig2.colorbar(sm, ax=axs[idx])

    plt.savefig('network_plot.png', bbox_inches='tight', dpi=400, transparent=True)
    network_plot = PIL.Image.open('network_plot.png').convert('RGBA')

    return run_plot, network_plot
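A headless smoke test of the runner (illustrative only, not part of app.py). It assumes the
parameters hidden between the hunks above keep the defaults that the visible part of the
signature suggests; both return values are PIL images, so they can be saved directly.

    run_plot, network_plot = run_and_plot_simulation(
        n_agents=100,
        physical_network_type='Powerlaw',
        rng_seed=1,
    )
    run_plot.save('smoke_run.png')
    network_plot.save('smoke_network.png')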
# -----------------------------
# Gradio UI
# -----------------------------
import gradio as gr

with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
    with gr.Column():

@@ -610,25 +516,21 @@ Vary the parameters below, and click 'Run Simulation' to run.
    with gr.Column():
        with gr.Group():
            separate_agent_types = gr.Checkbox(value=False, label="Separate agent types in plot")
            n_agents_slider = gr.Slider(100, 500, step=10, label="Number of Agents", value=150)
            share_regime_slider = gr.Slider(0.0, 1.0, step=0.01, label="Share of Regime Supporters", value=0.4)
            threshold_slider = gr.Slider(0.0, 1.0, step=0.01, label="Threshold", value=0.5)
            social_learning_slider = gr.Slider(0.0, 2.0, step=0.1, label="Social Learning Factor", value=1.0)
            steps_slider = gr.Slider(10, 100, step=5, label="Simulation Steps", value=40)
            half_life_slider = gr.Slider(5, 50, step=5, label="Half-Life", value=20)

        # Physical network settings
        with gr.Group():
            gr.Markdown("""**Physical Network Settings:**""")
            introduce_physical_homophily_true_false = gr.Checkbox(value=False, label="Stipulate Homophily")
            with gr.Group(visible=False) as homophily_group:
                physical_homophily = gr.Slider(0, 1, label="Homophily", info='How much homophily to stipulate.')
            def update_homophily_group_visibility(checkbox_state):
                return {homophily_group: gr.Group(visible=checkbox_state)}
            introduce_physical_homophily_true_false.change(
                update_homophily_group_visibility,
                inputs=introduce_physical_homophily_true_false,
@@ -637,83 +539,70 @@ Vary the parameters below, and click 'Run Simulation' to run.

            physical_network_type = gr.Dropdown(label="Physical Network Type", value="Fully Connected",
                                                choices=["Fully Connected", "Random Geometric", "Powerlaw"])
            with gr.Group(visible=True) as physical_network_type_fully_connected_group:
                gr.Markdown("""""")
            with gr.Group(visible=False) as physical_network_type_random_geometric_group:
                physical_network_type_random_geometric_radius = gr.Slider(0.0, 0.5, label="Radius")
            with gr.Group(visible=False) as physical_network_type_powerlaw_group:
                physical_network_type_random_geometric_powerlaw_exponent = gr.Slider(0.0, 5.2, label="Powerlaw Exponent")
            def update_sliders(option):
                return {
                    physical_network_type_fully_connected_group: gr.Group(visible=option == "Fully Connected"),
                    physical_network_type_random_geometric_group: gr.Group(visible=option == "Random Geometric"),
                    physical_network_type_powerlaw_group: gr.Group(visible=option == "Powerlaw")
                }
            physical_network_type.change(
                update_sliders,
                inputs=physical_network_type,
                outputs=[
                    physical_network_type_fully_connected_group,
                    physical_network_type_random_geometric_group,
                    physical_network_type_powerlaw_group
                ]
            )

        # Social media settings
        use_social_media_network = gr.Checkbox(value=False, label="Use social media network")
        with gr.Group(visible=False) as social_media_group:
            gr.Markdown("""**Social Media Network Settings:**""")
            social_media_factor = gr.Slider(0, 2, label="Social Media Factor",
                                            info='Weight of social media vs learning in the real world.',
                                            value=1.0)
            introduce_social_media_homophily_true_false = gr.Checkbox(value=False, label="Stipulate Homophily")
            with gr.Group(visible=False) as social_media_homophily_group:
                social_media_homophily = gr.Slider(0, 1, label="Homophily", info='How much homophily to stipulate in social media network.')
            def update_social_media_homophily_group_visibility(checkbox_state):
                return {social_media_homophily_group: gr.Group(visible=checkbox_state)}
            introduce_social_media_homophily_true_false.change(
                update_social_media_homophily_group_visibility,
                inputs=introduce_social_media_homophily_true_false,
                outputs=social_media_homophily_group
            )
            social_media_network_type = gr.Dropdown(label="Social Media Network Type", value="Fully Connected",
                                                    choices=["Fully Connected", "Random Geometric", "Powerlaw"])
            with gr.Group(visible=True) as social_media_network_type_fully_connected_group:
                gr.Markdown("""""")
            with gr.Group(visible=False) as social_media_network_type_random_geometric_group:
                social_media_network_type_random_geometric_radius = gr.Slider(0.0, 0.5, label="Radius")
            with gr.Group(visible=False) as social_media_network_type_powerlaw_group:
                social_media_network_type_powerlaw_exponent = gr.Slider(0.0, 5.2, label="Powerlaw Exponent")
            def update_social_media_network_sliders(option):
                return {
                    social_media_network_type_fully_connected_group: gr.Group(visible=option == "Fully Connected"),
                    social_media_network_type_random_geometric_group: gr.Group(visible=option == "Random Geometric"),
                    social_media_network_type_powerlaw_group: gr.Group(visible=option == "Powerlaw")
                }
            social_media_network_type.change(
                update_social_media_network_sliders,
                inputs=social_media_network_type,
                outputs=[
                    social_media_network_type_fully_connected_group,
                    social_media_network_type_random_geometric_group,
                    social_media_network_type_powerlaw_group
                ]
            )
        def update_social_media_group_visibility(checkbox_state):
            return {social_media_group: gr.Group(visible=checkbox_state)}
        use_social_media_network.change(
            update_social_media_group_visibility,
            inputs=use_social_media_network,
@@ -726,8 +615,7 @@ Vary the parameters below, and click 'Run Simulation' to run.
            network_output = gr.Image(label="Networks")

    def run_simulation_and_plot(*args):
        return run_and_plot_simulation(*args)

    button.click(
        run_simulation_and_plot,

@@ -750,11 +638,10 @@ Vary the parameters below, and click 'Run Simulation' to run.
            social_media_network_type_powerlaw_exponent,
            social_media_network_type,
            use_social_media_network,
            social_media_factor,
        ],
        outputs=[plot_output, network_output]
    )

if __name__ == "__main__":
    demo.launch(debug=True)